-rw-r--r--Documentation/ABI/stable/sysfs-bus-xen-backend9
-rw-r--r--Documentation/ABI/testing/sysfs-driver-xen-blkback10
-rw-r--r--Documentation/arm64/sve.txt4
-rw-r--r--Documentation/devicetree/bindings/display/atmel/hlcdc-dc.txt23
-rw-r--r--Documentation/devicetree/bindings/display/bridge/lvds-transmitter.txt8
-rw-r--r--Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt1
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.txt87
-rw-r--r--Documentation/devicetree/bindings/display/bridge/toshiba,tc358764.txt35
-rw-r--r--Documentation/devicetree/bindings/display/mipi-dsi-bus.txt153
-rw-r--r--Documentation/devicetree/bindings/display/renesas,du.txt2
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt3
-rw-r--r--Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt14
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt14
-rw-r--r--Documentation/devicetree/bindings/watchdog/renesas-wdt.txt5
-rw-r--r--Documentation/gpu/drivers.rst1
-rw-r--r--Documentation/gpu/drm-kms.rst18
-rw-r--r--Documentation/gpu/drm-mm.rst2
-rw-r--r--Documentation/gpu/todo.rst71
-rw-r--r--Documentation/gpu/vkms.rst24
-rw-r--r--Documentation/hwmon/ina2xx2
-rw-r--r--Documentation/i2c/DMA-considerations10
-rw-r--r--Documentation/ioctl/ioctl-number.txt1
-rw-r--r--MAINTAINERS19
-rw-r--r--Makefile5
-rw-r--r--[-rwxr-xr-x]arch/arm/boot/dts/am335x-osd3358-sm-red.dts0
-rw-r--r--arch/arm/boot/dts/am4372.dtsi1
-rw-r--r--arch/arm/boot/dts/imx23-evk.dts90
-rw-r--r--arch/arm/boot/dts/imx28-evk.dts183
-rw-r--r--arch/arm/boot/dts/imx7d.dtsi12
-rw-r--r--arch/arm/boot/dts/omap4-droid4-xt894.dts20
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig1
-rw-r--r--arch/arm/configs/mxs_defconfig1
-rw-r--r--arch/arm/configs/versatile_defconfig14
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c39
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/configs/defconfig3
-rw-r--r--arch/arm64/crypto/ghash-ce-glue.c29
-rw-r--r--arch/arm64/crypto/sm4-ce-glue.c2
-rw-r--r--arch/m68k/mac/misc.c10
-rw-r--r--arch/nios2/Kconfig.debug9
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/riscv/include/asm/tlb.h4
-rw-r--r--arch/riscv/kernel/sys_riscv.c15
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/Makefile23
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S66
-rw-r--r--arch/x86/events/core.c2
-rw-r--r--arch/x86/include/asm/irqflags.h3
-rw-r--r--arch/x86/include/asm/pgtable-3level.h7
-rw-r--r--arch/x86/include/asm/processor.h4
-rw-r--r--arch/x86/include/asm/signal.h7
-rw-r--r--arch/x86/include/asm/stacktrace.h2
-rw-r--r--arch/x86/include/asm/tlbflush.h40
-rw-r--r--arch/x86/include/asm/vgtod.h2
-rw-r--r--arch/x86/kernel/alternative.c9
-rw-r--r--arch/x86/kernel/cpu/bugs.c46
-rw-r--r--arch/x86/kernel/cpu/common.c1
-rw-r--r--arch/x86/kernel/cpu/intel.c3
-rw-r--r--arch/x86/kernel/dumpstack.c20
-rw-r--r--arch/x86/lib/usercopy.c5
-rw-r--r--arch/x86/mm/fault.c2
-rw-r--r--arch/x86/mm/pageattr.c25
-rw-r--r--arch/x86/mm/pti.c2
-rw-r--r--arch/x86/mm/tlb.c7
-rw-r--r--arch/x86/platform/efi/efi_32.c8
-rw-r--r--arch/x86/xen/mmu_pv.c9
-rw-r--r--block/blk-wbt.c89
-rw-r--r--block/bsg.c8
-rw-r--r--block/elevator.c3
-rw-r--r--drivers/ata/pata_ftide010.c27
-rw-r--r--drivers/base/power/clock_ops.c2
-rw-r--r--drivers/block/xen-blkback/blkback.c99
-rw-r--r--drivers/block/xen-blkback/common.h14
-rw-r--r--drivers/block/xen-blkfront.c110
-rw-r--r--drivers/bluetooth/Kconfig1
-rw-r--r--drivers/bluetooth/btmtkuart.c8
-rw-r--r--drivers/bus/ti-sysc.c37
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/clk/clk-npcm7xx.c4
-rw-r--r--drivers/clk/x86/clk-st.c2
-rw-r--r--drivers/cpuidle/governors/menu.c13
-rw-r--r--drivers/crypto/caam/caamalg_qi.c6
-rw-r--r--drivers/crypto/caam/caampkc.c20
-rw-r--r--drivers/crypto/caam/jr.c3
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_dev.h3
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_lib.c1
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c57
-rw-r--r--drivers/crypto/chelsio/chtls/chtls.h5
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_main.c7
-rw-r--r--drivers/crypto/vmx/aes_cbc.c30
-rw-r--r--drivers/crypto/vmx/aes_xts.c21
-rw-r--r--drivers/dma-buf/Kconfig9
-rw-r--r--drivers/dma-buf/Makefile1
-rw-r--r--drivers/dma-buf/dma-buf.c1
-rw-r--r--drivers/dma-buf/udmabuf.c293
-rw-r--r--drivers/gpu/drm/Makefile3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c36
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c10
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h7
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c20
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c2
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c7
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c1
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c100
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h1
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c92
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c5
-rw-r--r--drivers/gpu/drm/bochs/bochs.h21
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c64
-rw-r--r--drivers/gpu/drm/bochs/bochs_fbdev.c79
-rw-r--r--drivers/gpu/drm/bochs/bochs_hw.c2
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c7
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c76
-rw-r--r--drivers/gpu/drm/bridge/Kconfig18
-rw-r--r--drivers/gpu/drm/bridge/Makefile2
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c4
-rw-r--r--drivers/gpu/drm/bridge/synopsys/Makefile2
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c499
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c779
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c27
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c51
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c4
-rw-r--r--drivers/gpu/drm/drm_atomic.c1506
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c35
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c1393
-rw-r--r--drivers/gpu/drm/drm_blend.c145
-rw-r--r--drivers/gpu/drm/drm_bridge.c4
-rw-r--r--drivers/gpu/drm/drm_bufs.c32
-rw-r--r--drivers/gpu/drm/drm_client.c2
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c4
-rw-r--r--drivers/gpu/drm/drm_connector.c8
-rw-r--r--drivers/gpu/drm/drm_context.c16
-rw-r--r--drivers/gpu/drm/drm_crtc.c9
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c1
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h19
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c92
-rw-r--r--drivers/gpu/drm/drm_dp_cec.c18
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c20
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c1
-rw-r--r--drivers/gpu/drm/drm_drv.c3
-rw-r--r--drivers/gpu/drm/drm_encoder.c2
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c26
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c26
-rw-r--r--drivers/gpu/drm/drm_fourcc.c37
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c60
-rw-r--r--drivers/gpu/drm/drm_gem.c6
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c4
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c1
-rw-r--r--drivers/gpu/drm/drm_internal.h5
-rw-r--r--drivers/gpu/drm/drm_ioctl.c6
-rw-r--r--drivers/gpu/drm/drm_irq.c4
-rw-r--r--drivers/gpu/drm/drm_lease.c8
-rw-r--r--drivers/gpu/drm/drm_lock.c4
-rw-r--r--drivers/gpu/drm/drm_mode_config.c3
-rw-r--r--drivers/gpu/drm/drm_mode_object.c4
-rw-r--r--drivers/gpu/drm/drm_panel.c2
-rw-r--r--drivers/gpu/drm/drm_pci.c4
-rw-r--r--drivers/gpu/drm/drm_plane.c23
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c1
-rw-r--r--drivers/gpu/drm/drm_prime.c4
-rw-r--r--drivers/gpu/drm/drm_property.c8
-rw-r--r--drivers/gpu/drm/drm_scatter.c8
-rw-r--r--drivers/gpu/drm/drm_syncobj.c102
-rw-r--r--drivers/gpu/drm/drm_vblank.c10
-rw-r--r--drivers/gpu/drm/drm_vma_manager.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h1
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug12
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c12
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c11
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c9
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c9
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c34
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c13
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/page_track.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h18
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c64
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c88
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c200
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h64
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c65
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_clflush.c7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c232
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.h26
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c45
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c52
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h23
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.h10
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c36
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c16
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c55
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h727
-rw-r--r--drivers/gpu/drm/i915/i915_request.c9
-rw-r--r--drivers/gpu/drm/i915/i915_request.h39
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c9
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h10
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c6
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c3
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c6
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c33
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c240
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c210
-rw-r--r--drivers/gpu/drm/i915/intel_display.h32
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c542
-rw-r--r--drivers/gpu/drm/i915/intel_dp_link_training.c25
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c20
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c62
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.h1
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h52
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c93
-rw-r--r--drivers/gpu/drm/i915/intel_guc.c102
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h12
-rw-r--r--drivers/gpu/drm/i915/intel_guc_ads.c2
-rw-r--r--drivers/gpu/drm/i915/intel_guc_ct.c7
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fwif.h1
-rw-r--r--drivers/gpu/drm/i915/intel_guc_log.c2
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.c35
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.h4
-rw-r--r--drivers/gpu/drm/i915/intel_hangcheck.c2
-rw-r--r--drivers/gpu/drm/i915/intel_hdcp.c6
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c18
-rw-r--r--drivers/gpu/drm/i915/intel_huc.c2
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c16
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c159
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h2
-rw-r--r--drivers/gpu/drm/i915/intel_lrc_reg.h2
-rw-r--r--drivers/gpu/drm/i915/intel_lspcon.c2
-rw-r--r--drivers/gpu/drm/i915/intel_mocs.c11
-rw-r--r--drivers/gpu/drm/i915/intel_mocs.h2
-rw-r--r--drivers/gpu/drm/i915/intel_pipe_crc.c119
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c101
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c277
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c110
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h41
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c1003
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c13
-rw-r--r--drivers/gpu/drm/i915/intel_uc_fw.c2
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c130
-rw-r--r--drivers/gpu/drm/i915/intel_wopcm.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_pages.c11
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c221
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_coherency.c38
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_object.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_sw_fence.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_guc.c38
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_hangcheck.c101
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_context.c11
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c11
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c92
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c47
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.h3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp.c18
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c27
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c21
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c2
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c2
-rw-r--r--drivers/gpu/drm/msm/msm_fence.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c1
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c139
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-dvi.c175
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c277
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-opa362.c132
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c187
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c197
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dpi.c88
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c247
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c103
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c146
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c101
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c165
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c102
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c192
-rw-r--r--drivers/gpu/drm/omapdrm/dss/base.c217
-rw-r--r--drivers/gpu/drm/omapdrm/dss/core.c26
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c26
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c134
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c192
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c569
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss-of.c47
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c45
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h11
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi.h8
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c351
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c334
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c6
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_wp.c8
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h306
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c208
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c149
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c291
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c371
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.h7
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c111
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.h6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c302
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h19
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c159
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.h6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c4
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c117
-rw-r--r--drivers/gpu/drm/qxl/qxl_draw.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c28
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h32
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c197
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c80
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c23
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c330
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.h9
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c44
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h18
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.h6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.h6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c87
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.h6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c12
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.h6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_regs.h13
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c14
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.h6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds_regs.h5
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig25
-rw-r--r--drivers/gpu/drm/rockchip/Makefile1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c104
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c48
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c173
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.h33
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c215
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.h99
-rw-r--r--drivers/gpu/drm/shmobile/Kconfig1
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_backlight.c6
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_backlight.h6
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c6
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.h6
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c6
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.h6
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_kms.c6
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_kms.h6
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_plane.c6
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_plane.h6
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_regs.h6
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c1
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c81
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.h3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c19
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_layer.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c203
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h27
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c17
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c24
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_tcon_top.c3
-rw-r--r--drivers/gpu/drm/tegra/drm.c4
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-core.c6
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c6
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c20
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c5
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c2
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c13
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c7
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drm_bus.c26
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h21
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c9
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c39
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c75
-rw-r--r--drivers/gpu/drm/vkms/Makefile2
-rw-r--r--drivers/gpu/drm/vkms/vkms_crc.c261
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c114
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c14
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h75
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c83
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c19
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c175
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c4
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c2
-rw-r--r--drivers/hwmon/adt7475.c25
-rw-r--r--drivers/hwmon/ina2xx.c13
-rw-r--r--drivers/hwmon/nct6775.c2
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c55
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c1
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c7
-rw-r--r--drivers/i2c/busses/i2c-i801.c9
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c15
-rw-r--r--drivers/i2c/i2c-core-base.c11
-rw-r--r--drivers/mmc/core/queue.c12
-rw-r--r--drivers/mmc/core/queue.h1
-rw-r--r--drivers/mmc/host/android-goldfish.c4
-rw-r--r--drivers/mmc/host/atmel-mci.c12
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c10
-rw-r--r--drivers/mtd/nand/raw/denali.c5
-rw-r--r--drivers/mtd/nand/raw/docg4.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c6
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c36
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c108
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h6
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c15
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h25
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c29
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c52
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c115
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h16
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c7
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c36
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c31
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c20
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c187
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h27
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c23
-rw-r--r--drivers/net/ethernet/renesas/ravb.h5
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c5
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c13
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c5
-rw-r--r--drivers/net/hyperv/netvsc_drv.c5
-rw-r--r--drivers/net/usb/r8152.c4
-rw-r--r--drivers/nvme/host/pci.c8
-rw-r--r--drivers/nvme/target/core.c4
-rw-r--r--drivers/nvme/target/fcloop.c3
-rw-r--r--drivers/of/base.c47
-rw-r--r--drivers/staging/vboxvideo/vbox_fb.c3
-rw-r--r--drivers/thermal/of-thermal.c7
-rw-r--r--drivers/thermal/qoriq_thermal.c27
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c11
-rw-r--r--drivers/thermal/rcar_thermal.c16
-rw-r--r--drivers/tty/vt/vt.c12
-rw-r--r--drivers/vhost/vhost.c2
-rw-r--r--drivers/video/fbdev/core/fbcon.c4
-rw-r--r--drivers/video/fbdev/core/fbmem.c67
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c9
-rw-r--r--fs/buffer.c1
-rw-r--r--fs/isofs/inode.c7
-rw-r--r--fs/notify/mark.c6
-rw-r--r--fs/quota/quota.c14
-rw-r--r--fs/udf/super.c93
-rw-r--r--include/drm/drmP.h3
-rw-r--r--include/drm/drm_atomic.h23
-rw-r--r--include/drm/drm_atomic_helper.h3
-rw-r--r--include/drm/drm_atomic_uapi.h58
-rw-r--r--include/drm/drm_blend.h6
-rw-r--r--include/drm/drm_color_mgmt.h1
-rw-r--r--include/drm/drm_connector.h1
-rw-r--r--include/drm/drm_crtc.h41
-rw-r--r--include/drm/drm_device.h10
-rw-r--r--include/drm/drm_dp_helper.h11
-rw-r--r--include/drm/drm_drv.h9
-rw-r--r--include/drm/drm_encoder.h1
-rw-r--r--include/drm/drm_fb_cma_helper.h1
-rw-r--r--include/drm/drm_fb_helper.h34
-rw-r--r--include/drm/drm_fourcc.h22
-rw-r--r--include/drm/drm_mode_config.h15
-rw-r--r--include/drm/drm_panel.h1
-rw-r--r--include/drm/drm_plane.h19
-rw-r--r--include/drm/drm_print.h2
-rw-r--r--include/drm/drm_property.h2
-rw-r--r--include/drm/drm_syncobj.h9
-rw-r--r--include/drm/drm_util.h32
-rw-r--r--include/drm/i915_pciids.h1
-rw-r--r--include/linux/arm-smccc.h38
-rw-r--r--include/linux/console_struct.h1
-rw-r--r--include/linux/fb.h11
-rw-r--r--include/linux/i2c.h2
-rw-r--r--include/linux/of.h33
-rw-r--r--include/linux/platform_data/ina2xx.h2
-rw-r--r--include/linux/platform_data/shmob_drm.h6
-rw-r--r--include/linux/quota.h8
-rw-r--r--include/linux/vt_kern.h7
-rw-r--r--include/net/act_api.h7
-rw-r--r--include/net/pkt_cls.h25
-rw-r--r--include/uapi/drm/drm_fourcc.h39
-rw-r--r--include/uapi/drm/drm_mode.h3
-rw-r--r--include/uapi/drm/i915_drm.h22
-rw-r--r--include/uapi/linux/udmabuf.h33
-rw-r--r--kernel/bpf/hashtab.c23
-rw-r--r--kernel/bpf/sockmap.c11
-rw-r--r--kernel/cpu.c26
-rw-r--r--kernel/printk/printk.c1
-rw-r--r--kernel/watchdog.c4
-rw-r--r--kernel/watchdog_hld.c2
-rw-r--r--kernel/workqueue.c2
-rw-r--r--lib/percpu_counter.c1
-rw-r--r--lib/rhashtable.c1
-rw-r--r--mm/page-writeback.c1
-rw-r--r--mm/page_alloc.c1
-rw-r--r--mm/slub.c1
-rw-r--r--net/core/dev.c1
-rw-r--r--net/dsa/slave.c4
-rw-r--r--net/ipv4/tcp_bbr.c42
-rw-r--r--net/ipv4/tcp_ipv4.c6
-rw-r--r--net/ipv6/addrconf.c6
-rw-r--r--net/ipv6/ip6_fib.c2
-rw-r--r--net/ipv6/ip6_vti.c3
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/ncsi/ncsi-netlink.c4
-rw-r--r--net/rds/tcp.c1
-rw-r--r--net/sched/act_api.c70
-rw-r--r--net/sched/act_bpf.c8
-rw-r--r--net/sched/act_connmark.c8
-rw-r--r--net/sched/act_csum.c8
-rw-r--r--net/sched/act_gact.c8
-rw-r--r--net/sched/act_ife.c92
-rw-r--r--net/sched/act_ipt.c16
-rw-r--r--net/sched/act_mirred.c8
-rw-r--r--net/sched/act_nat.c8
-rw-r--r--net/sched/act_pedit.c8
-rw-r--r--net/sched/act_police.c8
-rw-r--r--net/sched/act_sample.c8
-rw-r--r--net/sched/act_simple.c8
-rw-r--r--net/sched/act_skbedit.c8
-rw-r--r--net/sched/act_skbmod.c8
-rw-r--r--net/sched/act_tunnel_key.c8
-rw-r--r--net/sched/act_vlan.c8
-rw-r--r--net/sched/cls_u32.c10
-rw-r--r--net/sched/sch_cake.c24
-rw-r--r--net/tls/tls_main.c9
-rw-r--r--net/xdp/xdp_umem.c4
-rw-r--r--scripts/Kbuild.include4
-rw-r--r--scripts/Makefile.build2
-rw-r--r--tools/bpf/bpftool/map_perf_ring.c5
-rw-r--r--tools/testing/selftests/drivers/dma-buf/Makefile5
-rw-r--r--tools/testing/selftests/drivers/dma-buf/udmabuf.c96
582 files changed, 15455 insertions, 10452 deletions
diff --git a/Documentation/ABI/stable/sysfs-bus-xen-backend b/Documentation/ABI/stable/sysfs-bus-xen-backend
index 3d5951c8bf5f..e8b60bd766f7 100644
--- a/Documentation/ABI/stable/sysfs-bus-xen-backend
+++ b/Documentation/ABI/stable/sysfs-bus-xen-backend
@@ -73,3 +73,12 @@ KernelVersion: 3.0
Contact: Konrad Rzeszutek Wilk <[email protected]>
Description:
Number of sectors written by the frontend.
+
+What: /sys/bus/xen-backend/devices/*/state
+Date: August 2018
+KernelVersion: 4.19
+Contact: Joe Jin <[email protected]>
+Description:
+ The state of the device. One of: 'Unknown',
+ 'Initialising', 'Initialised', 'Connected', 'Closing',
+ 'Closed', 'Reconfiguring', 'Reconfigured'.
diff --git a/Documentation/ABI/testing/sysfs-driver-xen-blkback b/Documentation/ABI/testing/sysfs-driver-xen-blkback
index 8bb43b66eb55..4e7babb3ba1f 100644
--- a/Documentation/ABI/testing/sysfs-driver-xen-blkback
+++ b/Documentation/ABI/testing/sysfs-driver-xen-blkback
@@ -15,3 +15,13 @@ Description:
blkback. If the frontend tries to use more than
max_persistent_grants, the LRU kicks in and starts
removing 5% of max_persistent_grants every 100ms.
+
+What: /sys/module/xen_blkback/parameters/persistent_grant_unused_seconds
+Date: August 2018
+KernelVersion: 4.19
+Contact: Roger Pau Monné <[email protected]>
+Description:
+ How long a persistent grant is allowed to remain
+ allocated without being in use. The time is in
+ seconds, 0 means indefinitely long.
+ The default is 60 seconds.
diff --git a/Documentation/arm64/sve.txt b/Documentation/arm64/sve.txt
index f128f736b4a5..7169a0ec41d8 100644
--- a/Documentation/arm64/sve.txt
+++ b/Documentation/arm64/sve.txt
@@ -200,7 +200,7 @@ prctl(PR_SVE_SET_VL, unsigned long arg)
thread.
* Changing the vector length causes all of P0..P15, FFR and all bits of
- Z0..V31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become
+ Z0..Z31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become
unspecified. Calling PR_SVE_SET_VL with vl equal to the thread's current
vector length, or calling PR_SVE_SET_VL with the PR_SVE_SET_VL_ONEXEC
flag, does not constitute a change to the vector length for this purpose.
@@ -500,7 +500,7 @@ References
[2] arch/arm64/include/uapi/asm/ptrace.h
AArch64 Linux ptrace ABI definitions
-[3] linux/Documentation/arm64/cpu-feature-registers.txt
+[3] Documentation/arm64/cpu-feature-registers.txt
[4] ARM IHI0055C
http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055c/IHI0055C_beta_aapcs64.pdf
diff --git a/Documentation/devicetree/bindings/display/atmel/hlcdc-dc.txt b/Documentation/devicetree/bindings/display/atmel/hlcdc-dc.txt
index 82f2acb3d374..0398aec488ac 100644
--- a/Documentation/devicetree/bindings/display/atmel/hlcdc-dc.txt
+++ b/Documentation/devicetree/bindings/display/atmel/hlcdc-dc.txt
@@ -15,6 +15,13 @@ Required children nodes:
to external devices using the OF graph reprensentation (see ../graph.txt).
At least one port node is required.
+Optional properties in grandchild nodes:
+ Any endpoint grandchild node may specify a desired video interface
+ according to ../../media/video-interfaces.txt, specifically
+ - bus-width: recognized values are <12>, <16>, <18> and <24>, and
+ override any output mode selection heuristic, forcing "rgb444",
+ "rgb565", "rgb666" and "rgb888" respectively.
+
Example:
hlcdc: hlcdc@f0030000 {
@@ -50,3 +57,19 @@ Example:
#pwm-cells = <3>;
};
};
+
+Example 2: With a video interface override to force rgb565; as above
+but with these changes/additions:
+
+ &hlcdc {
+ hlcdc-display-controller {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lcd_base &pinctrl_lcd_rgb565>;
+
+ port@0 {
+ hlcdc_panel_output: endpoint@0 {
+ bus-width = <16>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/bridge/lvds-transmitter.txt b/Documentation/devicetree/bindings/display/bridge/lvds-transmitter.txt
index fd39ad34c383..50220190c203 100644
--- a/Documentation/devicetree/bindings/display/bridge/lvds-transmitter.txt
+++ b/Documentation/devicetree/bindings/display/bridge/lvds-transmitter.txt
@@ -22,7 +22,13 @@ among others.
Required properties:
-- compatible: Must be "lvds-encoder"
+- compatible: Must be one or more of the following
+ - "ti,ds90c185" for the TI DS90C185 FPD-Link Serializer
+ - "lvds-encoder" for a generic LVDS encoder device
+
+ When compatible with the generic version, nodes must list the
+ device-specific version corresponding to the device first
+ followed by the generic version.
Required nodes:
diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
index 4f0ab3ed3b6f..5a4e379bb414 100644
--- a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
+++ b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
@@ -14,6 +14,7 @@ Required properties:
- "renesas,r8a7795-lvds" for R8A7795 (R-Car H3) compatible LVDS encoders
- "renesas,r8a7796-lvds" for R8A7796 (R-Car M3-W) compatible LVDS encoders
- "renesas,r8a77970-lvds" for R8A77970 (R-Car V3M) compatible LVDS encoders
+ - "renesas,r8a77980-lvds" for R8A77980 (R-Car V3H) compatible LVDS encoders
- "renesas,r8a77995-lvds" for R8A77995 (R-Car D3) compatible LVDS encoders
- reg: Base address and length for the memory-mapped registers
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.txt b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.txt
new file mode 100644
index 000000000000..0a3fbb53a16e
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.txt
@@ -0,0 +1,87 @@
+SN65DSI86 DSI to eDP bridge chip
+--------------------------------
+
+This is the binding for Texas Instruments SN65DSI86 bridge.
+http://www.ti.com/general/docs/lit/getliterature.tsp?genericPartNumber=sn65dsi86&fileType=pdf
+
+Required properties:
+- compatible: Must be "ti,sn65dsi86"
+- reg: i2c address of the chip, 0x2d as per datasheet
+- enable-gpios: gpio specification for bridge_en pin (active high)
+
+- vccio-supply: A 1.8V supply that powers up the digital IOs.
+- vpll-supply: A 1.8V supply that powers up the displayport PLL.
+- vcca-supply: A 1.2V supply that powers up the analog circuits.
+- vcc-supply: A 1.2V supply that powers up the digital core.
+
+Optional properties:
+- interrupts-extended: Specifier for the SN65DSI86 interrupt line.
+
+- gpio-controller: Marks the device has a GPIO controller.
+- #gpio-cells : Should be two. The first cell is the pin number and
+ the second cell is used to specify flags.
+ See ../../gpio/gpio.txt for more information.
+- #pwm-cells : Should be one. See ../../pwm/pwm.txt for description of
+ the cell formats.
+
+- clock-names: should be "refclk"
+- clocks: Specification for input reference clock. The reference
+ clock rate must be 12 MHz, 19.2 MHz, 26 MHz, 27 MHz or 38.4 MHz.
+
+- data-lanes: See ../../media/video-interface.txt
+- lane-polarities: See ../../media/video-interface.txt
+
+- suspend-gpios: specification for GPIO1 pin on bridge (active low)
+
+Required nodes:
+This device has two video ports. Their connections are modelled using the
+OF graph bindings specified in Documentation/devicetree/bindings/graph.txt.
+
+- Video port 0 for DSI input
+- Video port 1 for eDP output
+
+Example
+-------
+
+edp-bridge@2d {
+ compatible = "ti,sn65dsi86";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x2d>;
+
+ enable-gpios = <&msmgpio 33 GPIO_ACTIVE_HIGH>;
+ suspend-gpios = <&msmgpio 34 GPIO_ACTIVE_LOW>;
+
+ interrupts-extended = <&gpio3 4 IRQ_TYPE_EDGE_FALLING>;
+
+ vccio-supply = <&pm8916_l17>;
+ vcca-supply = <&pm8916_l6>;
+ vpll-supply = <&pm8916_l17>;
+ vcc-supply = <&pm8916_l6>;
+
+ clock-names = "refclk";
+ clocks = <&input_refclk>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ edp_bridge_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ edp_bridge_out: endpoint {
+ data-lanes = <2 1 3 0>;
+ lane-polarities = <0 1 0 1>;
+ remote-endpoint = <&edp_panel_in>;
+ };
+ };
+ };
+}
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358764.txt b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358764.txt
new file mode 100644
index 000000000000..8f9abf28a8fa
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358764.txt
@@ -0,0 +1,35 @@
+TC358764 MIPI-DSI to LVDS panel bridge
+
+Required properties:
+ - compatible: "toshiba,tc358764"
+ - reg: the virtual channel number of a DSI peripheral
+ - vddc-supply: core voltage supply, 1.2V
+ - vddio-supply: I/O voltage supply, 1.8V or 3.3V
+ - vddlvds-supply: LVDS1/2 voltage supply, 3.3V
+ - reset-gpios: a GPIO spec for the reset pin
+
+The device node can contain following 'port' child nodes,
+according to the OF graph bindings defined in [1]:
+ 0: DSI Input, not required, if the bridge is DSI controlled
+ 1: LVDS Output, mandatory
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+
+ bridge@0 {
+ reg = <0>;
+ compatible = "toshiba,tc358764";
+ vddc-supply = <&vcc_1v2_reg>;
+ vddio-supply = <&vcc_1v8_reg>;
+ vddlvds-supply = <&vcc_3v3_reg>;
+ reset-gpios = <&gpd1 6 GPIO_ACTIVE_LOW>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@1 {
+ reg = <1>;
+ lvds_ep: endpoint {
+ remote-endpoint = <&panel_ep>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/mipi-dsi-bus.txt b/Documentation/devicetree/bindings/display/mipi-dsi-bus.txt
index 973c27273772..a336599f6c03 100644
--- a/Documentation/devicetree/bindings/display/mipi-dsi-bus.txt
+++ b/Documentation/devicetree/bindings/display/mipi-dsi-bus.txt
@@ -16,7 +16,7 @@ The following assumes that only a single peripheral is connected to a DSI
host. Experience shows that this is true for the large majority of setups.
DSI host
---------
+========
In addition to the standard properties and those defined by the parent bus of
a DSI host, the following properties apply to a node representing a DSI host.
@@ -29,12 +29,24 @@ Required properties:
- #size-cells: Should be 0. There are cases where it makes sense to use a
different value here. See below.
+Optional properties:
+- clock-master: boolean. Should be enabled if the host is being used in
+ conjunction with another DSI host to drive the same peripheral. Hardware
+ supporting such a configuration generally requires the data on both the busses
+ to be driven by the same clock. Only the DSI host instance controlling this
+ clock should contain this property.
+
DSI peripheral
---------------
+==============
+
+Peripherals with DSI as control bus, or no control bus
+------------------------------------------------------
-Peripherals are represented as child nodes of the DSI host's node. Properties
-described here apply to all DSI peripherals, but individual bindings may want
-to define additional, device-specific properties.
+Peripherals with the DSI bus as the primary control bus, or peripherals with
+no control bus but use the DSI bus to transmit pixel data are represented
+as child nodes of the DSI host's node. Properties described here apply to all
+DSI peripherals, but individual bindings may want to define additional,
+device-specific properties.
Required properties:
- reg: The virtual channel number of a DSI peripheral. Must be in the range
@@ -49,9 +61,37 @@ case two alternative representations can be chosen:
property is the number of the first virtual channel and the second cell is
the number of consecutive virtual channels.
-Example
--------
-
+Peripherals with a different control bus
+----------------------------------------
+
+There are peripherals that have I2C/SPI (or some other non-DSI bus) as the
+primary control bus, but are also connected to a DSI bus (mostly for the data
+path). Connections between such peripherals and a DSI host can be represented
+using the graph bindings [1], [2].
+
+Peripherals that support dual channel DSI
+-----------------------------------------
+
+Peripherals with higher bandwidth requirements can be connected to 2 DSI
+busses. Each DSI bus/channel drives some portion of the pixel data (generally
+left/right half of each line of the display, or even/odd lines of the display).
+The graph bindings should be used to represent the multiple DSI busses that are
+connected to this peripheral. Each DSI host's output endpoint can be linked to
+an input endpoint of the DSI peripheral.
+
+[1] Documentation/devicetree/bindings/graph.txt
+[2] Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Examples
+========
+- (1), (2) and (3) are examples of a DSI host and peripheral on the DSI bus
+ with different virtual channel configurations.
+- (4) is an example of a peripheral on a I2C control bus connected to a
+ DSI host using of-graph bindings.
+- (5) is an example of 2 DSI hosts driving a dual-channel DSI peripheral,
+ which uses I2C as its primary control bus.
+
+1)
dsi-host {
...
@@ -67,6 +107,7 @@ Example
...
};
+2)
dsi-host {
...
@@ -82,6 +123,7 @@ Example
...
};
+3)
dsi-host {
...
@@ -96,3 +138,98 @@ Example
...
};
+
+4)
+ i2c-host {
+ ...
+
+ dsi-bridge@35 {
+ compatible = "...";
+ reg = <0x35>;
+
+ ports {
+ ...
+
+ port {
+ bridge_mipi_in: endpoint {
+ remote-endpoint = <&host_mipi_out>;
+ };
+ };
+ };
+ };
+ };
+
+ dsi-host {
+ ...
+
+ ports {
+ ...
+
+ port {
+ host_mipi_out: endpoint {
+ remote-endpoint = <&bridge_mipi_in>;
+ };
+ };
+ };
+ };
+
+5)
+ i2c-host {
+ dsi-bridge@35 {
+ compatible = "...";
+ reg = <0x35>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi0_in: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi1_in: endpoint {
+ remote-endpoint = <&dsi1_out>;
+ };
+ };
+ };
+ };
+ };
+
+ dsi0-host {
+ ...
+
+ /*
+ * this DSI instance drives the clock for both the host
+ * controllers
+ */
+ clock-master;
+
+ ports {
+ ...
+
+ port {
+ dsi0_out: endpoint {
+ remote-endpoint = <&dsi0_in>;
+ };
+ };
+ };
+ };
+
+ dsi1-host {
+ ...
+
+ ports {
+ ...
+
+ port {
+ dsi1_out: endpoint {
+ remote-endpoint = <&dsi1_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index ec9d34be2ff7..caae2348a292 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -15,6 +15,7 @@ Required Properties:
- "renesas,du-r8a7796" for R8A7796 (R-Car M3-W) compatible DU
- "renesas,du-r8a77965" for R8A77965 (R-Car M3-N) compatible DU
- "renesas,du-r8a77970" for R8A77970 (R-Car V3M) compatible DU
+ - "renesas,du-r8a77980" for R8A77980 (R-Car V3H) compatible DU
- "renesas,du-r8a77995" for R8A77995 (R-Car D3) compatible DU
- reg: the memory-mapped I/O registers base address and length
@@ -61,6 +62,7 @@ corresponding to each DU output.
R8A7796 (R-Car M3-W) DPAD 0 HDMI 0 LVDS 0 -
R8A77965 (R-Car M3-N) DPAD 0 HDMI 0 LVDS 0 -
R8A77970 (R-Car V3M) DPAD 0 LVDS 0 - -
+ R8A77980 (R-Car V3H) DPAD 0 LVDS 0 - -
R8A77995 (R-Car D3) DPAD 0 LVDS 0 LVDS 1 -
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
index eeda3597011e..b79e5769f0ae 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
@@ -8,6 +8,9 @@ Required properties:
- compatible: value should be one of the following
"rockchip,rk3036-vop";
"rockchip,rk3126-vop";
+ "rockchip,px30-vop-lit";
+ "rockchip,px30-vop-big";
+ "rockchip,rk3188-vop";
"rockchip,rk3288-vop";
"rockchip,rk3368-vop";
"rockchip,rk3366-vop";
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
index f8773ecb7525..22d6dda587c5 100644
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -78,6 +78,7 @@ Required properties:
- compatible: value must be one of:
* "allwinner,sun8i-a83t-dw-hdmi"
+ * "allwinner,sun50i-a64-dw-hdmi", "allwinner,sun8i-a83t-dw-hdmi"
- reg: base address and size of memory-mapped region
- reg-io-width: See dw_hdmi.txt. Shall be 1.
- interrupts: HDMI interrupt number
@@ -96,6 +97,9 @@ Required properties:
first port should be the input endpoint. The second should be the
output, usually to an HDMI connector.
+Optional properties:
+ - hvcc-supply: the VCC power supply of the controller
+
DWC HDMI PHY
------------
@@ -103,6 +107,7 @@ Required properties:
- compatible: value must be one of:
* allwinner,sun8i-a83t-hdmi-phy
* allwinner,sun8i-h3-hdmi-phy
+ * allwinner,sun8i-r40-hdmi-phy
* allwinner,sun50i-a64-hdmi-phy
- reg: base address and size of memory-mapped region
- clocks: phandles to the clocks feeding the HDMI PHY
@@ -112,9 +117,9 @@ Required properties:
- resets: phandle to the reset controller driving the PHY
- reset-names: must be "phy"
-H3 and A64 HDMI PHY require additional clocks:
+H3, A64 and R40 HDMI PHY require additional clocks:
- pll-0: parent of phy clock
- - pll-1: second possible phy clock parent (A64 only)
+ - pll-1: second possible phy clock parent (A64/R40 only)
TV Encoder
----------
@@ -151,6 +156,8 @@ Required properties:
* allwinner,sun8i-v3s-tcon
* allwinner,sun9i-a80-tcon-lcd
* allwinner,sun9i-a80-tcon-tv
+ * "allwinner,sun50i-a64-tcon-lcd", "allwinner,sun8i-a83t-tcon-lcd"
+ * "allwinner,sun50i-a64-tcon-tv", "allwinner,sun8i-a83t-tcon-tv"
- reg: base address and size of memory-mapped region
- interrupts: interrupt associated to this IP
- clocks: phandles to the clocks feeding the TCON.
@@ -370,6 +377,8 @@ Required properties:
* allwinner,sun8i-a83t-de2-mixer-1
* allwinner,sun8i-h3-de2-mixer-0
* allwinner,sun8i-v3s-de2-mixer
+ * allwinner,sun50i-a64-de2-mixer-0
+ * allwinner,sun50i-a64-de2-mixer-1
- reg: base address and size of the memory-mapped region.
- clocks: phandles to the clocks feeding the mixer
* bus: the mixer interface clock
@@ -403,6 +412,7 @@ Required properties:
* allwinner,sun8i-r40-display-engine
* allwinner,sun8i-v3s-display-engine
* allwinner,sun9i-a80-display-engine
+ * allwinner,sun50i-a64-display-engine
- allwinner,pipelines: list of phandle to the display engine
frontends (DE 1.0) or mixers (DE 2.0) available.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt
index b0a8af51c388..265b223cd978 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt
@@ -11,7 +11,7 @@ The RISC-V supervisor ISA manual specifies three interrupt sources that are
attached to every HLIC: software interrupts, the timer interrupt, and external
interrupts. Software interrupts are used to send IPIs between cores. The
timer interrupt comes from an architecturally mandated real-time timer that is
-controller via Supervisor Binary Interface (SBI) calls and CSR reads. External
+controlled via Supervisor Binary Interface (SBI) calls and CSR reads. External
interrupts connect all other device interrupts to the HLIC, which are routed
via the platform-level interrupt controller (PLIC).
@@ -25,7 +25,15 @@ in the system.
Required properties:
- compatible : "riscv,cpu-intc"
-- #interrupt-cells : should be <1>
+- #interrupt-cells : should be <1>. The interrupt sources are defined by the
+ RISC-V supervisor ISA manual, with only the following three interrupts being
+ defined for supervisor mode:
+ - Source 1 is the supervisor software interrupt, which can be sent by an SBI
+ call and is reserved for use by software.
+ - Source 5 is the supervisor timer interrupt, which can be configured by
+ SBI calls and implements a one-shot timer.
+ - Source 9 is the supervisor external interrupt, which chains to all other
+ device interrupts.
- interrupt-controller : Identifies the node as an interrupt controller
Furthermore, this interrupt-controller MUST be embedded inside the cpu
@@ -38,7 +46,7 @@ An example device tree entry for a HLIC is show below.
...
cpu1-intc: interrupt-controller {
#interrupt-cells = <1>;
- compatible = "riscv,cpu-intc", "sifive,fu540-c000-cpu-intc";
+ compatible = "sifive,fu540-c000-cpu-intc", "riscv,cpu-intc";
interrupt-controller;
};
};
diff --git a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
index 5d47a262474c..9407212a85a8 100644
--- a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
@@ -7,6 +7,7 @@ Required properties:
Examples with soctypes are:
- "renesas,r8a7743-wdt" (RZ/G1M)
- "renesas,r8a7745-wdt" (RZ/G1E)
+ - "renesas,r8a774a1-wdt" (RZ/G2M)
- "renesas,r8a7790-wdt" (R-Car H2)
- "renesas,r8a7791-wdt" (R-Car M2-W)
- "renesas,r8a7792-wdt" (R-Car V2H)
@@ -21,8 +22,8 @@ Required properties:
- "renesas,r7s72100-wdt" (RZ/A1)
The generic compatible string must be:
- "renesas,rza-wdt" for RZ/A
- - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G
- - "renesas,rcar-gen3-wdt" for R-Car Gen3
+ - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G1
+ - "renesas,rcar-gen3-wdt" for R-Car Gen3 and RZ/G2
- reg : Should contain WDT registers location and length
- clocks : the clock feeding the watchdog timer.
diff --git a/Documentation/gpu/drivers.rst b/Documentation/gpu/drivers.rst
index 65be325bf282..7d2d3875ff1a 100644
--- a/Documentation/gpu/drivers.rst
+++ b/Documentation/gpu/drivers.rst
@@ -13,6 +13,7 @@ GPU Driver Documentation
tve200
v3d
vc4
+ vkms
bridge/dw-hdmi
xen-front
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 5dee6b8a4c12..4b1501b4835b 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -287,8 +287,14 @@ Atomic Mode Setting Function Reference
.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
:export:
-.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
- :internal:
+Atomic Mode Setting IOCTL and UAPI Functions
+--------------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic_uapi.c
+ :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic_uapi.c
+ :export:
CRTC Abstraction
================
@@ -323,6 +329,12 @@ Frame Buffer Functions Reference
DRM Format Handling
===================
+.. kernel-doc:: include/uapi/drm/drm_fourcc.h
+ :doc: overview
+
+Format Functions Reference
+--------------------------
+
.. kernel-doc:: include/drm/drm_fourcc.h
:internal:
@@ -560,7 +572,7 @@ Tile Group Property
Explicit Fencing Properties
---------------------------
-.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
+.. kernel-doc:: drivers/gpu/drm/drm_atomic_uapi.c
:doc: explicit fencing properties
Existing KMS Properties
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index d3acb4949e2d..e725e8449e72 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -297,7 +297,7 @@ made up of several fields, the more interesting ones being:
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
- int (*fault)(struct vm_fault *vmf);
+ vm_fault_t (*fault)(struct vm_fault *vmf);
};
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index a7c150d6b63f..77c2b3c25565 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -127,7 +127,8 @@ interfaces to fix these issues:
the acquire context explicitly on stack and then also pass it down into
drivers explicitly so that the legacy-on-atomic functions can use them.
- Except for some driver code this is done.
+ Except for some driver code this is done. This task should be finished by
+ adding WARN_ON(!drm_drv_uses_atomic_modeset) in drm_modeset_lock_all().
* A bunch of the vtable hooks are now in the wrong place: DRM has a split
between core vfunc tables (named ``drm_foo_funcs``), which are used to
@@ -137,13 +138,6 @@ interfaces to fix these issues:
``_helper_funcs`` since they are not part of the core ABI. There's a
``FIXME`` comment in the kerneldoc for each such case in ``drm_crtc.h``.
-* There's a new helper ``drm_atomic_helper_best_encoder()`` which could be
- used by all atomic drivers which don't select the encoder for a given
- connector at runtime. That's almost all of them, and would allow us to get
- rid of a lot of ``best_encoder`` boilerplate in drivers.
-
- This was almost done, but new drivers added a few more cases again.
-
Contact: Daniel Vetter
Get rid of dev->struct_mutex from GEM drivers
@@ -164,9 +158,8 @@ private lock. The tricky part is the BO free functions, since those can't
reliably take that lock any more. Instead state needs to be protected with
suitable subordinate locks or some cleanup work pushed to a worker thread. For
performance-critical drivers it might also be better to go with a more
-fine-grained per-buffer object and per-context lockings scheme. Currently the
-following drivers still use ``struct_mutex``: ``msm``, ``omapdrm`` and
-``udl``.
+fine-grained per-buffer object and per-context lockings scheme. Currently only the
+``msm`` driver still use ``struct_mutex``.
Contact: Daniel Vetter, respective driver maintainers
@@ -190,7 +183,8 @@ Convert drivers to use simple modeset suspend/resume
Most drivers (except i915 and nouveau) that use
drm_atomic_helper_suspend/resume() can probably be converted to use
-drm_mode_config_helper_suspend/resume().
+drm_mode_config_helper_suspend/resume(). Also there's still open-coded version
+of the atomic suspend/resume code in older atomic modeset drivers.
Contact: Maintainer of the driver you plan to convert
@@ -246,20 +240,10 @@ Core refactorings
Clean up the DRM header mess
----------------------------
-Currently the DRM subsystem has only one global header, ``drmP.h``. This is
-used both for functions exported to helper libraries and drivers and functions
-only used internally in the ``drm.ko`` module. The goal would be to move all
-header declarations not needed outside of ``drm.ko`` into
-``drivers/gpu/drm/drm_*_internal.h`` header files. ``EXPORT_SYMBOL`` also
-needs to be dropped for these functions.
-
-This would nicely tie in with the below task to create kerneldoc after the API
-is cleaned up. Or with the "hide legacy cruft better" task.
-
-Note that this is well in progress, but ``drmP.h`` is still huge. The updated
-plan is to switch to per-file driver API headers, which will also structure
-the kerneldoc better. This should also allow more fine-grained ``#include``
-directives.
+The DRM subsystem originally had only one huge global header, ``drmP.h``. This
+is now split up, but many source files still include it. The remaining part of
+the cleanup work here is to replace any ``#include <drm/drmP.h>`` by only the
+headers needed (and fixing up any missing pre-declarations in the headers).
In the end no .c file should need to include ``drmP.h`` anymore.
@@ -278,26 +262,6 @@ See https://dri.freedesktop.org/docs/drm/ for what's there already.
Contact: Daniel Vetter
-Hide legacy cruft better
-------------------------
-
-Way back DRM supported only drivers which shadow-attached to PCI devices with
-userspace or fbdev drivers setting up outputs. Modern DRM drivers take charge
-of the entire device, you can spot them with the DRIVER_MODESET flag.
-
-Unfortunately there's still large piles of legacy code around which needs to
-be hidden so that driver writers don't accidentally end up using it. And to
-prevent security issues in those legacy IOCTLs from being exploited on modern
-drivers. This has multiple possible subtasks:
-
-* Extract support code for legacy features into a ``drm-legacy.ko`` kernel
- module and compile it only when one of the legacy drivers is enabled.
-
-This is mostly done, the only thing left is to split up ``drm_irq.c`` into
-legacy cruft and the parts needed by modern KMS drivers.
-
-Contact: Daniel Vetter
-
Make panic handling work
------------------------
@@ -396,17 +360,12 @@ converting things over. For modeset tests we also first need a bit of
infrastructure to use dumb buffers for untiled buffers, to be able to run all
the non-i915 specific modeset tests.
-Contact: Daniel Vetter
-
-Create a virtual KMS driver for testing (vkms)
-----------------------------------------------
-
-With all the latest helpers it should be fairly simple to create a virtual KMS
-driver useful for testing, or for running X or similar on headless machines
-(to be able to still use the GPU). This would be similar to vgem, but aimed at
-the modeset side.
+Extend virtual test driver (VKMS)
+---------------------------------
-Once the basics are there there's tons of possibilities to extend it.
+See the documentation of :ref:`VKMS <vkms>` for more details. This is an ideal
+internship task, since it only requires a virtual machine and can be sized to
+fit the available time.
Contact: Daniel Vetter
diff --git a/Documentation/gpu/vkms.rst b/Documentation/gpu/vkms.rst
new file mode 100644
index 000000000000..0a6ea6216e41
--- /dev/null
+++ b/Documentation/gpu/vkms.rst
@@ -0,0 +1,24 @@
+.. _vkms:
+
+==========================================
+ drm/vkms Virtual Kernel Modesetting
+==========================================
+
+.. kernel-doc:: drivers/gpu/drm/vkms/vkms_drv.c
+ :doc: vkms (Virtual Kernel Modesetting)
+
+TODO
+====
+
+CRC API
+-------
+
+- Optimize CRC computation ``compute_crc()`` and plane blending ``blend()``
+
+- Use the alpha value to blend vaddr_src with vaddr_dst instead of
+ overwriting it in ``blend()``.
+
+- Add igt test to check cleared alpha value for XRGB plane format.
+
+- Add igt test to check extreme alpha values i.e. fully opaque and fully
+ transparent (intermediate values are affected by hw-specific rounding modes).
diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx
index 72d16f08e431..b8df81f6d6bc 100644
--- a/Documentation/hwmon/ina2xx
+++ b/Documentation/hwmon/ina2xx
@@ -32,7 +32,7 @@ Supported chips:
Datasheet: Publicly available at the Texas Instruments website
http://www.ti.com/
-Author: Lothar Felten <[email protected]>
+Author: Lothar Felten <[email protected]>
Description
-----------
diff --git a/Documentation/i2c/DMA-considerations b/Documentation/i2c/DMA-considerations
index 966610aa4620..203002054120 100644
--- a/Documentation/i2c/DMA-considerations
+++ b/Documentation/i2c/DMA-considerations
@@ -50,10 +50,14 @@ bounce buffer. But you don't need to care about that detail, just use the
returned buffer. If NULL is returned, the threshold was not met or a bounce
buffer could not be allocated. Fall back to PIO in that case.
-In any case, a buffer obtained from above needs to be released. It ensures data
-is copied back to the message and a potentially used bounce buffer is freed::
+In any case, a buffer obtained from above needs to be released. Another helper
+function ensures a potentially used bounce buffer is freed::
- i2c_release_dma_safe_msg_buf(msg, dma_buf);
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, xferred);
+
+The last argument 'xferred' controls whether the buffer contents are synced
+back to the message. No syncing is needed when setting up the DMA transfer
+failed and no data was transferred.
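
Putting the two helpers together, a driver transfer path would look roughly
like the sketch below. i2c_get_dma_safe_msg_buf() is the allocation-side
helper referred to above; do_pio_xfer() and do_dma_xfer() stand in for the
driver's own transfer routines, and the threshold of 8 bytes is only an
example:

	static int example_xfer_one(struct i2c_adapter *adap, struct i2c_msg *msg)
	{
		u8 *dma_buf;
		bool xferred;
		int ret;

		/* NULL: below the threshold or no bounce buffer - fall back to PIO */
		dma_buf = i2c_get_dma_safe_msg_buf(msg, 8);
		if (!dma_buf)
			return do_pio_xfer(adap, msg);

		ret = do_dma_xfer(adap, msg, dma_buf);
		xferred = (ret >= 0);		/* only sync data back on success */

		/* frees the bounce buffer (if any), copies data back when 'xferred' */
		i2c_put_dma_safe_msg_buf(dma_buf, msg, xferred);

		return ret;
	}
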
The bounce buffer handling from the core is generic and simple. It will always
allocate a new bounce buffer. If you want a more sophisticated handling (e.g.
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 13a7c999c04a..f2ac672eb766 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -272,6 +272,7 @@ Code Seq#(hex) Include File Comments
't' 90-91 linux/toshiba.h toshiba and toshiba_acpi SMM
'u' 00-1F linux/smb_fs.h gone
'u' 20-3F linux/uvcvideo.h USB video class host driver
+'u' 40-4f linux/udmabuf.h userspace dma-buf misc device
'v' 00-1F linux/ext2_fs.h conflict!
'v' 00-1F linux/fs.h conflict!
'v' 00-0F linux/sonypi.h conflict!
diff --git a/MAINTAINERS b/MAINTAINERS
index a5b256b25905..567e03759af9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4870,9 +4870,10 @@ F: Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
DRM DRIVERS FOR RENESAS
M: Laurent Pinchart <[email protected]>
+M: Kieran Bingham <[email protected]>
-T: git git://linuxtv.org/pinchartl/fbdev
+T: git git://linuxtv.org/pinchartl/media drm/du/next
S: Supported
F: drivers/gpu/drm/rcar-du/
F: drivers/gpu/drm/shmobile/
@@ -8255,9 +8256,9 @@ F: drivers/ata/pata_arasan_cf.c
LIBATA PATA DRIVERS
M: Bartlomiej Zolnierkiewicz <[email protected]>
-M: Jens Axboe <kernel.dk>
+M: Jens Axboe <[email protected]>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
S: Maintained
F: drivers/ata/pata_*.c
F: drivers/ata/ata_generic.c
@@ -8275,7 +8276,7 @@ LIBATA SATA AHCI PLATFORM devices support
M: Hans de Goede <[email protected]>
M: Jens Axboe <[email protected]>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
S: Maintained
F: drivers/ata/ahci_platform.c
F: drivers/ata/libahci_platform.c
@@ -8291,7 +8292,7 @@ F: drivers/ata/sata_promise.*
LIBATA SUBSYSTEM (Serial and Parallel ATA drivers)
M: Jens Axboe <[email protected]>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
S: Maintained
F: drivers/ata/
F: include/linux/ata.h
@@ -15343,6 +15344,14 @@ F: arch/x86/um/
F: fs/hostfs/
F: fs/hppfs/
+USERSPACE DMA BUFFER DRIVER
+M: Gerd Hoffmann <[email protected]>
+S: Maintained
+F: drivers/dma-buf/udmabuf.c
+F: include/uapi/linux/udmabuf.h
+T: git git://anongit.freedesktop.org/drm/drm-misc
+
USERSPACE I/O (UIO)
M: Greg Kroah-Hartman <[email protected]>
S: Maintained
diff --git a/Makefile b/Makefile
index 2b458801ba74..19948e556941 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Merciless Moray
# *DOCUMENTATION*
@@ -807,6 +807,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
# disable pointer signed / unsigned warnings in gcc 4.0
KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
+# disable stringop warnings in gcc 8+
+KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
+
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
diff --git a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts
index 4d969013f99a..4d969013f99a 100755..100644
--- a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts
+++ b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index f0cbd86312dc..d4b7c59eec68 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -469,6 +469,7 @@
ti,hwmods = "rtc";
clocks = <&clk_32768_ck>;
clock-names = "int-clk";
+ system-power-controller;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts
index 9fb47724b9c1..ad2ae25b7b4d 100644
--- a/arch/arm/boot/dts/imx23-evk.dts
+++ b/arch/arm/boot/dts/imx23-evk.dts
@@ -13,6 +13,43 @@
reg = <0x40000000 0x08000000>;
};
+ reg_vddio_sd0: regulator-vddio-sd0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vddio-sd0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio1 29 0>;
+ };
+
+ reg_lcd_3v3: regulator-lcd-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "lcd-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio1 18 0>;
+ enable-active-high;
+ };
+
+ reg_lcd_5v: regulator-lcd-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "lcd-5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ panel {
+ compatible = "sii,43wvf1g";
+ backlight = <&backlight_display>;
+ dvdd-supply = <&reg_lcd_3v3>;
+ avdd-supply = <&reg_lcd_5v>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&display_out>;
+ };
+ };
+ };
+
apb@80000000 {
apbh@80000000 {
gpmi-nand@8000c000 {
@@ -52,31 +89,11 @@
lcdif@80030000 {
pinctrl-names = "default";
pinctrl-0 = <&lcdif_24bit_pins_a>;
- lcd-supply = <&reg_lcd_3v3>;
- display = <&display0>;
status = "okay";
- display0: display0 {
- bits-per-pixel = <32>;
- bus-width = <24>;
-
- display-timings {
- native-mode = <&timing0>;
- timing0: timing0 {
- clock-frequency = <9200000>;
- hactive = <480>;
- vactive = <272>;
- hback-porch = <15>;
- hfront-porch = <8>;
- vback-porch = <12>;
- vfront-porch = <4>;
- hsync-len = <1>;
- vsync-len = <1>;
- hsync-active = <0>;
- vsync-active = <0>;
- de-active = <1>;
- pixelclk-active = <0>;
- };
+ port {
+ display_out: endpoint {
+ remote-endpoint = <&panel_in>;
};
};
};
@@ -118,32 +135,7 @@
};
};
- regulators {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <0>;
-
- reg_vddio_sd0: regulator@0 {
- compatible = "regulator-fixed";
- reg = <0>;
- regulator-name = "vddio-sd0";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&gpio1 29 0>;
- };
-
- reg_lcd_3v3: regulator@1 {
- compatible = "regulator-fixed";
- reg = <1>;
- regulator-name = "lcd-3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&gpio1 18 0>;
- enable-active-high;
- };
- };
-
- backlight {
+ backlight_display: backlight {
compatible = "pwm-backlight";
pwms = <&pwm 2 5000000>;
brightness-levels = <0 4 8 16 32 64 128 255>;
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
index 6b0ae667640f..93ab5bdfe068 100644
--- a/arch/arm/boot/dts/imx28-evk.dts
+++ b/arch/arm/boot/dts/imx28-evk.dts
@@ -13,6 +13,87 @@
reg = <0x40000000 0x08000000>;
};
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_vddio_sd0: regulator-vddio-sd0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vddio-sd0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 28 0>;
+ };
+
+ reg_fec_3v3: regulator-fec-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "fec-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio2 15 0>;
+ };
+
+ reg_usb0_vbus: regulator-usb0-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb0_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 9 0>;
+ enable-active-high;
+ };
+
+ reg_usb1_vbus: regulator-usb1-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 8 0>;
+ enable-active-high;
+ };
+
+ reg_lcd_3v3: regulator-lcd-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "lcd-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 30 0>;
+ enable-active-high;
+ };
+
+ reg_can_3v3: regulator-can-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "can-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio2 13 0>;
+ enable-active-high;
+ };
+
+ reg_lcd_5v: regulator-lcd-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "lcd-5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ panel {
+ compatible = "sii,43wvf1g";
+ backlight = <&backlight_display>;
+ dvdd-supply = <&reg_lcd_3v3>;
+ avdd-supply = <&reg_lcd_5v>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&display_out>;
+ };
+ };
+ };
+
apb@80000000 {
apbh@80000000 {
gpmi-nand@8000c000 {
@@ -116,31 +197,11 @@
pinctrl-names = "default";
pinctrl-0 = <&lcdif_24bit_pins_a
&lcdif_pins_evk>;
- lcd-supply = <&reg_lcd_3v3>;
- display = <&display0>;
status = "okay";
- display0: display0 {
- bits-per-pixel = <32>;
- bus-width = <24>;
-
- display-timings {
- native-mode = <&timing0>;
- timing0: timing0 {
- clock-frequency = <33500000>;
- hactive = <800>;
- vactive = <480>;
- hback-porch = <89>;
- hfront-porch = <164>;
- vback-porch = <23>;
- vfront-porch = <10>;
- hsync-len = <10>;
- vsync-len = <10>;
- hsync-active = <0>;
- vsync-active = <0>;
- de-active = <1>;
- pixelclk-active = <0>;
- };
+ port {
+ display_out: endpoint {
+ remote-endpoint = <&panel_in>;
};
};
};
@@ -269,80 +330,6 @@
};
};
- regulators {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <0>;
-
- reg_3p3v: regulator@0 {
- compatible = "regulator-fixed";
- reg = <0>;
- regulator-name = "3P3V";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
-
- reg_vddio_sd0: regulator@1 {
- compatible = "regulator-fixed";
- reg = <1>;
- regulator-name = "vddio-sd0";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&gpio3 28 0>;
- };
-
- reg_fec_3v3: regulator@2 {
- compatible = "regulator-fixed";
- reg = <2>;
- regulator-name = "fec-3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&gpio2 15 0>;
- };
-
- reg_usb0_vbus: regulator@3 {
- compatible = "regulator-fixed";
- reg = <3>;
- regulator-name = "usb0_vbus";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&gpio3 9 0>;
- enable-active-high;
- };
-
- reg_usb1_vbus: regulator@4 {
- compatible = "regulator-fixed";
- reg = <4>;
- regulator-name = "usb1_vbus";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&gpio3 8 0>;
- enable-active-high;
- };
-
- reg_lcd_3v3: regulator@5 {
- compatible = "regulator-fixed";
- reg = <5>;
- regulator-name = "lcd-3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&gpio3 30 0>;
- enable-active-high;
- };
-
- reg_can_3v3: regulator@6 {
- compatible = "regulator-fixed";
- reg = <6>;
- regulator-name = "can-3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&gpio2 13 0>;
- enable-active-high;
- };
-
- };
-
sound {
compatible = "fsl,imx28-evk-sgtl5000",
"fsl,mxs-audio-sgtl5000";
@@ -363,7 +350,7 @@
};
};
- backlight {
+ backlight_display: backlight {
compatible = "pwm-backlight";
pwms = <&pwm 2 5000000>;
brightness-levels = <0 4 8 16 32 64 128 255>;
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index 7cbc2ffa4b3a..7234e8330a57 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -126,10 +126,14 @@
interrupt-names = "msi";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+ /*
+ * Reference manual lists pci irqs incorrectly
+ * Real hardware ordering is same as imx6: D+MSI, C, B, A
+ */
+ interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>,
<&clks IMX7D_PLL_ENET_MAIN_100M_CLK>,
<&clks IMX7D_PCIE_PHY_ROOT_CLK>;
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts
index 12d6822f0057..04758a2a87f0 100644
--- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
+++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
@@ -354,7 +354,7 @@
&mmc2 {
vmmc-supply = <&vsdio>;
bus-width = <8>;
- non-removable;
+ ti,non-removable;
};
&mmc3 {
@@ -621,15 +621,6 @@
OMAP4_IOPAD(0x10c, PIN_INPUT | MUX_MODE1) /* abe_mcbsp3_fsx */
>;
};
-};
-
-&omap4_pmx_wkup {
- usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins {
- /* gpio_wk0 */
- pinctrl-single,pins = <
- OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3)
- >;
- };
vibrator_direction_pin: pinmux_vibrator_direction_pin {
pinctrl-single,pins = <
@@ -644,6 +635,15 @@
};
};
+&omap4_pmx_wkup {
+ usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins {
+ /* gpio_wk0 */
+ pinctrl-single,pins = <
+ OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3)
+ >;
+ };
+};
+
/*
* As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for
* uart1 wakeirq.
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index e2c127608bcc..7eca43ff69bb 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -257,6 +257,7 @@ CONFIG_IMX_IPUV3_CORE=y
CONFIG_DRM=y
CONFIG_DRM_PANEL_LVDS=y
CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_PANEL_SEIKO_43WVF1G=y
CONFIG_DRM_DW_HDMI_AHB_AUDIO=m
CONFIG_DRM_DW_HDMI_CEC=y
CONFIG_DRM_IMX=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index 148226e36152..7b8212857535 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -95,6 +95,7 @@ CONFIG_MFD_MXS_LRADC=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_DRM=y
+CONFIG_DRM_PANEL_SEIKO_43WVF1G=y
CONFIG_DRM_MXSFB=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
diff --git a/arch/arm/configs/versatile_defconfig b/arch/arm/configs/versatile_defconfig
index df68dc4056e5..5282324c7cef 100644
--- a/arch/arm/configs/versatile_defconfig
+++ b/arch/arm/configs/versatile_defconfig
@@ -5,19 +5,19 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_PARTITION_ADVANCED=y
# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_VERSATILE=y
CONFIG_AEABI=y
CONFIG_OABI_COMPAT=y
-CONFIG_CMA=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="root=1f03 mem=32M"
CONFIG_FPE_NWFPE=y
CONFIG_VFP=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_CMA=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -59,6 +59,7 @@ CONFIG_GPIO_PL061=y
CONFIG_DRM=y
CONFIG_DRM_PANEL_ARM_VERSATILE=y
CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_DUMB_VGA_DAC=y
CONFIG_DRM_PL111=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
@@ -89,9 +90,10 @@ CONFIG_NFSD=y
CONFIG_NFSD_V3=y
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_ISO8859_1=m
+CONFIG_FONTS=y
+CONFIG_FONT_ACORN_8x8=y
+CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_LL=y
-CONFIG_FONTS=y
-CONFIG_FONT_ACORN_8x8=y
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 2ceffd85dd3d..cd65ea4e9c54 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2161,6 +2161,37 @@ static int of_dev_hwmod_lookup(struct device_node *np,
}
/**
+ * omap_hwmod_fix_mpu_rt_idx - fix up mpu_rt_idx register offsets
+ *
+ * @oh: struct omap_hwmod *
+ * @np: struct device_node *
+ *
+ * Fix up module register offsets for modules with mpu_rt_idx.
+ * Only needed for cpsw with interconnect target module defined
+ * in device tree while still using legacy hwmod platform data
+ * for rev, sysc and syss registers.
+ *
+ * Can be removed when all cpsw hwmod platform data has been
+ * dropped.
+ */
+static void omap_hwmod_fix_mpu_rt_idx(struct omap_hwmod *oh,
+ struct device_node *np,
+ struct resource *res)
+{
+ struct device_node *child = NULL;
+ int error;
+
+ child = of_get_next_child(np, child);
+ if (!child)
+ return;
+
+ error = of_address_to_resource(child, oh->mpu_rt_idx, res);
+ if (error)
+ pr_err("%s: error mapping mpu_rt_idx: %i\n",
+ __func__, error);
+}
+
+/**
* omap_hwmod_parse_module_range - map module IO range from device tree
* @oh: struct omap_hwmod *
* @np: struct device_node *
@@ -2220,7 +2251,13 @@ int omap_hwmod_parse_module_range(struct omap_hwmod *oh,
size = be32_to_cpup(ranges);
pr_debug("omap_hwmod: %s %s at 0x%llx size 0x%llx\n",
- oh->name, np->name, base, size);
+ oh ? oh->name : "", np->name, base, size);
+
+ if (oh && oh->mpu_rt_idx) {
+ omap_hwmod_fix_mpu_rt_idx(oh, np, res);
+
+ return 0;
+ }
res->start = base;
res->end = base + size - 1;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 29e75b47becd..1b1a0e95c751 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -763,7 +763,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK
config HOLES_IN_ZONE
def_bool y
- depends on NUMA
source kernel/Kconfig.hz
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index f67e8d5e93ad..db8d364f8476 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -38,6 +38,7 @@ CONFIG_ARCH_BCM_IPROC=y
CONFIG_ARCH_BERLIN=y
CONFIG_ARCH_BRCMSTB=y
CONFIG_ARCH_EXYNOS=y
+CONFIG_ARCH_K3=y
CONFIG_ARCH_LAYERSCAPE=y
CONFIG_ARCH_LG1K=y
CONFIG_ARCH_HISI=y
@@ -605,6 +606,8 @@ CONFIG_ARCH_TEGRA_132_SOC=y
CONFIG_ARCH_TEGRA_210_SOC=y
CONFIG_ARCH_TEGRA_186_SOC=y
CONFIG_ARCH_TEGRA_194_SOC=y
+CONFIG_ARCH_K3_AM6_SOC=y
+CONFIG_SOC_TI=y
CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
CONFIG_EXTCON_USB_GPIO=y
CONFIG_EXTCON_USBC_CROS_EC=y
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 6e9f33d14930..067d8937d5af 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -417,7 +417,7 @@ static int gcm_encrypt(struct aead_request *req)
__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
- while (walk.nbytes >= AES_BLOCK_SIZE) {
+ while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
int blocks = walk.nbytes / AES_BLOCK_SIZE;
u8 *dst = walk.dst.virt.addr;
u8 *src = walk.src.virt.addr;
@@ -437,11 +437,18 @@ static int gcm_encrypt(struct aead_request *req)
NULL);
err = skcipher_walk_done(&walk,
- walk.nbytes % AES_BLOCK_SIZE);
+ walk.nbytes % (2 * AES_BLOCK_SIZE));
}
- if (walk.nbytes)
+ if (walk.nbytes) {
__aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv,
nrounds);
+ if (walk.nbytes > AES_BLOCK_SIZE) {
+ crypto_inc(iv, AES_BLOCK_SIZE);
+ __aes_arm64_encrypt(ctx->aes_key.key_enc,
+ ks + AES_BLOCK_SIZE, iv,
+ nrounds);
+ }
+ }
}
/* handle the tail */
@@ -545,7 +552,7 @@ static int gcm_decrypt(struct aead_request *req)
__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
put_unaligned_be32(2, iv + GCM_IV_SIZE);
- while (walk.nbytes >= AES_BLOCK_SIZE) {
+ while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
int blocks = walk.nbytes / AES_BLOCK_SIZE;
u8 *dst = walk.dst.virt.addr;
u8 *src = walk.src.virt.addr;
@@ -564,11 +571,21 @@ static int gcm_decrypt(struct aead_request *req)
} while (--blocks > 0);
err = skcipher_walk_done(&walk,
- walk.nbytes % AES_BLOCK_SIZE);
+ walk.nbytes % (2 * AES_BLOCK_SIZE));
}
- if (walk.nbytes)
+ if (walk.nbytes) {
+ if (walk.nbytes > AES_BLOCK_SIZE) {
+ u8 *iv2 = iv + AES_BLOCK_SIZE;
+
+ memcpy(iv2, iv, AES_BLOCK_SIZE);
+ crypto_inc(iv2, AES_BLOCK_SIZE);
+
+ __aes_arm64_encrypt(ctx->aes_key.key_enc, iv2,
+ iv2, nrounds);
+ }
__aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv,
nrounds);
+ }
}
/* handle the tail */
diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c
index b7fb5274b250..0c4fc223f225 100644
--- a/arch/arm64/crypto/sm4-ce-glue.c
+++ b/arch/arm64/crypto/sm4-ce-glue.c
@@ -69,5 +69,5 @@ static void __exit sm4_ce_mod_fini(void)
crypto_unregister_alg(&sm4_ce_alg);
}
-module_cpu_feature_match(SM3, sm4_ce_mod_init);
+module_cpu_feature_match(SM4, sm4_ce_mod_init);
module_exit(sm4_ce_mod_fini);
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index 3534aa6a4dc2..1b083c500b9a 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -98,11 +98,10 @@ static time64_t pmu_read_time(void)
if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
return 0;
- while (!req.complete)
- pmu_poll();
+ pmu_wait_complete(&req);
- time = (u32)((req.reply[1] << 24) | (req.reply[2] << 16) |
- (req.reply[3] << 8) | req.reply[4]);
+ time = (u32)((req.reply[0] << 24) | (req.reply[1] << 16) |
+ (req.reply[2] << 8) | req.reply[3]);
return time - RTC_OFFSET;
}
@@ -116,8 +115,7 @@ static void pmu_write_time(time64_t time)
(data >> 24) & 0xFF, (data >> 16) & 0xFF,
(data >> 8) & 0xFF, data & 0xFF) < 0)
return;
- while (!req.complete)
- pmu_poll();
+ pmu_wait_complete(&req);
}
static __u8 pmu_read_pram(int offset)
diff --git a/arch/nios2/Kconfig.debug b/arch/nios2/Kconfig.debug
index 7a49f0d28d14..f1da8a7b17ff 100644
--- a/arch/nios2/Kconfig.debug
+++ b/arch/nios2/Kconfig.debug
@@ -3,15 +3,6 @@
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-config DEBUG_STACK_USAGE
- bool "Enable stack utilization instrumentation"
- depends on DEBUG_KERNEL
- help
- Enables the display of the minimum amount of free stack which each
- task has ever had available in the sysrq-T and sysrq-P debug output.
-
- This option will slow down process creation somewhat.
-
config EARLY_PRINTK
bool "Activate early kernel debugging"
default y
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index db0b6eebbfa5..a80669209155 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -177,7 +177,6 @@ config PPC
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
- select HAVE_ARCH_PREL32_RELOCATIONS
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_CBPF_JIT if !PPC64
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
index c229509288ea..439dc7072e05 100644
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -14,6 +14,10 @@
#ifndef _ASM_RISCV_TLB_H
#define _ASM_RISCV_TLB_H
+struct mmu_gather;
+
+static void tlb_flush(struct mmu_gather *tlb);
+
#include <asm-generic/tlb.h>
static inline void tlb_flush(struct mmu_gather *tlb)
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index 568026ccf6e8..fb03a4482ad6 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -65,24 +65,11 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
uintptr_t, flags)
{
-#ifdef CONFIG_SMP
- struct mm_struct *mm = current->mm;
- bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0;
-#endif
-
/* Check the reserved flags. */
if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
return -EINVAL;
- /*
- * Without CONFIG_SMP flush_icache_mm is a just a flush_icache_all(),
- * which generates unused variable warnings all over this function.
- */
-#ifdef CONFIG_SMP
- flush_icache_mm(mm, local);
-#else
- flush_icache_all();
-#endif
+ flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL);
return 0;
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c5ff296bc5d1..1a0be022f91d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2843,7 +2843,7 @@ config X86_SYSFB
This option, if enabled, marks VGA/VBE/EFI framebuffers as generic
framebuffers so the new generic system-framebuffer drivers can be
used on x86. If the framebuffer is not compatible with the generic
- modes, it is adverticed as fallback platform framebuffer so legacy
+ modes, it is advertised as fallback platform framebuffer so legacy
drivers like efifb, vesafb and uvesafb can pick it up.
If this option is not selected, all system framebuffers are always
marked as fallback platform framebuffers as usual.
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 94859241bc3e..8f6e7eb8ae9f 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -175,22 +175,6 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER
endif
endif
-ifndef CC_HAVE_ASM_GOTO
- $(error Compiler lacks asm-goto support.)
-endif
-
-#
-# Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a
-# GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226). There's no way
-# to test for this bug at compile-time because the test case needs to execute,
-# which is a no-go for cross compilers. So check the GCC version instead.
-#
-ifdef CONFIG_JUMP_LABEL
- ifneq ($(ACCUMULATE_OUTGOING_ARGS), 1)
- ACCUMULATE_OUTGOING_ARGS = $(call cc-if-fullversion, -lt, 040502, 1)
- endif
-endif
-
ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
# This compiler flag is not supported by Clang:
KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
@@ -312,6 +296,13 @@ PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@
+archprepare: checkbin
+checkbin:
+ifndef CC_HAVE_ASM_GOTO
+ @echo Compiler lacks asm-goto support.
+ @exit 1
+endif
+
archclean:
$(Q)rm -rf $(objtree)/arch/i386
$(Q)rm -rf $(objtree)/arch/x86_64
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 9bd139569b41..cb2deb61c5d9 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -223,34 +223,34 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
pcmpeqd TWOONE(%rip), \TMP2
pand POLY(%rip), \TMP2
pxor \TMP2, \TMP3
- movdqa \TMP3, HashKey(%arg2)
+ movdqu \TMP3, HashKey(%arg2)
movdqa \TMP3, \TMP5
pshufd $78, \TMP3, \TMP1
pxor \TMP3, \TMP1
- movdqa \TMP1, HashKey_k(%arg2)
+ movdqu \TMP1, HashKey_k(%arg2)
GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
# TMP5 = HashKey^2<<1 (mod poly)
- movdqa \TMP5, HashKey_2(%arg2)
+ movdqu \TMP5, HashKey_2(%arg2)
# HashKey_2 = HashKey^2<<1 (mod poly)
pshufd $78, \TMP5, \TMP1
pxor \TMP5, \TMP1
- movdqa \TMP1, HashKey_2_k(%arg2)
+ movdqu \TMP1, HashKey_2_k(%arg2)
GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
# TMP5 = HashKey^3<<1 (mod poly)
- movdqa \TMP5, HashKey_3(%arg2)
+ movdqu \TMP5, HashKey_3(%arg2)
pshufd $78, \TMP5, \TMP1
pxor \TMP5, \TMP1
- movdqa \TMP1, HashKey_3_k(%arg2)
+ movdqu \TMP1, HashKey_3_k(%arg2)
GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
# TMP5 = HashKey^3<<1 (mod poly)
- movdqa \TMP5, HashKey_4(%arg2)
+ movdqu \TMP5, HashKey_4(%arg2)
pshufd $78, \TMP5, \TMP1
pxor \TMP5, \TMP1
- movdqa \TMP1, HashKey_4_k(%arg2)
+ movdqu \TMP1, HashKey_4_k(%arg2)
.endm
# GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding.
@@ -271,7 +271,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv
PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
- movdqa HashKey(%arg2), %xmm13
+ movdqu HashKey(%arg2), %xmm13
CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \
%xmm4, %xmm5, %xmm6
@@ -997,7 +997,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pshufd $78, \XMM5, \TMP6
pxor \XMM5, \TMP6
paddd ONE(%rip), \XMM0 # INCR CNT
- movdqa HashKey_4(%arg2), \TMP5
+ movdqu HashKey_4(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
movdqa \XMM0, \XMM1
paddd ONE(%rip), \XMM0 # INCR CNT
@@ -1016,7 +1016,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pxor (%arg1), \XMM2
pxor (%arg1), \XMM3
pxor (%arg1), \XMM4
- movdqa HashKey_4_k(%arg2), \TMP5
+ movdqu HashKey_4_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
movaps 0x10(%arg1), \TMP1
AESENC \TMP1, \XMM1 # Round 1
@@ -1031,7 +1031,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM6, \TMP1
pshufd $78, \XMM6, \TMP2
pxor \XMM6, \TMP2
- movdqa HashKey_3(%arg2), \TMP5
+ movdqu HashKey_3(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
movaps 0x30(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 3
@@ -1044,7 +1044,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
AESENC \TMP3, \XMM2
AESENC \TMP3, \XMM3
AESENC \TMP3, \XMM4
- movdqa HashKey_3_k(%arg2), \TMP5
+ movdqu HashKey_3_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x50(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 5
@@ -1058,7 +1058,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM7, \TMP1
pshufd $78, \XMM7, \TMP2
pxor \XMM7, \TMP2
- movdqa HashKey_2(%arg2), \TMP5
+ movdqu HashKey_2(%arg2), \TMP5
# Multiply TMP5 * HashKey using karatsuba
@@ -1074,7 +1074,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
AESENC \TMP3, \XMM2
AESENC \TMP3, \XMM3
AESENC \TMP3, \XMM4
- movdqa HashKey_2_k(%arg2), \TMP5
+ movdqu HashKey_2_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x80(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 8
@@ -1092,7 +1092,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM8, \TMP1
pshufd $78, \XMM8, \TMP2
pxor \XMM8, \TMP2
- movdqa HashKey(%arg2), \TMP5
+ movdqu HashKey(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
movaps 0x90(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 9
@@ -1121,7 +1121,7 @@ aes_loop_par_enc_done\@:
AESENCLAST \TMP3, \XMM2
AESENCLAST \TMP3, \XMM3
AESENCLAST \TMP3, \XMM4
- movdqa HashKey_k(%arg2), \TMP5
+ movdqu HashKey_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movdqu (%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
@@ -1205,7 +1205,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pshufd $78, \XMM5, \TMP6
pxor \XMM5, \TMP6
paddd ONE(%rip), \XMM0 # INCR CNT
- movdqa HashKey_4(%arg2), \TMP5
+ movdqu HashKey_4(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
movdqa \XMM0, \XMM1
paddd ONE(%rip), \XMM0 # INCR CNT
@@ -1224,7 +1224,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pxor (%arg1), \XMM2
pxor (%arg1), \XMM3
pxor (%arg1), \XMM4
- movdqa HashKey_4_k(%arg2), \TMP5
+ movdqu HashKey_4_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
movaps 0x10(%arg1), \TMP1
AESENC \TMP1, \XMM1 # Round 1
@@ -1239,7 +1239,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM6, \TMP1
pshufd $78, \XMM6, \TMP2
pxor \XMM6, \TMP2
- movdqa HashKey_3(%arg2), \TMP5
+ movdqu HashKey_3(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
movaps 0x30(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 3
@@ -1252,7 +1252,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
AESENC \TMP3, \XMM2
AESENC \TMP3, \XMM3
AESENC \TMP3, \XMM4
- movdqa HashKey_3_k(%arg2), \TMP5
+ movdqu HashKey_3_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x50(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 5
@@ -1266,7 +1266,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM7, \TMP1
pshufd $78, \XMM7, \TMP2
pxor \XMM7, \TMP2
- movdqa HashKey_2(%arg2), \TMP5
+ movdqu HashKey_2(%arg2), \TMP5
# Multiply TMP5 * HashKey using karatsuba
@@ -1282,7 +1282,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
AESENC \TMP3, \XMM2
AESENC \TMP3, \XMM3
AESENC \TMP3, \XMM4
- movdqa HashKey_2_k(%arg2), \TMP5
+ movdqu HashKey_2_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x80(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 8
@@ -1300,7 +1300,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM8, \TMP1
pshufd $78, \XMM8, \TMP2
pxor \XMM8, \TMP2
- movdqa HashKey(%arg2), \TMP5
+ movdqu HashKey(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
movaps 0x90(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 9
@@ -1329,7 +1329,7 @@ aes_loop_par_dec_done\@:
AESENCLAST \TMP3, \XMM2
AESENCLAST \TMP3, \XMM3
AESENCLAST \TMP3, \XMM4
- movdqa HashKey_k(%arg2), \TMP5
+ movdqu HashKey_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movdqu (%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
@@ -1405,10 +1405,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
movdqa \XMM1, \TMP6
pshufd $78, \XMM1, \TMP2
pxor \XMM1, \TMP2
- movdqa HashKey_4(%arg2), \TMP5
+ movdqu HashKey_4(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1
PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0
- movdqa HashKey_4_k(%arg2), \TMP4
+ movdqu HashKey_4_k(%arg2), \TMP4
PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movdqa \XMM1, \XMMDst
movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1
@@ -1418,10 +1418,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
movdqa \XMM2, \TMP1
pshufd $78, \XMM2, \TMP2
pxor \XMM2, \TMP2
- movdqa HashKey_3(%arg2), \TMP5
+ movdqu HashKey_3(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0
- movdqa HashKey_3_k(%arg2), \TMP4
+ movdqu HashKey_3_k(%arg2), \TMP4
PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
pxor \TMP1, \TMP6
pxor \XMM2, \XMMDst
@@ -1433,10 +1433,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
movdqa \XMM3, \TMP1
pshufd $78, \XMM3, \TMP2
pxor \XMM3, \TMP2
- movdqa HashKey_2(%arg2), \TMP5
+ movdqu HashKey_2(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0
- movdqa HashKey_2_k(%arg2), \TMP4
+ movdqu HashKey_2_k(%arg2), \TMP4
PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
pxor \TMP1, \TMP6
pxor \XMM3, \XMMDst
@@ -1446,10 +1446,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
movdqa \XMM4, \TMP1
pshufd $78, \XMM4, \TMP2
pxor \XMM4, \TMP2
- movdqa HashKey(%arg2), \TMP5
+ movdqu HashKey(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0
- movdqa HashKey_k(%arg2), \TMP4
+ movdqu HashKey_k(%arg2), \TMP4
PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
pxor \TMP1, \TMP6
pxor \XMM4, \XMMDst
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 5f4829f10129..dfb2f7c0d019 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2465,7 +2465,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
perf_callchain_store(entry, regs->ip);
- if (!current->mm)
+ if (!nmi_uaccess_okay())
return;
if (perf_callchain_user32(regs, entry))
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index c14f2a74b2be..15450a675031 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -33,7 +33,8 @@ extern inline unsigned long native_save_fl(void)
return flags;
}
-static inline void native_restore_fl(unsigned long flags)
+extern inline void native_restore_fl(unsigned long flags);
+extern inline void native_restore_fl(unsigned long flags)
{
asm volatile("push %0 ; popf"
: /* no output */
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index a564084c6141..f8b1ad2c3828 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H
+#include <asm/atomic64_32.h>
+
/*
* Intel Physical Address Extension (PAE) Mode - three-level page
* tables on PPro+ CPUs.
@@ -150,10 +152,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
pte_t res;
- /* xchg acts as a barrier before the setting of the high bits */
- res.pte_low = xchg(&ptep->pte_low, 0);
- res.pte_high = ptep->pte_high;
- ptep->pte_high = 0;
+ res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0);
return res;
}
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c24297268ebc..d53c54b842da 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -132,6 +132,8 @@ struct cpuinfo_x86 {
/* Index into per_cpu list: */
u16 cpu_index;
u32 microcode;
+ /* Address space bits used by the cache internally */
+ u8 x86_cache_bits;
unsigned initialized : 1;
} __randomize_layout;
@@ -183,7 +185,7 @@ extern void cpu_detect(struct cpuinfo_x86 *c);
static inline unsigned long long l1tf_pfn_limit(void)
{
- return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT);
+ return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
}
extern void early_cpu_init(void);
diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
index 5f9012ff52ed..33d3c88a7225 100644
--- a/arch/x86/include/asm/signal.h
+++ b/arch/x86/include/asm/signal.h
@@ -39,6 +39,7 @@ extern void do_signal(struct pt_regs *regs);
#define __ARCH_HAS_SA_RESTORER
+#include <asm/asm.h>
#include <uapi/asm/sigcontext.h>
#ifdef __i386__
@@ -86,9 +87,9 @@ static inline int __const_sigismember(sigset_t *set, int _sig)
static inline int __gen_sigismember(sigset_t *set, int _sig)
{
- unsigned char ret;
- asm("btl %2,%1\n\tsetc %0"
- : "=qm"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
+ bool ret;
+ asm("btl %2,%1" CC_SET(c)
+ : CC_OUT(c) (ret) : "m"(*set), "Ir"(_sig-1));
return ret;
}
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index b6dc698f992a..f335aad404a4 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -111,6 +111,6 @@ static inline unsigned long caller_frame_pointer(void)
return (unsigned long)frame;
}
-void show_opcodes(u8 *rip, const char *loglvl);
+void show_opcodes(struct pt_regs *regs, const char *loglvl);
void show_ip(struct pt_regs *regs, const char *loglvl);
#endif /* _ASM_X86_STACKTRACE_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 29c9da6c62fc..58ce5288878e 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -175,8 +175,16 @@ struct tlb_state {
* are on. This means that it may not match current->active_mm,
* which will contain the previous user mm when we're in lazy TLB
* mode even if we've already switched back to swapper_pg_dir.
+ *
+ * During switch_mm_irqs_off(), loaded_mm will be set to
+ * LOADED_MM_SWITCHING during the brief interrupts-off window
+ * when CR3 and loaded_mm would otherwise be inconsistent. This
+ * is for nmi_uaccess_okay()'s benefit.
*/
struct mm_struct *loaded_mm;
+
+#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
+
u16 loaded_mm_asid;
u16 next_asid;
/* last user mm's ctx id */
@@ -246,6 +254,38 @@ struct tlb_state {
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
+/*
+ * Blindly accessing user memory from NMI context can be dangerous
+ * if we're in the middle of switching the current user task or
+ * switching the loaded mm. It can also be dangerous if we
+ * interrupted some kernel code that was temporarily using a
+ * different mm.
+ */
+static inline bool nmi_uaccess_okay(void)
+{
+ struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+ struct mm_struct *current_mm = current->mm;
+
+ VM_WARN_ON_ONCE(!loaded_mm);
+
+ /*
+ * The condition we want to check is
+ * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
+ * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
+ * is supposed to be reasonably fast.
+ *
+ * Instead, we check the almost equivalent but somewhat conservative
+ * condition below, and we rely on the fact that switch_mm_irqs_off()
+ * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
+ */
+ if (loaded_mm != current_mm)
+ return false;
+
+ VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
+
+ return true;
+}
+
/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index fb856c9f0449..53748541c487 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void)
*
* If RDPID is available, use it.
*/
- alternative_io ("lsl %[p],%[seg]",
+ alternative_io ("lsl %[seg],%[p]",
".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
X86_FEATURE_RDPID,
[p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 014f214da581..b9d5e7c9ef43 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -684,8 +684,6 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
* It means the size must be writable atomically and the address must be aligned
* in a way that permits an atomic write. It also makes sure we fit on a single
* page.
- *
- * Note: Must be called under text_mutex.
*/
void *text_poke(void *addr, const void *opcode, size_t len)
{
@@ -700,6 +698,8 @@ void *text_poke(void *addr, const void *opcode, size_t len)
*/
BUG_ON(!after_bootmem);
+ lockdep_assert_held(&text_mutex);
+
if (!core_kernel_text((unsigned long)addr)) {
pages[0] = vmalloc_to_page(addr);
pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
@@ -782,8 +782,6 @@ int poke_int3_handler(struct pt_regs *regs)
* - replace the first byte (int3) by the first byte of
* replacing opcode
* - sync cores
- *
- * Note: must be called under text_mutex.
*/
void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
@@ -792,6 +790,9 @@ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
bp_int3_handler = handler;
bp_int3_addr = (u8 *)addr + sizeof(int3);
bp_patching_in_progress = true;
+
+ lockdep_assert_held(&text_mutex);
+
/*
* Corresponding read barrier in int3 notifier for making sure the
* in_progress and handler are correctly ordered wrt. patching.
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 4c2313d0b9ca..40bdaea97fe7 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -668,6 +668,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+/*
+ * These CPUs all support 44 bits of physical address space internally in
+ * the cache, but CPUID can report a smaller number of physical address
+ * bits.
+ *
+ * The L1TF mitigation uses the topmost address bit for the inversion of
+ * non-present PTEs. When the installed memory reaches into that topmost
+ * address bit due to memory holes, which has been observed on machines
+ * that report 36 physical address bits and have 32G RAM installed, the
+ * mitigation range check in l1tf_select_mitigation() triggers. This is a
+ * false positive because the mitigation is still possible, as the cache
+ * internally uses 44 bits. Use the cache bits instead of the reported
+ * physical bits, and adjust them to 44 on the affected machines if the
+ * reported value is less than 44.
+ */
+static void override_cache_bits(struct cpuinfo_x86 *c)
+{
+ if (c->x86 != 6)
+ return;
+
+ switch (c->x86_model) {
+ case INTEL_FAM6_NEHALEM:
+ case INTEL_FAM6_WESTMERE:
+ case INTEL_FAM6_SANDYBRIDGE:
+ case INTEL_FAM6_IVYBRIDGE:
+ case INTEL_FAM6_HASWELL_CORE:
+ case INTEL_FAM6_HASWELL_ULT:
+ case INTEL_FAM6_HASWELL_GT3E:
+ case INTEL_FAM6_BROADWELL_CORE:
+ case INTEL_FAM6_BROADWELL_GT3E:
+ case INTEL_FAM6_SKYLAKE_MOBILE:
+ case INTEL_FAM6_SKYLAKE_DESKTOP:
+ case INTEL_FAM6_KABYLAKE_MOBILE:
+ case INTEL_FAM6_KABYLAKE_DESKTOP:
+ if (c->x86_cache_bits < 44)
+ c->x86_cache_bits = 44;
+ break;
+ }
+}
+
static void __init l1tf_select_mitigation(void)
{
u64 half_pa;
@@ -675,6 +714,8 @@ static void __init l1tf_select_mitigation(void)
if (!boot_cpu_has_bug(X86_BUG_L1TF))
return;
+ override_cache_bits(&boot_cpu_data);
+
switch (l1tf_mitigation) {
case L1TF_MITIGATION_OFF:
case L1TF_MITIGATION_FLUSH_NOWARN:
@@ -694,11 +735,6 @@ static void __init l1tf_select_mitigation(void)
return;
#endif
- /*
- * This is extremely unlikely to happen because almost all
- * systems have far more MAX_PA/2 than RAM can be fit into
- * DIMM slots.
- */
half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
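
To put numbers on the override_cache_bits() comment above: l1tf_pfn_limit()
is BIT_ULL(x86_cache_bits - 1 - PAGE_SHIFT), so half_pa works out to
l1tf_pfn_limit() << PAGE_SHIFT = 2^(bits - 1). With the reported 36 physical
address bits that is 2^35 bytes = 32 GB, which is why a box with 32G of RAM
plus memory holes already reaches the limit and used to trigger the warning
spuriously; with the cache width forced to 44 bits the limit becomes 2^43
bytes = 8 TB, comfortably above any such system.
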
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 84dee5ab745a..44c4ef3d989b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -919,6 +919,7 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
c->x86_phys_bits = 36;
#endif
+ c->x86_cache_bits = c->x86_phys_bits;
}
static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 401e8c133108..fc3c07fe7df5 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_HYPERVISOR))
return false;
+ if (c->x86 != 6)
+ return false;
+
for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
if (c->x86_model == spectre_bad_microcodes[i].model &&
c->x86_stepping == spectre_bad_microcodes[i].stepping)
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 9c8652974f8e..f56895106ccf 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -17,6 +17,7 @@
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>
+#include <linux/kasan.h>
#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
@@ -89,14 +90,24 @@ static void printk_stack_address(unsigned long address, int reliable,
* Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random
* guesstimate in attempt to achieve all of the above.
*/
-void show_opcodes(u8 *rip, const char *loglvl)
+void show_opcodes(struct pt_regs *regs, const char *loglvl)
{
#define PROLOGUE_SIZE 42
#define EPILOGUE_SIZE 21
#define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
u8 opcodes[OPCODE_BUFSIZE];
+ unsigned long prologue = regs->ip - PROLOGUE_SIZE;
+ bool bad_ip;
- if (probe_kernel_read(opcodes, rip - PROLOGUE_SIZE, OPCODE_BUFSIZE)) {
+ /*
+ * Make sure userspace isn't trying to trick us into dumping kernel
+ * memory by pointing the userspace instruction pointer at it.
+ */
+ bad_ip = user_mode(regs) &&
+ __chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX);
+
+ if (bad_ip || probe_kernel_read(opcodes, (u8 *)prologue,
+ OPCODE_BUFSIZE)) {
printk("%sCode: Bad RIP value.\n", loglvl);
} else {
printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
@@ -112,7 +123,7 @@ void show_ip(struct pt_regs *regs, const char *loglvl)
#else
printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip);
#endif
- show_opcodes((u8 *)regs->ip, loglvl);
+ show_opcodes(regs, loglvl);
}
void show_iret_regs(struct pt_regs *regs)
@@ -346,7 +357,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
* We're not going to return, but we might be on an IST stack or
* have very little stack space left. Rewind the stack and kill
* the task.
+ * Before we rewind the stack, we have to tell KASAN that we're going to
+ * reuse the task stack and that existing poisons are invalid.
*/
+ kasan_unpoison_task_stack(current);
rewind_stack_do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index c8c6ad0d58b8..3f435d7fca5e 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -7,6 +7,8 @@
#include <linux/uaccess.h>
#include <linux/export.h>
+#include <asm/tlbflush.h>
+
/*
* We rely on the nested NMI work to allow atomic faults from the NMI path; the
* nested NMI paths are careful to preserve CR2.
@@ -19,6 +21,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
if (__range_not_ok(from, n, TASK_SIZE))
return n;
+ if (!nmi_uaccess_okay())
+ return n;
+
/*
* Even though this function is typically called from NMI/IRQ context
* disable pagefaults so that its behaviour is consistent even when
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b9123c497e0a..47bebfe6efa7 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -837,7 +837,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
printk(KERN_CONT "\n");
- show_opcodes((u8 *)regs->ip, loglvl);
+ show_opcodes(regs, loglvl);
}
static void
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 8d6c34fe49be..51a5a69ecac9 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1420,6 +1420,29 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
return 0;
}
+/*
+ * Machine check recovery code needs to change cache mode of poisoned
+ * pages to UC to avoid speculative access logging another error. But
+ * passing the address of the 1:1 mapping to set_memory_uc() is a fine
+ * way to encourage a speculative access. So we cheat and flip the top
+ * bit of the address. This works fine for the code that updates the
+ * page tables. But at the end of the process we need to flush the cache
+ * and the non-canonical address causes a #GP fault when used by the
+ * CLFLUSH instruction.
+ *
+ * But in the common case we already have a canonical address. This code
+ * will fix the top bit if needed and is a no-op otherwise.
+ */
+static inline unsigned long make_addr_canonical_again(unsigned long addr)
+{
+#ifdef CONFIG_X86_64
+ return (long)(addr << 1) >> 1;
+#else
+ return addr;
+#endif
+}
+
+
static int change_page_attr_set_clr(unsigned long *addr, int numpages,
pgprot_t mask_set, pgprot_t mask_clr,
int force_split, int in_flag,
@@ -1465,7 +1488,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
* Save address for cache flush. *addr is modified in the call
* to __change_page_attr_set_clr() below.
*/
- baddr = *addr;
+ baddr = make_addr_canonical_again(*addr);
}
/* Must avoid aliasing mappings in the highmem code */
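
The shift pair in make_addr_canonical_again() relies on sign extension:
direct-map addresses have bit 62 set, so (long)(addr << 1) discards the
(possibly flipped) top bit and the arithmetic right shift copies bit 62 back
into bit 63. For example, assuming a direct map based at 0xffff888000000000,
the decoy address 0x7fff888000000000 becomes 0xffff110000000000 after the
left shift and 0xffff888000000000 again after the signed right shift, i.e.
the original canonical address is restored.
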
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 31341ae7309f..c1fc1ae6b429 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -248,7 +248,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
*
* Returns a pointer to a PTE on success, or NULL on failure.
*/
-static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
pmd_t *pmd;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 9517d1b2a281..e96b99eb800c 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -305,6 +305,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
+ /* Let nmi_uaccess_okay() know that we're changing CR3. */
+ this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
+ barrier();
+
if (need_flush) {
this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
@@ -335,6 +339,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
if (next != &init_mm)
this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+ /* Make sure we write CR3 before loaded_mm. */
+ barrier();
+
this_cpu_write(cpu_tlbstate.loaded_mm, next);
this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
}
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 324b93328b37..05ca14222463 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -85,14 +85,10 @@ pgd_t * __init efi_call_phys_prolog(void)
void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
- struct desc_ptr gdt_descr;
-
- gdt_descr.address = (unsigned long)get_cpu_gdt_rw(0);
- gdt_descr.size = GDT_SIZE - 1;
- load_gdt(&gdt_descr);
-
load_cr3(save_pgd);
__flush_tlb_all();
+
+ load_fixmap_gdt(0);
}
void __init efi_runtime_update_mappings(void)
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 45b700ac5fe7..2fe5c9b1816b 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -435,14 +435,13 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
trace_xen_mmu_set_pte_atomic(ptep, pte);
- set_64bit((u64 *)ptep, native_pte_val(pte));
+ __xen_set_pte(ptep, pte);
}
static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
trace_xen_mmu_pte_clear(mm, addr, ptep);
- if (!xen_batched_set_pte(ptep, native_make_pte(0)))
- native_pte_clear(mm, addr, ptep);
+ __xen_set_pte(ptep, native_make_pte(0));
}
static void xen_pmd_clear(pmd_t *pmdp)
@@ -1570,7 +1569,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
pte_val_ma(pte));
#endif
- native_set_pte(ptep, pte);
+ __xen_set_pte(ptep, pte);
}
/* Early in boot, while setting up the initial pagetable, assume
@@ -2061,7 +2060,6 @@ void __init xen_relocate_p2m(void)
pud_t *pud;
pgd_t *pgd;
unsigned long *new_p2m;
- int save_pud;
size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
@@ -2091,7 +2089,6 @@ void __init xen_relocate_p2m(void)
pgd = __va(read_cr3_pa());
new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
- save_pud = n_pud;
for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
pud = early_memremap(pud_phys, PAGE_SIZE);
clear_page(pud);
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 84507d3e9a98..8e20a0677dcf 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -123,16 +123,11 @@ static void rwb_wake_all(struct rq_wb *rwb)
}
}
-static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
+static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
+ enum wbt_flags wb_acct)
{
- struct rq_wb *rwb = RQWB(rqos);
- struct rq_wait *rqw;
int inflight, limit;
- if (!(wb_acct & WBT_TRACKED))
- return;
-
- rqw = get_rq_wait(rwb, wb_acct);
inflight = atomic_dec_return(&rqw->inflight);
/*
@@ -166,10 +161,22 @@ static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
int diff = limit - inflight;
if (!inflight || diff >= rwb->wb_background / 2)
- wake_up(&rqw->wait);
+ wake_up_all(&rqw->wait);
}
}
+static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
+{
+ struct rq_wb *rwb = RQWB(rqos);
+ struct rq_wait *rqw;
+
+ if (!(wb_acct & WBT_TRACKED))
+ return;
+
+ rqw = get_rq_wait(rwb, wb_acct);
+ wbt_rqw_done(rwb, rqw, wb_acct);
+}
+
/*
* Called on completion of a request. Note that it's also called when
* a request is merged, when the request gets freed.
@@ -481,6 +488,34 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
return limit;
}
+struct wbt_wait_data {
+ struct wait_queue_entry wq;
+ struct task_struct *task;
+ struct rq_wb *rwb;
+ struct rq_wait *rqw;
+ unsigned long rw;
+ bool got_token;
+};
+
+static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode,
+ int wake_flags, void *key)
+{
+ struct wbt_wait_data *data = container_of(curr, struct wbt_wait_data,
+ wq);
+
+ /*
+ * If we fail to get a budget, return -1 to interrupt the wake up
+ * loop in __wake_up_common.
+ */
+ if (!rq_wait_inc_below(data->rqw, get_limit(data->rwb, data->rw)))
+ return -1;
+
+ data->got_token = true;
+ list_del_init(&curr->entry);
+ wake_up_process(data->task);
+ return 1;
+}
+
/*
* Block if we will exceed our limit, or if we are currently waiting for
* the timer to kick off queuing again.
@@ -491,19 +526,40 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
__acquires(lock)
{
struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
- DECLARE_WAITQUEUE(wait, current);
+ struct wbt_wait_data data = {
+ .wq = {
+ .func = wbt_wake_function,
+ .entry = LIST_HEAD_INIT(data.wq.entry),
+ },
+ .task = current,
+ .rwb = rwb,
+ .rqw = rqw,
+ .rw = rw,
+ };
bool has_sleeper;
has_sleeper = wq_has_sleeper(&rqw->wait);
if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
return;
- add_wait_queue_exclusive(&rqw->wait, &wait);
+ prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
do {
- set_current_state(TASK_UNINTERRUPTIBLE);
+ if (data.got_token)
+ break;
- if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
+ if (!has_sleeper &&
+ rq_wait_inc_below(rqw, get_limit(rwb, rw))) {
+ finish_wait(&rqw->wait, &data.wq);
+
+ /*
+ * We raced with wbt_wake_function() getting a token,
+ * which means we now have two. Put our local token
+ * and wake anyone else potentially waiting for one.
+ */
+ if (data.got_token)
+ wbt_rqw_done(rwb, rqw, wb_acct);
break;
+ }
if (lock) {
spin_unlock_irq(lock);
@@ -511,11 +567,11 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
spin_lock_irq(lock);
} else
io_schedule();
+
has_sleeper = false;
} while (1);
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&rqw->wait, &wait);
+ finish_wait(&rqw->wait, &data.wq);
}
static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
@@ -580,11 +636,6 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
return;
}
- if (current_is_kswapd())
- flags |= WBT_KSWAPD;
- if (bio_op(bio) == REQ_OP_DISCARD)
- flags |= WBT_DISCARD;
-
__wbt_wait(rwb, flags, bio->bi_opf, lock);
if (!blk_stat_is_active(rwb->cb))
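For reference, a minimal sketch of the token-handoff wake callback pattern introduced above, using invented demo_* names; only the wait-queue interfaces (wait_queue_entry, list_del_init(), wake_up_process()) are the real API, and the waiter side pairs this with prepare_to_wait_exclusive()/finish_wait() exactly as __wbt_wait() now does:

/* Illustrative only: modelled on wbt_wake_function(); not the driver code. */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wait.h>

struct demo_wait_data {
	struct wait_queue_entry wq;
	struct task_struct *task;	/* waiter sets this to current */
	bool got_token;
};

static bool demo_try_get_token(void)
{
	return true;	/* placeholder for the real inflight-limit check */
}

static int demo_wake_fn(struct wait_queue_entry *curr, unsigned int mode,
			int wake_flags, void *key)
{
	struct demo_wait_data *d = container_of(curr, struct demo_wait_data, wq);

	/* A negative return stops __wake_up_common() and keeps us queued. */
	if (!demo_try_get_token())
		return -1;

	d->got_token = true;
	list_del_init(&curr->entry);	/* token acquired, leave the queue */
	wake_up_process(d->task);	/* run the waiter that owns it */
	return 1;
}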
diff --git a/block/bsg.c b/block/bsg.c
index db588add6ba6..9a442c23a715 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -37,7 +37,7 @@ struct bsg_device {
struct request_queue *queue;
spinlock_t lock;
struct hlist_node dev_list;
- atomic_t ref_count;
+ refcount_t ref_count;
char name[20];
int max_queue;
};
@@ -252,7 +252,7 @@ static int bsg_put_device(struct bsg_device *bd)
mutex_lock(&bsg_mutex);
- if (!atomic_dec_and_test(&bd->ref_count)) {
+ if (!refcount_dec_and_test(&bd->ref_count)) {
mutex_unlock(&bsg_mutex);
return 0;
}
@@ -290,7 +290,7 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
bd->queue = rq;
- atomic_set(&bd->ref_count, 1);
+ refcount_set(&bd->ref_count, 1);
hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
@@ -308,7 +308,7 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
if (bd->queue == q) {
- atomic_inc(&bd->ref_count);
+ refcount_inc(&bd->ref_count);
goto found;
}
}
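The atomic_t to refcount_t switch above buys overflow and use-after-free sanity checks. A hedged sketch of the same get/put pattern on a made-up object (the refcount_*() calls are the real <linux/refcount.h> API, everything else is illustrative):

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_obj {
	refcount_t ref;
};

static void demo_get(struct demo_obj *o)
{
	refcount_inc(&o->ref);		/* warns on a 0 -> 1 increment */
}

static void demo_put(struct demo_obj *o)
{
	if (refcount_dec_and_test(&o->ref))
		kfree(o);		/* last reference dropped */
}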
diff --git a/block/elevator.c b/block/elevator.c
index 5ea6e7d600e4..6a06b5d040e5 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -895,8 +895,7 @@ int elv_register(struct elevator_type *e)
spin_lock(&elv_list_lock);
if (elevator_find(e->elevator_name, e->uses_mq)) {
spin_unlock(&elv_list_lock);
- if (e->icq_cache)
- kmem_cache_destroy(e->icq_cache);
+ kmem_cache_destroy(e->icq_cache);
return -EBUSY;
}
list_add_tail(&e->list, &elv_list);
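The NULL check removed above is redundant because kmem_cache_destroy() simply returns when passed a NULL cache. A small sketch with a made-up cache pointer:

#include <linux/slab.h>

static struct kmem_cache *demo_cache;	/* may still be NULL on error paths */

static void demo_cleanup(void)
{
	kmem_cache_destroy(demo_cache);	/* safe even when demo_cache == NULL */
	demo_cache = NULL;
}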
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
index 5d4b72e21161..569a4a662dcd 100644
--- a/drivers/ata/pata_ftide010.c
+++ b/drivers/ata/pata_ftide010.c
@@ -256,14 +256,12 @@ static struct ata_port_operations pata_ftide010_port_ops = {
.qc_issue = ftide010_qc_issue,
};
-static struct ata_port_info ftide010_port_info[] = {
- {
- .flags = ATA_FLAG_SLAVE_POSS,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .pio_mask = ATA_PIO4,
- .port_ops = &pata_ftide010_port_ops,
- },
+static struct ata_port_info ftide010_port_info = {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .pio_mask = ATA_PIO4,
+ .port_ops = &pata_ftide010_port_ops,
};
#if IS_ENABLED(CONFIG_SATA_GEMINI)
@@ -349,6 +347,7 @@ static int pata_ftide010_gemini_cable_detect(struct ata_port *ap)
}
static int pata_ftide010_gemini_init(struct ftide010 *ftide,
+ struct ata_port_info *pi,
bool is_ata1)
{
struct device *dev = ftide->dev;
@@ -373,7 +372,13 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide,
/* Flag port as SATA-capable */
if (gemini_sata_bridge_enabled(sg, is_ata1))
- ftide010_port_info[0].flags |= ATA_FLAG_SATA;
+ pi->flags |= ATA_FLAG_SATA;
+
+ /* This device has broken DMA, only PIO works */
+ if (of_machine_is_compatible("itian,sq201")) {
+ pi->mwdma_mask = 0;
+ pi->udma_mask = 0;
+ }
/*
* We assume that a simple 40-wire cable is used in the PATA mode.
@@ -435,6 +440,7 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide,
}
#else
static int pata_ftide010_gemini_init(struct ftide010 *ftide,
+ struct ata_port_info *pi,
bool is_ata1)
{
return -ENOTSUPP;
@@ -446,7 +452,7 @@ static int pata_ftide010_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- const struct ata_port_info pi = ftide010_port_info[0];
+ struct ata_port_info pi = ftide010_port_info;
const struct ata_port_info *ppi[] = { &pi, NULL };
struct ftide010 *ftide;
struct resource *res;
@@ -490,6 +496,7 @@ static int pata_ftide010_probe(struct platform_device *pdev)
* are ATA0. This will also set up the cable types.
*/
ret = pata_ftide010_gemini_init(ftide,
+ &pi,
(res->start == 0x63400000));
if (ret)
goto err_dis_clk;
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 8e2e4757adcb..5a42ae4078c2 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
int of_pm_clk_add_clks(struct device *dev)
{
struct clk **clks;
- unsigned int i, count;
+ int i, count;
int ret;
if (!dev || !dev->of_node)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index b55b245e8052..fd1e19f1a49f 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -84,6 +84,18 @@ MODULE_PARM_DESC(max_persistent_grants,
"Maximum number of grants to map persistently");
/*
+ * How long a persistent grant is allowed to remain allocated without being in
+ * use. The time is in seconds; 0 means the grant may stay allocated indefinitely.
+ */
+
+static unsigned int xen_blkif_pgrant_timeout = 60;
+module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout,
+ uint, 0644);
+MODULE_PARM_DESC(persistent_grant_unused_seconds,
+ "Time in seconds an unused persistent grant is allowed to "
+		 "remain allocated. Default is 60; 0 means unlimited.");
+
+/*
 * Maximum number of rings/queues blkback supports; allow as many queues as there
 * are CPUs if the user has not specified a value.
*/
@@ -123,6 +135,13 @@ module_param(log_stats, int, 0644);
/* Number of free pages to remove on each call to gnttab_free_pages */
#define NUM_BATCH_FREE_PAGES 10
+static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
+{
+ return xen_blkif_pgrant_timeout &&
+ (jiffies - persistent_gnt->last_used >=
+ HZ * xen_blkif_pgrant_timeout);
+}
+
static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
{
unsigned long flags;
@@ -236,8 +255,7 @@ static int add_persistent_gnt(struct xen_blkif_ring *ring,
}
}
- bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
- set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
+ persistent_gnt->active = true;
/* Add new node and rebalance tree. */
rb_link_node(&(persistent_gnt->node), parent, new);
rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
@@ -261,11 +279,11 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
else if (gref > data->gnt)
node = node->rb_right;
else {
- if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
+ if (data->active) {
pr_alert_ratelimited("requesting a grant already in use\n");
return NULL;
}
- set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
+ data->active = true;
atomic_inc(&ring->persistent_gnt_in_use);
return data;
}
@@ -276,10 +294,10 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
static void put_persistent_gnt(struct xen_blkif_ring *ring,
struct persistent_gnt *persistent_gnt)
{
- if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
+ if (!persistent_gnt->active)
pr_alert_ratelimited("freeing a grant already unused\n");
- set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
- clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
+ persistent_gnt->last_used = jiffies;
+ persistent_gnt->active = false;
atomic_dec(&ring->persistent_gnt_in_use);
}
@@ -371,26 +389,26 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring)
struct persistent_gnt *persistent_gnt;
struct rb_node *n;
unsigned int num_clean, total;
- bool scan_used = false, clean_used = false;
+ bool scan_used = false;
struct rb_root *root;
- if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
- (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
- !ring->blkif->vbd.overflow_max_grants)) {
- goto out;
- }
-
if (work_busy(&ring->persistent_purge_work)) {
pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
goto out;
}
- num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
- num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
- num_clean = min(ring->persistent_gnt_c, num_clean);
- if ((num_clean == 0) ||
- (num_clean > (ring->persistent_gnt_c - atomic_read(&ring->persistent_gnt_in_use))))
- goto out;
+ if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
+ (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
+ !ring->blkif->vbd.overflow_max_grants)) {
+ num_clean = 0;
+ } else {
+ num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
+ num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants +
+ num_clean;
+ num_clean = min(ring->persistent_gnt_c, num_clean);
+ pr_debug("Going to purge at least %u persistent grants\n",
+ num_clean);
+ }
/*
* At this point, we can assure that there will be no calls
@@ -401,9 +419,7 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring)
* number of grants.
*/
- total = num_clean;
-
- pr_debug("Going to purge %u persistent grants\n", num_clean);
+ total = 0;
BUG_ON(!list_empty(&ring->persistent_purge_list));
root = &ring->persistent_gnts;
@@ -412,46 +428,37 @@ purge_list:
BUG_ON(persistent_gnt->handle ==
BLKBACK_INVALID_HANDLE);
- if (clean_used) {
- clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
+ if (persistent_gnt->active)
continue;
- }
-
- if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
+ if (!scan_used && !persistent_gnt_timeout(persistent_gnt))
continue;
- if (!scan_used &&
- (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
+ if (scan_used && total >= num_clean)
continue;
rb_erase(&persistent_gnt->node, root);
list_add(&persistent_gnt->remove_node,
&ring->persistent_purge_list);
- if (--num_clean == 0)
- goto finished;
+ total++;
}
/*
- * If we get here it means we also need to start cleaning
+ * Check whether we also need to start cleaning
	 * grants that were used since the last purge in order to cope
	 * with the requested number
*/
- if (!scan_used && !clean_used) {
- pr_debug("Still missing %u purged frames\n", num_clean);
+ if (!scan_used && total < num_clean) {
+ pr_debug("Still missing %u purged frames\n", num_clean - total);
scan_used = true;
goto purge_list;
}
-finished:
- if (!clean_used) {
- pr_debug("Finished scanning for grants to clean, removing used flag\n");
- clean_used = true;
- goto purge_list;
- }
- ring->persistent_gnt_c -= (total - num_clean);
- ring->blkif->vbd.overflow_max_grants = 0;
+ if (total) {
+ ring->persistent_gnt_c -= total;
+ ring->blkif->vbd.overflow_max_grants = 0;
- /* We can defer this work */
- schedule_work(&ring->persistent_purge_work);
- pr_debug("Purged %u/%u\n", (total - num_clean), total);
+ /* We can defer this work */
+ schedule_work(&ring->persistent_purge_work);
+ pr_debug("Purged %u/%u\n", num_clean, total);
+ }
out:
return;
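The purge rework above keys expiry off a jiffies timestamp. A minimal sketch of that timeout test on an invented demo_grant; time_after_eq() from <linux/jiffies.h> expresses the same "jiffies - last_used >= timeout * HZ" comparison in a wrap-safe form:

#include <linux/jiffies.h>
#include <linux/types.h>

struct demo_grant {
	unsigned long last_used;		/* jiffies at the last put */
};

static unsigned int demo_timeout_secs = 60;	/* 0 == never expire */

static bool demo_grant_expired(const struct demo_grant *g)
{
	return demo_timeout_secs &&
	       time_after_eq(jiffies, g->last_used + demo_timeout_secs * HZ);
}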
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index ecb35fe8ca8d..1d3002d773f7 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -233,16 +233,6 @@ struct xen_vbd {
struct backend_info;
-/* Number of available flags */
-#define PERSISTENT_GNT_FLAGS_SIZE 2
-/* This persistent grant is currently in use */
-#define PERSISTENT_GNT_ACTIVE 0
-/*
- * This persistent grant has been used, this flag is set when we remove the
- * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently.
- */
-#define PERSISTENT_GNT_WAS_ACTIVE 1
-
/* Number of requests that we can fit in a ring */
#define XEN_BLKIF_REQS_PER_PAGE 32
@@ -250,7 +240,8 @@ struct persistent_gnt {
struct page *page;
grant_ref_t gnt;
grant_handle_t handle;
- DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
+ unsigned long last_used;
+ bool active;
struct rb_node node;
struct list_head remove_node;
};
@@ -278,7 +269,6 @@ struct xen_blkif_ring {
wait_queue_head_t pending_free_wq;
/* Tree to store persistent grants. */
- spinlock_t pers_gnts_lock;
struct rb_root persistent_gnts;
unsigned int persistent_gnt_c;
atomic_t persistent_gnt_in_use;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8986adab9bf5..a71d817e900d 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -46,6 +46,7 @@
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/list.h>
+#include <linux/workqueue.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
@@ -121,6 +122,8 @@ static inline struct blkif_req *blkif_req(struct request *rq)
static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;
+static struct delayed_work blkfront_work;
+static LIST_HEAD(info_list);
/*
* Maximum number of segments in indirect requests, the actual value used by
@@ -216,6 +219,7 @@ struct blkfront_info
/* Save uncomplete reqs and bios for migration. */
struct list_head requests;
struct bio_list bio_list;
+ struct list_head info_list;
};
static unsigned int nr_minors;
@@ -1759,6 +1763,12 @@ abort_transaction:
return err;
}
+static void free_info(struct blkfront_info *info)
+{
+ list_del(&info->info_list);
+ kfree(info);
+}
+
/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
struct blkfront_info *info)
@@ -1880,7 +1890,10 @@ again:
destroy_blkring:
blkif_free(info, 0);
- kfree(info);
+ mutex_lock(&blkfront_mutex);
+ free_info(info);
+ mutex_unlock(&blkfront_mutex);
+
dev_set_drvdata(&dev->dev, NULL);
return err;
@@ -1991,6 +2004,10 @@ static int blkfront_probe(struct xenbus_device *dev,
info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
dev_set_drvdata(&dev->dev, info);
+ mutex_lock(&blkfront_mutex);
+ list_add(&info->info_list, &info_list);
+ mutex_unlock(&blkfront_mutex);
+
return 0;
}
@@ -2301,6 +2318,12 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
indirect_segments = 0;
info->max_indirect_segments = indirect_segments;
+
+ if (info->feature_persistent) {
+ mutex_lock(&blkfront_mutex);
+ schedule_delayed_work(&blkfront_work, HZ * 10);
+ mutex_unlock(&blkfront_mutex);
+ }
}
/*
@@ -2482,7 +2505,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
mutex_unlock(&info->mutex);
if (!bdev) {
- kfree(info);
+ mutex_lock(&blkfront_mutex);
+ free_info(info);
+ mutex_unlock(&blkfront_mutex);
return 0;
}
@@ -2502,7 +2527,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
if (info && !bdev->bd_openers) {
xlvbd_release_gendisk(info);
disk->private_data = NULL;
- kfree(info);
+ mutex_lock(&blkfront_mutex);
+ free_info(info);
+ mutex_unlock(&blkfront_mutex);
}
mutex_unlock(&bdev->bd_mutex);
@@ -2585,7 +2612,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode)
dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
xlvbd_release_gendisk(info);
disk->private_data = NULL;
- kfree(info);
+ free_info(info);
}
out:
@@ -2618,6 +2645,61 @@ static struct xenbus_driver blkfront_driver = {
.is_ready = blkfront_is_ready,
};
+static void purge_persistent_grants(struct blkfront_info *info)
+{
+ unsigned int i;
+ unsigned long flags;
+
+ for (i = 0; i < info->nr_rings; i++) {
+ struct blkfront_ring_info *rinfo = &info->rinfo[i];
+ struct grant *gnt_list_entry, *tmp;
+
+ spin_lock_irqsave(&rinfo->ring_lock, flags);
+
+ if (rinfo->persistent_gnts_c == 0) {
+ spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+ continue;
+ }
+
+ list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
+ node) {
+ if (gnt_list_entry->gref == GRANT_INVALID_REF ||
+ gnttab_query_foreign_access(gnt_list_entry->gref))
+ continue;
+
+ list_del(&gnt_list_entry->node);
+ gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
+ rinfo->persistent_gnts_c--;
+ __free_page(gnt_list_entry->page);
+ kfree(gnt_list_entry);
+ }
+
+ spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+ }
+}
+
+static void blkfront_delay_work(struct work_struct *work)
+{
+ struct blkfront_info *info;
+ bool need_schedule_work = false;
+
+ mutex_lock(&blkfront_mutex);
+
+ list_for_each_entry(info, &info_list, info_list) {
+ if (info->feature_persistent) {
+ need_schedule_work = true;
+ mutex_lock(&info->mutex);
+ purge_persistent_grants(info);
+ mutex_unlock(&info->mutex);
+ }
+ }
+
+ if (need_schedule_work)
+ schedule_delayed_work(&blkfront_work, HZ * 10);
+
+ mutex_unlock(&blkfront_mutex);
+}
+
static int __init xlblk_init(void)
{
int ret;
@@ -2626,6 +2708,15 @@ static int __init xlblk_init(void)
if (!xen_domain())
return -ENODEV;
+ if (!xen_has_pv_disk_devices())
+ return -ENODEV;
+
+ if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
+ pr_warn("xen_blk: can't get major %d with name %s\n",
+ XENVBD_MAJOR, DEV_NAME);
+ return -ENODEV;
+ }
+
if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
@@ -2641,14 +2732,7 @@ static int __init xlblk_init(void)
xen_blkif_max_queues = nr_cpus;
}
- if (!xen_has_pv_disk_devices())
- return -ENODEV;
-
- if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
- printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
- XENVBD_MAJOR, DEV_NAME);
- return -ENODEV;
- }
+ INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work);
ret = xenbus_register_frontend(&blkfront_driver);
if (ret) {
@@ -2663,6 +2747,8 @@ module_init(xlblk_init);
static void __exit xlblk_exit(void)
{
+ cancel_delayed_work_sync(&blkfront_work);
+
xenbus_unregister_driver(&blkfront_driver);
unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
kfree(minors);
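blkfront_delay_work() above is a self-rearming delayed work item. A hedged sketch of the same pattern with invented demo_* names (the workqueue calls are the standard <linux/workqueue.h> interfaces):

#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/workqueue.h>

static void demo_work_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_work, demo_work_fn);

static bool demo_more_to_do(void)
{
	return false;	/* placeholder: whether another pass is needed */
}

static void demo_work_fn(struct work_struct *work)
{
	/* ... periodic scan goes here ... */

	if (demo_more_to_do())
		schedule_delayed_work(&demo_work, 10 * HZ);	/* re-arm */
}

/* The teardown path must pair this with cancel_delayed_work_sync(&demo_work). */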
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 2df11cc08a46..845b0314ce3a 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -200,6 +200,7 @@ config BT_HCIUART_RTL
depends on BT_HCIUART
depends on BT_HCIUART_SERDEV
depends on GPIOLIB
+ depends on ACPI
select BT_HCIUART_3WIRE
select BT_RTL
help
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index ed2a5c7cb77f..4593baff2bc9 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -144,8 +144,10 @@ static int mtk_setup_fw(struct hci_dev *hdev)
fw_size = fw->size;
	/* The patch header is 30 bytes and should be skipped */
- if (fw_size < 30)
- return -EINVAL;
+ if (fw_size < 30) {
+ err = -EINVAL;
+ goto free_fw;
+ }
fw_size -= 30;
fw_ptr += 30;
@@ -172,8 +174,8 @@ static int mtk_setup_fw(struct hci_dev *hdev)
fw_ptr += dlen;
}
+free_fw:
release_firmware(fw);
-
return err;
}
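The fix above enforces the usual request_firmware()/release_firmware() error-path rule: once the request succeeded, every exit must release the blob. A short sketch under that assumption (demo names and the 30-byte header size are illustrative):

#include <linux/firmware.h>

static int demo_load(struct device *dev)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "demo.bin", dev);
	if (err)
		return err;		/* nothing to release yet */

	if (fw->size < 30) {		/* too small to hold the header */
		err = -EINVAL;
		goto out;		/* don't leak the firmware blob */
	}

	/* ... consume fw->data ... */
out:
	release_firmware(fw);
	return err;
}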
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index c9bac9dc4637..e4fe954e63a9 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -498,32 +498,29 @@ static int sysc_check_registers(struct sysc *ddata)
/**
 * sysc_ioremap - ioremap register space for the interconnect target module
- * @ddata: deviec driver data
+ * @ddata: device driver data
*
* Note that the interconnect target module registers can be anywhere
- * within the first child device address space. For example, SGX has
- * them at offset 0x1fc00 in the 32MB module address space. We just
- * what we need around the interconnect target module registers.
+ * within the interconnect target module range. For example, SGX has
+ * them at offset 0x1fc00 in the 32MB module address space. And cpsw
+ * has them at offset 0x1200 in the CPSW_WR child. Usually the
+ * interconnect target module registers are at the beginning of
+ * the module range though.
*/
static int sysc_ioremap(struct sysc *ddata)
{
- u32 size = 0;
-
- if (ddata->offsets[SYSC_SYSSTATUS] >= 0)
- size = ddata->offsets[SYSC_SYSSTATUS];
- else if (ddata->offsets[SYSC_SYSCONFIG] >= 0)
- size = ddata->offsets[SYSC_SYSCONFIG];
- else if (ddata->offsets[SYSC_REVISION] >= 0)
- size = ddata->offsets[SYSC_REVISION];
- else
- return -EINVAL;
+ int size;
- size &= 0xfff00;
- size += SZ_256;
+ size = max3(ddata->offsets[SYSC_REVISION],
+ ddata->offsets[SYSC_SYSCONFIG],
+ ddata->offsets[SYSC_SYSSTATUS]);
+
+ if (size < 0 || (size + sizeof(u32)) > ddata->module_size)
+ return -EINVAL;
ddata->module_va = devm_ioremap(ddata->dev,
ddata->module_pa,
- size);
+ size + sizeof(u32));
if (!ddata->module_va)
return -EIO;
@@ -1224,10 +1221,10 @@ static int sysc_child_suspend_noirq(struct device *dev)
if (!pm_runtime_status_suspended(dev)) {
error = pm_generic_runtime_suspend(dev);
if (error) {
- dev_err(dev, "%s error at %i: %i\n",
- __func__, __LINE__, error);
+ dev_warn(dev, "%s busy at %i: %i\n",
+ __func__, __LINE__, error);
- return error;
+ return 0;
}
error = sysc_runtime_suspend(ddata->dev);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 113fc6edb2b0..a5d5a96479bf 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2546,7 +2546,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
if (!CDROM_CAN(CDC_SELECT_DISC) ||
(arg == CDSL_CURRENT || arg == CDSL_NONE))
return cdi->ops->drive_status(cdi, CDSL_CURRENT);
- if (((int)arg >= cdi->capacity))
+ if (arg >= cdi->capacity)
return -EINVAL;
return cdrom_slot_status(cdi, arg);
}
diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c
index 740af90a9508..c5edf8f2fd19 100644
--- a/drivers/clk/clk-npcm7xx.c
+++ b/drivers/clk/clk-npcm7xx.c
@@ -558,8 +558,8 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np)
if (!clk_base)
goto npcm7xx_init_error;
- npcm7xx_clk_data = kzalloc(sizeof(*npcm7xx_clk_data->hws) *
- NPCM7XX_NUM_CLOCKS + sizeof(npcm7xx_clk_data), GFP_KERNEL);
+ npcm7xx_clk_data = kzalloc(struct_size(npcm7xx_clk_data, hws,
+ NPCM7XX_NUM_CLOCKS), GFP_KERNEL);
if (!npcm7xx_clk_data)
goto npcm7xx_init_np_err;
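struct_size() above replaces the open-coded allocation size (which, as written, added sizeof(npcm7xx_clk_data) — the pointer — rather than the struct) for a structure with a trailing flexible array. A quick sketch of what it computes, assuming a made-up demo_data; struct_size() lives in <linux/overflow.h> and saturates instead of wrapping on overflow:

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_data {
	unsigned int num;
	struct clk_hw *hws[];		/* flexible array member */
};

static struct demo_data *demo_alloc(unsigned int count)
{
	struct demo_data *d;

	/* sizeof(*d) + count * sizeof(d->hws[0]), overflow-checked */
	d = kzalloc(struct_size(d, hws, count), GFP_KERNEL);
	if (d)
		d->num = count;
	return d;
}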
diff --git a/drivers/clk/x86/clk-st.c b/drivers/clk/x86/clk-st.c
index fb62f3938008..3a0996f2d556 100644
--- a/drivers/clk/x86/clk-st.c
+++ b/drivers/clk/x86/clk-st.c
@@ -46,7 +46,7 @@ static int st_clk_probe(struct platform_device *pdev)
clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents),
0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL);
- clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_25M]->clk);
+ clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_48M]->clk);
hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux",
0, st_data->base + MISCCLKCNTL1, OSCCLKENB,
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 110483f0e3fb..e26a40971b26 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -379,9 +379,20 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (idx == -1)
idx = i; /* first enabled state */
if (s->target_residency > data->predicted_us) {
- if (!tick_nohz_tick_stopped())
+ if (data->predicted_us < TICK_USEC)
break;
+ if (!tick_nohz_tick_stopped()) {
+ /*
+ * If the state selected so far is shallow,
+ * waking up early won't hurt, so retain the
+ * tick in that case and let the governor run
+ * again in the next iteration of the loop.
+ */
+ expected_interval = drv->states[idx].target_residency;
+ break;
+ }
+
/*
* If the state selected so far is shallow and this
* state's target residency matches the time till the
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 6e61cc93c2b0..d7aa7d7ff102 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -679,10 +679,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
int ret = 0;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
- crypto_ablkcipher_set_flags(ablkcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
dev_err(jrdev, "key size mismatch\n");
- return -EINVAL;
+ goto badkey;
}
ctx->cdata.keylen = keylen;
@@ -715,7 +713,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return ret;
badkey:
crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return 0;
+ return -EINVAL;
}
/*
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 578ea63a3109..f26d62e5533a 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
- dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
- dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+ dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}
static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
@@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
- dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
- dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+ dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}
/* RSA Job Completion handler */
@@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
goto unmap_p;
}
- pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+ pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, pdb->tmp1_dma)) {
dev_err(dev, "Unable to map RSA tmp1 memory\n");
goto unmap_q;
}
- pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+ pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, pdb->tmp2_dma)) {
dev_err(dev, "Unable to map RSA tmp2 memory\n");
goto unmap_tmp1;
@@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
return 0;
unmap_tmp1:
- dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
@@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
goto unmap_dq;
}
- pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+ pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, pdb->tmp1_dma)) {
dev_err(dev, "Unable to map RSA tmp1 memory\n");
goto unmap_qinv;
}
- pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+ pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, pdb->tmp2_dma)) {
dev_err(dev, "Unable to map RSA tmp2 memory\n");
goto unmap_tmp1;
@@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
return 0;
unmap_tmp1:
- dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index f4f258075b89..acdd72016ffe 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg)
BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
/* Unmap just-run descriptor so we can post-process */
- dma_unmap_single(dev, jrp->outring[hw_idx].desc,
+ dma_unmap_single(dev,
+ caam_dma_to_cpu(jrp->outring[hw_idx].desc),
jrp->entinfo[sw_idx].desc_size,
DMA_TO_DEVICE);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h
index 9a476bb6d4c7..af596455b420 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -35,6 +35,7 @@ struct nitrox_cmdq {
/* requests in backlog queues */
atomic_t backlog_count;
+ int write_idx;
/* command size 32B/64B */
u8 instr_size;
u8 qno;
@@ -87,7 +88,7 @@ struct nitrox_bh {
struct bh_data *slc;
};
-/* NITROX-5 driver state */
+/* NITROX-V driver state */
#define NITROX_UCODE_LOADED 0
#define NITROX_READY 1
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index ebe267379ac9..4d31df07777f 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq)
cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
cmdq->qsize = (qsize + PKT_IN_ALIGN);
+ cmdq->write_idx = 0;
spin_lock_init(&cmdq->response_lock);
spin_lock_init(&cmdq->cmdq_lock);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index deaefd532aaa..4a362fc22f62 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -42,6 +42,16 @@
* Invalid flag options in AES-CCM IV.
*/
+static inline int incr_index(int index, int count, int max)
+{
+ if ((index + count) >= max)
+ index = index + count - max;
+ else
+ index += count;
+
+ return index;
+}
+
/**
* dma_free_sglist - unmap and free the sg lists.
* @ndev: N5 device
@@ -426,30 +436,29 @@ static void post_se_instr(struct nitrox_softreq *sr,
struct nitrox_cmdq *cmdq)
{
struct nitrox_device *ndev = sr->ndev;
- union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell;
- u64 offset;
+ int idx;
u8 *ent;
spin_lock_bh(&cmdq->cmdq_lock);
- /* get the next write offset */
- offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno);
- pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset);
+ idx = cmdq->write_idx;
/* copy the instruction */
- ent = cmdq->head + pkt_in_baoff_dbell.s.aoff;
+ ent = cmdq->head + (idx * cmdq->instr_size);
memcpy(ent, &sr->instr, cmdq->instr_size);
- /* flush the command queue updates */
- dma_wmb();
- sr->tstamp = jiffies;
atomic_set(&sr->status, REQ_POSTED);
response_list_add(sr, cmdq);
+ sr->tstamp = jiffies;
+ /* flush the command queue updates */
+ dma_wmb();
/* Ring doorbell with count 1 */
writeq(1, cmdq->dbell_csr_addr);
/* orders the doorbell rings */
mmiowb();
+ cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
+
spin_unlock_bh(&cmdq->cmdq_lock);
}
@@ -459,6 +468,9 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
struct nitrox_softreq *sr, *tmp;
int ret = 0;
+ if (!atomic_read(&cmdq->backlog_count))
+ return 0;
+
spin_lock_bh(&cmdq->backlog_lock);
list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
@@ -466,7 +478,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
/* submit until space available */
if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
- ret = -EBUSY;
+ ret = -ENOSPC;
break;
}
/* delete from backlog list */
@@ -491,23 +503,20 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
struct nitrox_cmdq *cmdq = sr->cmdq;
struct nitrox_device *ndev = sr->ndev;
- int ret = -EBUSY;
+
+ /* try to post backlog requests */
+ post_backlog_cmds(cmdq);
if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -EAGAIN;
-
+ return -ENOSPC;
+ /* add to backlog list */
backlog_list_add(sr, cmdq);
- } else {
- ret = post_backlog_cmds(cmdq);
- if (ret) {
- backlog_list_add(sr, cmdq);
- return ret;
- }
- post_se_instr(sr, cmdq);
- ret = -EINPROGRESS;
+ return -EBUSY;
}
- return ret;
+ post_se_instr(sr, cmdq);
+
+ return -EINPROGRESS;
}
/**
@@ -624,11 +633,9 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
*/
sr->instr.fdata[0] = *((u64 *)&req->gph);
sr->instr.fdata[1] = 0;
- /* flush the soft_req changes before posting the cmd */
- wmb();
ret = nitrox_enqueue_request(sr);
- if (ret == -EAGAIN)
+ if (ret == -ENOSPC)
goto send_fail;
return ret;
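incr_index() added above advances a software ring index with wraparound instead of reading the hardware write offset each time. An illustrative caller on a made-up ring structure (only the wraparound arithmetic comes from the patch):

struct demo_ring {
	int write_idx;		/* next free slot */
	int qlen;		/* number of slots in the ring */
};

static void demo_post(struct demo_ring *r)
{
	int idx = r->write_idx;

	/* ... copy the command into slot idx, then ring the doorbell ... */

	/* advance by one, wrapping at qlen: 6 -> 7 -> 0 for qlen == 8 */
	r->write_idx = incr_index(idx, 1, r->qlen);
}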
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h
index a53a0e6ba024..7725b6ee14ef 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -96,6 +96,10 @@ enum csk_flags {
CSK_CONN_INLINE, /* Connection on HW */
};
+enum chtls_cdev_state {
+ CHTLS_CDEV_STATE_UP = 1
+};
+
struct listen_ctx {
struct sock *lsk;
struct chtls_dev *cdev;
@@ -146,6 +150,7 @@ struct chtls_dev {
unsigned int send_page_order;
int max_host_sndbuf;
struct key_map kmap;
+ unsigned int cdev_state;
};
struct chtls_hws {
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 9b07f9165658..f59b044ebd25 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -160,6 +160,7 @@ static void chtls_register_dev(struct chtls_dev *cdev)
tlsdev->hash = chtls_create_hash;
tlsdev->unhash = chtls_destroy_hash;
tls_register_device(&cdev->tlsdev);
+ cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}
static void chtls_unregister_dev(struct chtls_dev *cdev)
@@ -281,8 +282,10 @@ static void chtls_free_all_uld(void)
struct chtls_dev *cdev, *tmp;
mutex_lock(&cdev_mutex);
- list_for_each_entry_safe(cdev, tmp, &cdev_list, list)
- chtls_free_uld(cdev);
+ list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
+ if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
+ chtls_free_uld(cdev);
+ }
mutex_unlock(&cdev_mutex);
}
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 5285ece4f33a..b71895871be3 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
ret = crypto_skcipher_encrypt(req);
skcipher_request_zero(req);
} else {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
-
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
aes_p8_cbc_encrypt(walk.src.virt.addr,
walk.dst.virt.addr,
nbytes & AES_BLOCK_MASK,
&ctx->enc_key, walk.iv, 1);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, &walk, nbytes);
}
-
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
}
return ret;
@@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
ret = crypto_skcipher_decrypt(req);
skcipher_request_zero(req);
} else {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
-
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
aes_p8_cbc_encrypt(walk.src.virt.addr,
walk.dst.virt.addr,
nbytes & AES_BLOCK_MASK,
&ctx->dec_key, walk.iv, 0);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, &walk, nbytes);
}
-
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
}
return ret;
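The change above narrows the VSX critical section so that enable_kernel_vsx() is only held across the actual vector code, never across blkcipher_walk_done(), which may fault or sleep. A hedged sketch of that bracketing on a placeholder routine (the enable/disable and preempt/pagefault calls are the real powerpc and core interfaces):

#include <linux/preempt.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <asm/switch_to.h>

static void demo_vsx_blocks(const u8 *src, u8 *dst, unsigned int nbytes)
{
	/* placeholder for the VSX/AltiVec assembler routine */
}

static void demo_process_chunk(const u8 *src, u8 *dst, unsigned int nbytes)
{
	preempt_disable();
	pagefault_disable();
	enable_kernel_vsx();

	demo_vsx_blocks(src, dst, nbytes);	/* vector-unit work only */

	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();

	/* anything that can fault or sleep belongs after this point */
}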
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 8bd9aff0f55f..e9954a7d4694 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
skcipher_request_zero(req);
} else {
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+
+ ret = blkcipher_walk_virt(desc, &walk);
+
preempt_disable();
pagefault_disable();
enable_kernel_vsx();
- blkcipher_walk_init(&walk, dst, src, nbytes);
-
- ret = blkcipher_walk_virt(desc, &walk);
iv = walk.iv;
memset(tweak, 0, AES_BLOCK_SIZE);
aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
while ((nbytes = walk.nbytes)) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
if (enc)
aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
else
aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, &walk, nbytes);
}
-
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
}
return ret;
}
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index ed3b785bae37..2e5a0faa2cb1 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -30,4 +30,13 @@ config SW_SYNC
WARNING: improper use of this can result in deadlocking kernel
drivers from userspace. Intended for test and debug only.
+config UDMABUF
+ bool "userspace dmabuf misc driver"
+ default n
+ depends on DMA_SHARED_BUFFER
+ depends on MEMFD_CREATE || COMPILE_TEST
+ help
+ A driver to let userspace turn memfd regions into dma-bufs.
+ Qemu can use this to create host dmabufs for guest framebuffers.
+
endmenu
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index c33bf8863147..0913a6ccab5a 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,3 +1,4 @@
obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
+obj-$(CONFIG_UDMABUF) += udmabuf.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 13884474d158..02f7f9a89979 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -405,7 +405,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
|| !exp_info->ops->map_dma_buf
|| !exp_info->ops->unmap_dma_buf
|| !exp_info->ops->release
- || !exp_info->ops->map
|| !exp_info->ops->mmap)) {
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
new file mode 100644
index 000000000000..5b44ef226904
--- /dev/null
+++ b/drivers/dma-buf/udmabuf.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/cred.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/memfd.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/udmabuf.h>
+
+static const u32 list_limit = 1024; /* udmabuf_create_list->count limit */
+static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */
+
+struct udmabuf {
+ pgoff_t pagecount;
+ struct page **pages;
+};
+
+static int udmabuf_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct udmabuf *ubuf = vma->vm_private_data;
+
+ vmf->page = ubuf->pages[vmf->pgoff];
+ get_page(vmf->page);
+ return 0;
+}
+
+static const struct vm_operations_struct udmabuf_vm_ops = {
+ .fault = udmabuf_vm_fault,
+};
+
+static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+ struct udmabuf *ubuf = buf->priv;
+
+ if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
+ return -EINVAL;
+
+ vma->vm_ops = &udmabuf_vm_ops;
+ vma->vm_private_data = ubuf;
+ return 0;
+}
+
+static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
+ enum dma_data_direction direction)
+{
+ struct udmabuf *ubuf = at->dmabuf->priv;
+ struct sg_table *sg;
+ int ret;
+
+ sg = kzalloc(sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return ERR_PTR(-ENOMEM);
+ ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
+ 0, ubuf->pagecount << PAGE_SHIFT,
+ GFP_KERNEL);
+ if (ret < 0)
+ goto err;
+ if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ return sg;
+
+err:
+ sg_free_table(sg);
+ kfree(sg);
+ return ERR_PTR(ret);
+}
+
+static void unmap_udmabuf(struct dma_buf_attachment *at,
+ struct sg_table *sg,
+ enum dma_data_direction direction)
+{
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static void release_udmabuf(struct dma_buf *buf)
+{
+ struct udmabuf *ubuf = buf->priv;
+ pgoff_t pg;
+
+ for (pg = 0; pg < ubuf->pagecount; pg++)
+ put_page(ubuf->pages[pg]);
+ kfree(ubuf->pages);
+ kfree(ubuf);
+}
+
+static void *kmap_udmabuf(struct dma_buf *buf, unsigned long page_num)
+{
+ struct udmabuf *ubuf = buf->priv;
+ struct page *page = ubuf->pages[page_num];
+
+ return kmap(page);
+}
+
+static void kunmap_udmabuf(struct dma_buf *buf, unsigned long page_num,
+ void *vaddr)
+{
+ kunmap(vaddr);
+}
+
+static const struct dma_buf_ops udmabuf_ops = {
+ .map_dma_buf = map_udmabuf,
+ .unmap_dma_buf = unmap_udmabuf,
+ .release = release_udmabuf,
+ .map = kmap_udmabuf,
+ .unmap = kunmap_udmabuf,
+ .mmap = mmap_udmabuf,
+};
+
+#define SEALS_WANTED (F_SEAL_SHRINK)
+#define SEALS_DENIED (F_SEAL_WRITE)
+
+static long udmabuf_create(const struct udmabuf_create_list *head,
+ const struct udmabuf_create_item *list)
+{
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct file *memfd = NULL;
+ struct udmabuf *ubuf;
+ struct dma_buf *buf;
+ pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
+ struct page *page;
+ int seals, ret = -EINVAL;
+ u32 i, flags;
+
+ ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
+ if (!ubuf)
+ return -ENOMEM;
+
+ pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
+ for (i = 0; i < head->count; i++) {
+ if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
+ goto err;
+ if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
+ goto err;
+ ubuf->pagecount += list[i].size >> PAGE_SHIFT;
+ if (ubuf->pagecount > pglimit)
+ goto err;
+ }
+ ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
+ GFP_KERNEL);
+ if (!ubuf->pages) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ pgbuf = 0;
+ for (i = 0; i < head->count; i++) {
+ ret = -EBADFD;
+ memfd = fget(list[i].memfd);
+ if (!memfd)
+ goto err;
+ if (!shmem_mapping(file_inode(memfd)->i_mapping))
+ goto err;
+ seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
+ if (seals == -EINVAL)
+ goto err;
+ ret = -EINVAL;
+ if ((seals & SEALS_WANTED) != SEALS_WANTED ||
+ (seals & SEALS_DENIED) != 0)
+ goto err;
+ pgoff = list[i].offset >> PAGE_SHIFT;
+ pgcnt = list[i].size >> PAGE_SHIFT;
+ for (pgidx = 0; pgidx < pgcnt; pgidx++) {
+ page = shmem_read_mapping_page(
+ file_inode(memfd)->i_mapping, pgoff + pgidx);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto err;
+ }
+ ubuf->pages[pgbuf++] = page;
+ }
+ fput(memfd);
+ memfd = NULL;
+ }
+
+ exp_info.ops = &udmabuf_ops;
+ exp_info.size = ubuf->pagecount << PAGE_SHIFT;
+ exp_info.priv = ubuf;
+
+ buf = dma_buf_export(&exp_info);
+ if (IS_ERR(buf)) {
+ ret = PTR_ERR(buf);
+ goto err;
+ }
+
+ flags = 0;
+ if (head->flags & UDMABUF_FLAGS_CLOEXEC)
+ flags |= O_CLOEXEC;
+ return dma_buf_fd(buf, flags);
+
+err:
+ while (pgbuf > 0)
+ put_page(ubuf->pages[--pgbuf]);
+ if (memfd)
+ fput(memfd);
+ kfree(ubuf->pages);
+ kfree(ubuf);
+ return ret;
+}
+
+static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
+{
+ struct udmabuf_create create;
+ struct udmabuf_create_list head;
+ struct udmabuf_create_item list;
+
+ if (copy_from_user(&create, (void __user *)arg,
+ sizeof(create)))
+ return -EFAULT;
+
+ head.flags = create.flags;
+ head.count = 1;
+ list.memfd = create.memfd;
+ list.offset = create.offset;
+ list.size = create.size;
+
+ return udmabuf_create(&head, &list);
+}
+
+static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
+{
+ struct udmabuf_create_list head;
+ struct udmabuf_create_item *list;
+ int ret = -EINVAL;
+ u32 lsize;
+
+ if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
+ return -EFAULT;
+ if (head.count > list_limit)
+ return -EINVAL;
+ lsize = sizeof(struct udmabuf_create_item) * head.count;
+ list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
+ if (IS_ERR(list))
+ return PTR_ERR(list);
+
+ ret = udmabuf_create(&head, list);
+ kfree(list);
+ return ret;
+}
+
+static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg)
+{
+ long ret;
+
+ switch (ioctl) {
+ case UDMABUF_CREATE:
+ ret = udmabuf_ioctl_create(filp, arg);
+ break;
+ case UDMABUF_CREATE_LIST:
+ ret = udmabuf_ioctl_create_list(filp, arg);
+ break;
+ default:
+ ret = -ENOTTY;
+ break;
+ }
+ return ret;
+}
+
+static const struct file_operations udmabuf_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = udmabuf_ioctl,
+};
+
+static struct miscdevice udmabuf_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "udmabuf",
+ .fops = &udmabuf_fops,
+};
+
+static int __init udmabuf_dev_init(void)
+{
+ return misc_register(&udmabuf_misc);
+}
+
+static void __exit udmabuf_dev_exit(void)
+{
+ misc_deregister(&udmabuf_misc);
+}
+
+module_init(udmabuf_dev_init)
+module_exit(udmabuf_dev_exit)
+
+MODULE_AUTHOR("Gerd Hoffmann <[email protected]>");
+MODULE_LICENSE("GPL v2");
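A hedged userspace sketch of driving the new device: the uapi names (struct udmabuf_create, UDMABUF_CREATE, UDMABUF_FLAGS_CLOEXEC, /dev/udmabuf) match this series, while the buffer size and the minimal error handling are purely illustrative. The memfd must carry F_SEAL_SHRINK and must not carry F_SEAL_WRITE, as checked in udmabuf_create() above:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/memfd.h>
#include <linux/udmabuf.h>

int main(void)
{
	const size_t size = 4 << 20;		/* must be page aligned */
	struct udmabuf_create create;
	int memfd, devfd, dmabuf;

	memfd = memfd_create("demo-fb", MFD_ALLOW_SEALING);
	ftruncate(memfd, size);
	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);	/* wanted by the driver */

	devfd = open("/dev/udmabuf", O_RDWR);

	memset(&create, 0, sizeof(create));
	create.memfd  = memfd;
	create.flags  = UDMABUF_FLAGS_CLOEXEC;
	create.offset = 0;
	create.size   = size;

	dmabuf = ioctl(devfd, UDMABUF_CREATE, &create);
	if (dmabuf < 0)
		perror("UDMABUF_CREATE");

	/* dmabuf now refers to a dma-buf backed by the memfd pages */
	return dmabuf < 0;
}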
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index a6771cef85e2..bc6a16a3c36e 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -18,7 +18,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_encoder.o drm_mode_object.o drm_property.o \
drm_plane.o drm_color_mgmt.o drm_print.o \
drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
- drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o
+ drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
+ drm_atomic_uapi.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_DRM_VM) += drm_vm.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 8836186eb5ef..b7ec2b0e5a9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1104,7 +1104,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
{
int r;
struct dma_fence *fence;
- r = drm_syncobj_find_fence(p->filp, handle, &fence);
+ r = drm_syncobj_find_fence(p->filp, handle, 0, &fence);
if (r)
return r;
@@ -1193,7 +1193,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
int i;
for (i = 0; i < p->num_post_dep_syncobjs; ++i)
- drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
+ drm_syncobj_replace_fence(p->post_dep_syncobjs[i], 0, p->fence);
}
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
@@ -1262,10 +1262,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
error_abort:
dma_fence_put(&job->base.s_fence->finished);
job->base.s_fence = NULL;
- amdgpu_mn_unlock(p->mn);
error_unlock:
amdgpu_job_free(job);
+ amdgpu_mn_unlock(p->mn);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index ff10df4f50d3..53e038963936 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -883,28 +883,6 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static struct drm_driver kms_driver;
-static int amdgpu_kick_out_firmware_fb(struct pci_dev *pdev)
-{
- struct apertures_struct *ap;
- bool primary = false;
-
- ap = alloc_apertures(1);
- if (!ap)
- return -ENOMEM;
-
- ap->ranges[0].base = pci_resource_start(pdev, 0);
- ap->ranges[0].size = pci_resource_len(pdev, 0);
-
-#ifdef CONFIG_X86
- primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-#endif
- drm_fb_helper_remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary);
- kfree(ap);
-
- return 0;
-}
-
-
static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -924,21 +902,17 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
}
/* Get rid of things like offb */
- ret = amdgpu_kick_out_firmware_fb(pdev);
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "amdgpudrmfb");
if (ret)
return ret;
- /* warn the user if they mix atomic and non-atomic capable GPUs */
- if ((kms_driver.driver_features & DRIVER_ATOMIC) && !supports_atomic)
- DRM_ERROR("Mixing atomic and non-atomic capable GPUs!\n");
- /* support atomic early so the atomic debugfs stuff gets created */
- if (supports_atomic)
- kms_driver.driver_features |= DRIVER_ATOMIC;
-
dev = drm_dev_alloc(&kms_driver, &pdev->dev);
if (IS_ERR(dev))
return PTR_ERR(dev);
+ if (!supports_atomic)
+ dev->driver_features &= ~DRIVER_ATOMIC;
+
ret = pci_enable_device(pdev);
if (ret)
goto err_free;
@@ -1190,7 +1164,7 @@ amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
static struct drm_driver kms_driver = {
.driver_features =
- DRIVER_USE_AGP |
+ DRIVER_USE_AGP | DRIVER_ATOMIC |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
.load = amdgpu_driver_load_kms,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8acf8acb588d..6c849e055038 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -990,6 +990,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
aconnector->dc_sink = sink;
if (sink->dc_edid.length == 0) {
aconnector->edid = NULL;
+ drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
} else {
aconnector->edid =
(struct edid *) sink->dc_edid.raw_edid;
@@ -997,10 +998,13 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
drm_connector_update_edid_property(connector,
aconnector->edid);
+ drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
+ aconnector->edid);
}
amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
} else {
+ drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
amdgpu_dm_update_freesync_caps(connector, NULL);
drm_connector_update_edid_property(connector, NULL);
aconnector->num_modes = 0;
@@ -1157,8 +1161,10 @@ static void handle_hpd_rx_irq(void *param)
(dc_link->type == dc_connection_mst_branch))
dm_handle_hpd_rx_irq(aconnector);
- if (dc_link->type != dc_connection_mst_branch)
+ if (dc_link->type != dc_connection_mst_branch) {
+ drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
mutex_unlock(&aconnector->hpd_lock);
+ }
}
static void register_hpd_handlers(struct amdgpu_device *adev)
@@ -2747,6 +2753,7 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.atomic_duplicate_state = dm_crtc_duplicate_state,
.atomic_destroy_state = dm_crtc_destroy_state,
.set_crc_source = amdgpu_dm_crtc_set_crc_source,
+ .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
.enable_vblank = dm_enable_vblank,
.disable_vblank = dm_disable_vblank,
};
@@ -2884,6 +2891,7 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
dm->backlight_dev = NULL;
}
#endif
+ drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 7519f9ad77dd..d4f1bdf93207 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -248,11 +248,14 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
/* amdgpu_dm_crc.c */
#ifdef CONFIG_DEBUG_FS
-int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
- size_t *values_cnt);
+int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name);
+int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc,
+ const char *src_name,
+ size_t *values_cnt);
void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
#else
#define amdgpu_dm_crtc_set_crc_source NULL
+#define amdgpu_dm_crtc_verify_crc_source NULL
#define amdgpu_dm_crtc_handle_crc_irq(x)
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 9bfb040352e9..01fc5717b657 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -46,8 +46,23 @@ static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source)
return AMDGPU_DM_PIPE_CRC_SOURCE_INVALID;
}
-int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
- size_t *values_cnt)
+int
+amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
+ size_t *values_cnt)
+{
+ enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
+
+ if (source < 0) {
+ DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
+ src_name, crtc->index);
+ return -EINVAL;
+ }
+
+ *values_cnt = 3;
+ return 0;
+}
+
+int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
struct dc_stream_state *stream_state = crtc_state->stream;
@@ -83,7 +98,6 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
return -EINVAL;
}
- *values_cnt = 3;
/* Reset crc_skipped on dm state */
crtc_state->crc_skip_count = 0;
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 67683645ce2c..03601d717fed 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -498,6 +498,8 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
+ drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
+ aconnector->base.name, dm->adev->dev);
aconnector->mst_mgr.cbs = &dm_mst_cbs;
drm_dp_mst_topology_mgr_init(
&aconnector->mst_mgr,
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 29409a65d864..49c37f6dd63e 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -78,11 +78,8 @@ static void malidp_plane_reset(struct drm_plane *plane)
kfree(state);
plane->state = NULL;
state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (state) {
- state->base.plane = plane;
- state->base.rotation = DRM_MODE_ROTATE_0;
- plane->state = &state->base;
- }
+ if (state)
+ __drm_atomic_helper_plane_reset(plane, &state->base);
}
static struct
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index eb7dfb65ef47..8d770641fcc4 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -8,6 +8,7 @@
*/
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/armada_drm.h>
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index d73281095fac..9e34bce089d0 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -101,18 +101,34 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
(adj->crtc_hdisplay - 1) |
((adj->crtc_vdisplay - 1) << 16));
- cfg = 0;
+ cfg = ATMEL_HLCDC_CLKSEL;
- prate = clk_get_rate(crtc->dc->hlcdc->sys_clk);
+ prate = 2 * clk_get_rate(crtc->dc->hlcdc->sys_clk);
mode_rate = adj->crtc_clock * 1000;
- if ((prate / 2) < mode_rate) {
- prate *= 2;
- cfg |= ATMEL_HLCDC_CLKSEL;
- }
div = DIV_ROUND_UP(prate, mode_rate);
- if (div < 2)
+ if (div < 2) {
div = 2;
+ } else if (ATMEL_HLCDC_CLKDIV(div) & ~ATMEL_HLCDC_CLKDIV_MASK) {
+ /* The divider ended up too big, try a lower base rate. */
+ cfg &= ~ATMEL_HLCDC_CLKSEL;
+ prate /= 2;
+ div = DIV_ROUND_UP(prate, mode_rate);
+ if (ATMEL_HLCDC_CLKDIV(div) & ~ATMEL_HLCDC_CLKDIV_MASK)
+ div = ATMEL_HLCDC_CLKDIV_MASK;
+ } else {
+ int div_low = prate / mode_rate;
+
+ if (div_low >= 2 &&
+ ((prate / div_low - mode_rate) <
+ 10 * (mode_rate - prate / div)))
+			/*
+			 * Prefer a pixel clock above the requested rate as
+			 * long as its error is less than 10 times the error
+			 * of undershooting with the larger divider.
+			 */
+ div = div_low;
+ }
cfg |= ATMEL_HLCDC_CLKDIV(div);
@@ -226,6 +242,55 @@ static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c,
#define ATMEL_HLCDC_RGB888_OUTPUT BIT(3)
#define ATMEL_HLCDC_OUTPUT_MODE_MASK GENMASK(3, 0)
+static int atmel_hlcdc_connector_output_mode(struct drm_connector_state *state)
+{
+ struct drm_connector *connector = state->connector;
+ struct drm_display_info *info = &connector->display_info;
+ struct drm_encoder *encoder;
+ unsigned int supported_fmts = 0;
+ int j;
+
+ encoder = state->best_encoder;
+ if (!encoder)
+ encoder = connector->encoder;
+
+ switch (atmel_hlcdc_encoder_get_bus_fmt(encoder)) {
+ case 0:
+ break;
+ case MEDIA_BUS_FMT_RGB444_1X12:
+ return ATMEL_HLCDC_RGB444_OUTPUT;
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ return ATMEL_HLCDC_RGB565_OUTPUT;
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ return ATMEL_HLCDC_RGB666_OUTPUT;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ return ATMEL_HLCDC_RGB888_OUTPUT;
+ default:
+ return -EINVAL;
+ }
+
+ for (j = 0; j < info->num_bus_formats; j++) {
+ switch (info->bus_formats[j]) {
+ case MEDIA_BUS_FMT_RGB444_1X12:
+ supported_fmts |= ATMEL_HLCDC_RGB444_OUTPUT;
+ break;
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ supported_fmts |= ATMEL_HLCDC_RGB565_OUTPUT;
+ break;
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ supported_fmts |= ATMEL_HLCDC_RGB666_OUTPUT;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ supported_fmts |= ATMEL_HLCDC_RGB888_OUTPUT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return supported_fmts;
+}
+
static int atmel_hlcdc_crtc_select_output_mode(struct drm_crtc_state *state)
{
unsigned int output_fmts = ATMEL_HLCDC_OUTPUT_MODE_MASK;
@@ -238,31 +303,12 @@ static int atmel_hlcdc_crtc_select_output_mode(struct drm_crtc_state *state)
crtc = drm_crtc_to_atmel_hlcdc_crtc(state->crtc);
for_each_new_connector_in_state(state->state, connector, cstate, i) {
- struct drm_display_info *info = &connector->display_info;
unsigned int supported_fmts = 0;
- int j;
if (!cstate->crtc)
continue;
- for (j = 0; j < info->num_bus_formats; j++) {
- switch (info->bus_formats[j]) {
- case MEDIA_BUS_FMT_RGB444_1X12:
- supported_fmts |= ATMEL_HLCDC_RGB444_OUTPUT;
- break;
- case MEDIA_BUS_FMT_RGB565_1X16:
- supported_fmts |= ATMEL_HLCDC_RGB565_OUTPUT;
- break;
- case MEDIA_BUS_FMT_RGB666_1X18:
- supported_fmts |= ATMEL_HLCDC_RGB666_OUTPUT;
- break;
- case MEDIA_BUS_FMT_RGB888_1X24:
- supported_fmts |= ATMEL_HLCDC_RGB888_OUTPUT;
- break;
- default:
- break;
- }
- }
+ supported_fmts = atmel_hlcdc_connector_output_mode(cstate);
if (crtc->dc->desc->conflicting_output_formats)
output_fmts &= supported_fmts;
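The mode_set_nofb change above starts from twice the system clock (CLKSEL set), rounds the divider up, falls back to the undoubled clock when the divider overflows its register field, and otherwise may pick the rounded-down divider if the resulting overshoot is small. A standalone sketch of that selection follows; the 8-bit field width and the sample rates are assumptions, not values read from the hardware.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define CLKDIV_MAX		0xff	/* assumed width of the CLKDIV field */

static unsigned long pick_div(unsigned long sys_rate, unsigned long mode_rate,
			      int *use_2x_clk)
{
	unsigned long prate, div, div_low;

	*use_2x_clk = 1;			/* start from 2 * sys_clk (CLKSEL) */
	prate = 2 * sys_rate;
	div = DIV_ROUND_UP(prate, mode_rate);
	if (div < 2) {
		div = 2;
	} else if (div > CLKDIV_MAX) {
		*use_2x_clk = 0;		/* divider too big: drop to sys_clk */
		prate /= 2;
		div = DIV_ROUND_UP(prate, mode_rate);
		if (div > CLKDIV_MAX)
			div = CLKDIV_MAX;
	} else {
		div_low = prate / mode_rate;	/* rounds down: rate >= mode_rate */
		if (div_low >= 2 &&
		    (prate / div_low - mode_rate) <
		    10 * (mode_rate - prate / div))
			div = div_low;		/* small overshoot beats undershoot */
	}
	return div;
}

int main(void)
{
	int sel;
	unsigned long div = pick_div(133000000UL, 71100000UL, &sel);

	printf("div=%lu clksel=%d\n", div, sel);
	return 0;
}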
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index 60c937f42114..4cc1e03f0aee 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -441,5 +441,6 @@ void atmel_hlcdc_crtc_irq(struct drm_crtc *c);
int atmel_hlcdc_crtc_create(struct drm_device *dev);
int atmel_hlcdc_create_outputs(struct drm_device *dev);
+int atmel_hlcdc_encoder_get_bus_fmt(struct drm_encoder *encoder);
#endif /* DRM_ATMEL_HLCDC_H */
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 8db51fb131db..f73d8a92274e 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -27,33 +27,94 @@
#include "atmel_hlcdc_dc.h"
+struct atmel_hlcdc_rgb_output {
+ struct drm_encoder encoder;
+ int bus_fmt;
+};
+
static const struct drm_encoder_funcs atmel_hlcdc_panel_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
+static struct atmel_hlcdc_rgb_output *
+atmel_hlcdc_encoder_to_rgb_output(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct atmel_hlcdc_rgb_output, encoder);
+}
+
+int atmel_hlcdc_encoder_get_bus_fmt(struct drm_encoder *encoder)
+{
+ struct atmel_hlcdc_rgb_output *output;
+
+ output = atmel_hlcdc_encoder_to_rgb_output(encoder);
+
+ return output->bus_fmt;
+}
+
+static int atmel_hlcdc_of_bus_fmt(const struct device_node *ep)
+{
+ u32 bus_width;
+ int ret;
+
+ ret = of_property_read_u32(ep, "bus-width", &bus_width);
+ if (ret == -EINVAL)
+ return 0;
+ if (ret)
+ return ret;
+
+ switch (bus_width) {
+ case 12:
+ return MEDIA_BUS_FMT_RGB444_1X12;
+ case 16:
+ return MEDIA_BUS_FMT_RGB565_1X16;
+ case 18:
+ return MEDIA_BUS_FMT_RGB666_1X18;
+ case 24:
+ return MEDIA_BUS_FMT_RGB888_1X24;
+ default:
+ return -EINVAL;
+ }
+}
+
static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
{
- struct drm_encoder *encoder;
+ struct atmel_hlcdc_rgb_output *output;
+ struct device_node *ep;
struct drm_panel *panel;
struct drm_bridge *bridge;
int ret;
+ ep = of_graph_get_endpoint_by_regs(dev->dev->of_node, 0, endpoint);
+ if (!ep)
+ return -ENODEV;
+
ret = drm_of_find_panel_or_bridge(dev->dev->of_node, 0, endpoint,
&panel, &bridge);
- if (ret)
+ if (ret) {
+ of_node_put(ep);
return ret;
+ }
- encoder = devm_kzalloc(dev->dev, sizeof(*encoder), GFP_KERNEL);
- if (!encoder)
+ output = devm_kzalloc(dev->dev, sizeof(*output), GFP_KERNEL);
+ if (!output) {
+ of_node_put(ep);
+ return -ENOMEM;
+ }
+
+ output->bus_fmt = atmel_hlcdc_of_bus_fmt(ep);
+ of_node_put(ep);
+ if (output->bus_fmt < 0) {
+ dev_err(dev->dev, "endpoint %d: invalid bus width\n", endpoint);
return -EINVAL;
+ }
- ret = drm_encoder_init(dev, encoder,
+ ret = drm_encoder_init(dev, &output->encoder,
&atmel_hlcdc_panel_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
if (ret)
return ret;
- encoder->possible_crtcs = 0x1;
+ output->encoder.possible_crtcs = 0x1;
if (panel) {
bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_Unknown);
@@ -62,7 +123,7 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
}
if (bridge) {
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(&output->encoder, bridge, NULL);
if (!ret)
return 0;
@@ -70,7 +131,7 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
drm_panel_bridge_remove(bridge);
}
- drm_encoder_cleanup(encoder);
+ drm_encoder_cleanup(&output->encoder);
return ret;
}
@@ -78,12 +139,23 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
int atmel_hlcdc_create_outputs(struct drm_device *dev)
{
int endpoint, ret = 0;
+ int attached = 0;
- for (endpoint = 0; !ret; endpoint++)
+ /*
+ * Always scan the first few endpoints even if we get -ENODEV,
+ * but keep going after that as long as we keep getting hits.
+ */
+ for (endpoint = 0; !ret || endpoint < 4; endpoint++) {
ret = atmel_hlcdc_attach_endpoint(dev, endpoint);
+ if (ret == -ENODEV)
+ continue;
+ if (ret)
+ break;
+ attached++;
+ }
/* At least one device was successfully attached. */
- if (ret == -ENODEV && endpoint)
+ if (ret == -ENODEV && attached)
return 0;
return ret;
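The rewritten atmel_hlcdc_create_outputs() above tolerates holes in the endpoint numbering: it probes at least endpoints 0..3 even when some return -ENODEV, and succeeds as long as at least one endpoint attached. A standalone sketch of that control flow; fake_attach() and its results[] table stand in for atmel_hlcdc_attach_endpoint().

#include <stdio.h>

#define ENODEV 19

static int fake_attach(int endpoint)
{
	/* endpoint 0 is missing, endpoint 1 attaches, the rest are absent */
	static const int results[] = { -ENODEV, 0, -ENODEV, -ENODEV };

	if (endpoint < (int)(sizeof(results) / sizeof(results[0])))
		return results[endpoint];
	return -ENODEV;
}

static int create_outputs(void)
{
	int endpoint, ret = 0, attached = 0;

	/* Scan at least endpoints 0..3, then continue while we get hits. */
	for (endpoint = 0; !ret || endpoint < 4; endpoint++) {
		ret = fake_attach(endpoint);
		if (ret == -ENODEV)
			continue;
		if (ret)
			break;
		attached++;
	}

	/* At least one endpoint attached: a trailing -ENODEV is not an error. */
	if (ret == -ENODEV && attached)
		return 0;
	return ret;
}

int main(void)
{
	printf("create_outputs() = %d\n", create_outputs());
	return 0;
}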
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 04440064b9b7..9330a076e15a 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -942,10 +942,7 @@ static void atmel_hlcdc_plane_reset(struct drm_plane *p)
"Failed to allocate initial plane state\n");
return;
}
-
- p->state = &state->base;
- p->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
- p->state->plane = p;
+ __drm_atomic_helper_plane_reset(p, &state->base);
}
}
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 375bf92cd04f..b4f6bb521900 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -51,11 +51,6 @@ enum bochs_types {
BOCHS_UNKNOWN,
};
-struct bochs_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
-};
-
struct bochs_device {
/* hw */
void __iomem *mmio;
@@ -88,15 +83,11 @@ struct bochs_device {
/* fbdev */
struct {
- struct bochs_framebuffer gfb;
+ struct drm_framebuffer *fb;
struct drm_fb_helper helper;
- int size;
- bool initialized;
} fb;
};
-#define to_bochs_framebuffer(x) container_of(x, struct bochs_framebuffer, base)
-
struct bochs_bo {
struct ttm_buffer_object bo;
struct ttm_placement placement;
@@ -126,7 +117,7 @@ static inline u64 bochs_bo_mmap_offset(struct bochs_bo *bo)
/* ---------------------------------------------------------------------- */
/* bochs_hw.c */
-int bochs_hw_init(struct drm_device *dev, uint32_t flags);
+int bochs_hw_init(struct drm_device *dev);
void bochs_hw_fini(struct drm_device *dev);
void bochs_hw_setmode(struct bochs_device *bochs,
@@ -148,15 +139,9 @@ int bochs_dumb_create(struct drm_file *file, struct drm_device *dev,
int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
-int bochs_framebuffer_init(struct drm_device *dev,
- struct bochs_framebuffer *gfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr);
int bochs_bo_unpin(struct bochs_bo *bo);
-extern const struct drm_mode_config_funcs bochs_mode_funcs;
-
/* bochs_kms.c */
int bochs_kms_init(struct bochs_device *bochs);
void bochs_kms_fini(struct bochs_device *bochs);
@@ -164,3 +149,5 @@ void bochs_kms_fini(struct bochs_device *bochs);
/* bochs_fbdev.c */
int bochs_fbdev_init(struct bochs_device *bochs);
void bochs_fbdev_fini(struct bochs_device *bochs);
+
+extern const struct drm_mode_config_funcs bochs_mode_funcs;
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 7b20318483e4..f3dd66ae990a 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -35,7 +35,7 @@ static void bochs_unload(struct drm_device *dev)
dev->dev_private = NULL;
}
-static int bochs_load(struct drm_device *dev, unsigned long flags)
+static int bochs_load(struct drm_device *dev)
{
struct bochs_device *bochs;
int ret;
@@ -46,7 +46,7 @@ static int bochs_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = bochs;
bochs->dev = dev;
- ret = bochs_hw_init(dev, flags);
+ ret = bochs_hw_init(dev);
if (ret)
goto err;
@@ -82,8 +82,6 @@ static const struct file_operations bochs_fops = {
static struct drm_driver bochs_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET,
- .load = bochs_load,
- .unload = bochs_unload,
.fops = &bochs_fops,
.name = "bochs-drm",
.desc = "bochs dispi vga interface (qemu stdvga)",
@@ -107,11 +105,7 @@ static int bochs_pm_suspend(struct device *dev)
drm_kms_helper_poll_disable(drm_dev);
- if (bochs->fb.initialized) {
- console_lock();
- drm_fb_helper_set_suspend(&bochs->fb.helper, 1);
- console_unlock();
- }
+ drm_fb_helper_set_suspend_unlocked(&bochs->fb.helper, 1);
return 0;
}
@@ -124,11 +118,7 @@ static int bochs_pm_resume(struct device *dev)
drm_helper_resume_force_mode(drm_dev);
- if (bochs->fb.initialized) {
- console_lock();
- drm_fb_helper_set_suspend(&bochs->fb.helper, 0);
- console_unlock();
- }
+ drm_fb_helper_set_suspend_unlocked(&bochs->fb.helper, 0);
drm_kms_helper_poll_enable(drm_dev);
return 0;
@@ -143,25 +133,10 @@ static const struct dev_pm_ops bochs_pm_ops = {
/* ---------------------------------------------------------------------- */
/* pci interface */
-static int bochs_kick_out_firmware_fb(struct pci_dev *pdev)
-{
- struct apertures_struct *ap;
-
- ap = alloc_apertures(1);
- if (!ap)
- return -ENOMEM;
-
- ap->ranges[0].base = pci_resource_start(pdev, 0);
- ap->ranges[0].size = pci_resource_len(pdev, 0);
- drm_fb_helper_remove_conflicting_framebuffers(ap, "bochsdrmfb", false);
- kfree(ap);
-
- return 0;
-}
-
static int bochs_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
+ struct drm_device *dev;
unsigned long fbsize;
int ret;
@@ -171,18 +146,41 @@ static int bochs_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
}
- ret = bochs_kick_out_firmware_fb(pdev);
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "bochsdrmfb");
if (ret)
return ret;
- return drm_get_pci_dev(pdev, ent, &bochs_driver);
+ dev = drm_dev_alloc(&bochs_driver, &pdev->dev);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ ret = bochs_load(dev);
+ if (ret)
+ goto err_free_dev;
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ goto err_unload;
+
+ return ret;
+
+err_unload:
+ bochs_unload(dev);
+err_free_dev:
+ drm_dev_put(dev);
+ return ret;
}
static void bochs_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- drm_put_dev(dev);
+ drm_dev_unregister(dev);
+ bochs_unload(dev);
+ drm_dev_put(dev);
}
static const struct pci_device_id bochs_pci_tbl[] = {
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 14eb8d0d5a00..8f4d6c052f7b 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -6,6 +6,7 @@
*/
#include "bochs.h"
+#include <drm/drm_gem_framebuffer_helper.h>
/* ---------------------------------------------------------------------- */
@@ -13,9 +14,7 @@ static int bochsfb_mmap(struct fb_info *info,
struct vm_area_struct *vma)
{
struct drm_fb_helper *fb_helper = info->par;
- struct bochs_device *bochs =
- container_of(fb_helper, struct bochs_device, fb.helper);
- struct bochs_bo *bo = gem_to_bochs_bo(bochs->fb.gfb.obj);
+ struct bochs_bo *bo = gem_to_bochs_bo(fb_helper->fb->obj[0]);
return ttm_fbdev_mmap(vma, &bo->bo);
}
@@ -101,19 +100,20 @@ static int bochsfb_create(struct drm_fb_helper *helper,
/* init fb device */
info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info))
+ if (IS_ERR(info)) {
+ DRM_ERROR("Failed to allocate fbi: %ld\n", PTR_ERR(info));
return PTR_ERR(info);
+ }
info->par = &bochs->fb.helper;
- ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj);
- if (ret)
- return ret;
-
- bochs->fb.size = size;
+ fb = drm_gem_fbdev_fb_create(bochs->dev, sizes, 0, gobj, NULL);
+ if (IS_ERR(fb)) {
+ DRM_ERROR("Failed to create framebuffer: %ld\n", PTR_ERR(fb));
+ return PTR_ERR(fb);
+ }
/* setup helper */
- fb = &bochs->fb.gfb.base;
bochs->fb.helper.fb = fb;
strcpy(info->fix.id, "bochsdrmfb");
@@ -130,27 +130,6 @@ static int bochsfb_create(struct drm_fb_helper *helper,
drm_vma_offset_remove(&bo->bo.bdev->vma_manager, &bo->bo.vma_node);
info->fix.smem_start = 0;
info->fix.smem_len = size;
-
- bochs->fb.initialized = true;
- return 0;
-}
-
-static int bochs_fbdev_destroy(struct bochs_device *bochs)
-{
- struct bochs_framebuffer *gfb = &bochs->fb.gfb;
-
- DRM_DEBUG_DRIVER("\n");
-
- drm_fb_helper_unregister_fbi(&bochs->fb.helper);
-
- if (gfb->obj) {
- drm_gem_object_unreference_unlocked(gfb->obj);
- gfb->obj = NULL;
- }
-
- drm_framebuffer_unregister_private(&gfb->base);
- drm_framebuffer_cleanup(&gfb->base);
-
return 0;
}
@@ -158,41 +137,17 @@ static const struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
.fb_probe = bochsfb_create,
};
+const struct drm_mode_config_funcs bochs_mode_funcs = {
+ .fb_create = drm_gem_fb_create,
+};
+
int bochs_fbdev_init(struct bochs_device *bochs)
{
- int ret;
-
- drm_fb_helper_prepare(bochs->dev, &bochs->fb.helper,
- &bochs_fb_helper_funcs);
-
- ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper, 1);
- if (ret)
- return ret;
-
- ret = drm_fb_helper_single_add_all_connectors(&bochs->fb.helper);
- if (ret)
- goto fini;
-
- drm_helper_disable_unused_functions(bochs->dev);
-
- ret = drm_fb_helper_initial_config(&bochs->fb.helper, 32);
- if (ret)
- goto fini;
-
- return 0;
-
-fini:
- drm_fb_helper_fini(&bochs->fb.helper);
- return ret;
+ return drm_fb_helper_fbdev_setup(bochs->dev, &bochs->fb.helper,
+ &bochs_fb_helper_funcs, 32, 1);
}
void bochs_fbdev_fini(struct bochs_device *bochs)
{
- if (bochs->fb.initialized)
- bochs_fbdev_destroy(bochs);
-
- if (bochs->fb.helper.fbdev)
- drm_fb_helper_fini(&bochs->fb.helper);
-
- bochs->fb.initialized = false;
+ drm_fb_helper_fbdev_teardown(bochs->dev);
}
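drm_fb_helper_fbdev_setup()/_teardown() replace the boilerplate that bochs_fbdev_init() used to open-code. A sketch restating the removed sequence so the equivalence is visible; the generic helper differs in details (for example, the legacy drm_helper_disable_unused_functions() call is simply dropped), so treat this as an approximation rather than the helper's actual body.

#include <drm/drm_fb_helper.h>

static int example_fbdev_setup(struct drm_device *dev,
			       struct drm_fb_helper *helper,
			       const struct drm_fb_helper_funcs *funcs,
			       int bpp, int max_conn)
{
	int ret;

	drm_fb_helper_prepare(dev, helper, funcs);

	ret = drm_fb_helper_init(dev, helper, max_conn);
	if (ret)
		return ret;

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret)
		goto fini;

	ret = drm_fb_helper_initial_config(helper, bpp);
	if (ret)
		goto fini;

	return 0;

fini:
	drm_fb_helper_fini(helper);
	return ret;
}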
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index a39b0343c197..16e4f1caccca 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -47,7 +47,7 @@ static void bochs_dispi_write(struct bochs_device *bochs, u16 reg, u16 val)
}
}
-int bochs_hw_init(struct drm_device *dev, uint32_t flags)
+int bochs_hw_init(struct drm_device *dev)
{
struct bochs_device *bochs = dev->dev_private;
struct pci_dev *pdev = dev->pdev;
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index ca5a9afdd5cf..ea9a43d31bf1 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -35,14 +35,12 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
{
struct bochs_device *bochs =
container_of(crtc, struct bochs_device, crtc);
- struct bochs_framebuffer *bochs_fb;
struct bochs_bo *bo;
u64 gpu_addr = 0;
int ret;
if (old_fb) {
- bochs_fb = to_bochs_framebuffer(old_fb);
- bo = gem_to_bochs_bo(bochs_fb->obj);
+ bo = gem_to_bochs_bo(old_fb->obj[0]);
ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
if (ret) {
DRM_ERROR("failed to reserve old_fb bo\n");
@@ -55,8 +53,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
if (WARN_ON(crtc->primary->fb == NULL))
return -EINVAL;
- bochs_fb = to_bochs_framebuffer(crtc->primary->fb);
- bo = gem_to_bochs_bo(bochs_fb->obj);
+ bo = gem_to_bochs_bo(crtc->primary->fb->obj[0]);
ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 39cd08416773..a61c1ecb2bdc 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -430,7 +430,7 @@ static void bochs_bo_unref(struct bochs_bo **bo)
return;
tbo = &((*bo)->bo);
- ttm_bo_unref(&tbo);
+ ttm_bo_put(tbo);
*bo = NULL;
}
@@ -457,77 +457,3 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
drm_gem_object_unreference_unlocked(obj);
return 0;
}
-
-/* ---------------------------------------------------------------------- */
-
-static void bochs_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct bochs_framebuffer *bochs_fb = to_bochs_framebuffer(fb);
-
- drm_gem_object_unreference_unlocked(bochs_fb->obj);
- drm_framebuffer_cleanup(fb);
- kfree(fb);
-}
-
-static const struct drm_framebuffer_funcs bochs_fb_funcs = {
- .destroy = bochs_user_framebuffer_destroy,
-};
-
-int bochs_framebuffer_init(struct drm_device *dev,
- struct bochs_framebuffer *gfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
-{
- int ret;
-
- drm_helper_mode_fill_fb_struct(dev, &gfb->base, mode_cmd);
- gfb->obj = obj;
- ret = drm_framebuffer_init(dev, &gfb->base, &bochs_fb_funcs);
- if (ret) {
- DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
- return ret;
- }
- return 0;
-}
-
-static struct drm_framebuffer *
-bochs_user_framebuffer_create(struct drm_device *dev,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct drm_gem_object *obj;
- struct bochs_framebuffer *bochs_fb;
- int ret;
-
- DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n",
- mode_cmd->width, mode_cmd->height,
- (mode_cmd->pixel_format) & 0xff,
- (mode_cmd->pixel_format >> 8) & 0xff,
- (mode_cmd->pixel_format >> 16) & 0xff,
- (mode_cmd->pixel_format >> 24) & 0xff);
-
- if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888)
- return ERR_PTR(-ENOENT);
-
- obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
- if (obj == NULL)
- return ERR_PTR(-ENOENT);
-
- bochs_fb = kzalloc(sizeof(*bochs_fb), GFP_KERNEL);
- if (!bochs_fb) {
- drm_gem_object_unreference_unlocked(obj);
- return ERR_PTR(-ENOMEM);
- }
-
- ret = bochs_framebuffer_init(dev, bochs_fb, mode_cmd, obj);
- if (ret) {
- drm_gem_object_unreference_unlocked(obj);
- kfree(bochs_fb);
- return ERR_PTR(ret);
- }
- return &bochs_fb->base;
-}
-
-const struct drm_mode_config_funcs bochs_mode_funcs = {
- .fb_create = bochs_user_framebuffer_create,
-};
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index bf6cad6c9178..9eeb8ef0b174 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -112,6 +112,14 @@ config DRM_THINE_THC63LVD1024
---help---
Thine THC63LVD1024 LVDS/parallel converter driver.
+config DRM_TOSHIBA_TC358764
+ tristate "TC358764 DSI/LVDS bridge"
+ depends on DRM && DRM_PANEL
+ depends on OF
+ select DRM_MIPI_DSI
+ help
+ Toshiba TC358764 DSI/LVDS bridge driver.
+
config DRM_TOSHIBA_TC358767
tristate "Toshiba TC358767 eDP bridge"
depends on OF
@@ -128,6 +136,16 @@ config DRM_TI_TFP410
---help---
Texas Instruments TFP410 DVI/HDMI Transmitter driver
+config DRM_TI_SN65DSI86
+ tristate "TI SN65DSI86 DSI to eDP bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ select DRM_PANEL
+ select DRM_MIPI_DSI
+ help
+ Texas Instruments SN65DSI86 DSI to eDP Bridge driver
+
source "drivers/gpu/drm/bridge/analogix/Kconfig"
source "drivers/gpu/drm/bridge/adv7511/Kconfig"
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 35f88d48ec20..4934fcf5a6f8 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -10,8 +10,10 @@ obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_SII9234) += sii9234.o
obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o
+obj-$(CONFIG_DRM_TOSHIBA_TC358764) += tc358764.o
obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
+obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
obj-y += synopsys/
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index d68986cea132..2f21d3b6850b 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -554,7 +554,7 @@ static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp)
if (retval < 0)
return retval;
- dev_info(dp->dev, "Link Training Clock Recovery success\n");
+ dev_dbg(dp->dev, "Link Training Clock Recovery success\n");
dp->link_train.lt_state = EQUALIZER_TRAINING;
} else {
for (lane = 0; lane < lane_count; lane++) {
@@ -634,7 +634,7 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
if (retval < 0)
return retval;
- dev_info(dp->dev, "Link Training success!\n");
+ dev_dbg(dp->dev, "Link Training success!\n");
analogix_dp_get_link_bandwidth(dp, &reg);
dp->link_train.link_rate = reg;
dev_dbg(dp->dev, "final bandwidth = %.2x\n",
diff --git a/drivers/gpu/drm/bridge/synopsys/Makefile b/drivers/gpu/drm/bridge/synopsys/Makefile
index 5dad97d920be..3e1b1e3d9533 100644
--- a/drivers/gpu/drm/bridge/synopsys/Makefile
+++ b/drivers/gpu/drm/bridge/synopsys/Makefile
@@ -1,5 +1,3 @@
-#ccflags-y := -Iinclude/drm
-
obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
obj-$(CONFIG_DRM_DW_HDMI_I2S_AUDIO) += dw-hdmi-i2s-audio.o
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
new file mode 100644
index 000000000000..ee6b98efa9c2
--- /dev/null
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Samsung Electronics Co., Ltd
+ *
+ * Authors:
+ * Andrzej Hajda <[email protected]>
+ * Maciej Purski <[email protected]>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drmP.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+#include <video/mipi_display.h>
+
+#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
+#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
+
+/* PPI layer registers */
+#define PPI_STARTPPI 0x0104 /* START control bit */
+#define PPI_LPTXTIMECNT 0x0114 /* LPTX timing signal */
+#define PPI_LANEENABLE 0x0134 /* Enables each lane */
+#define PPI_TX_RX_TA 0x013C /* BTA timing parameters */
+#define PPI_D0S_CLRSIPOCOUNT 0x0164 /* Assertion timer for Lane 0 */
+#define PPI_D1S_CLRSIPOCOUNT 0x0168 /* Assertion timer for Lane 1 */
+#define PPI_D2S_CLRSIPOCOUNT 0x016C /* Assertion timer for Lane 2 */
+#define PPI_D3S_CLRSIPOCOUNT 0x0170 /* Assertion timer for Lane 3 */
+#define PPI_START_FUNCTION 1
+
+/* DSI layer registers */
+#define DSI_STARTDSI 0x0204 /* START control bit of DSI-TX */
+#define DSI_LANEENABLE 0x0210 /* Enables each lane */
+#define DSI_RX_START 1
+
+/* Video path registers */
+#define VP_CTRL 0x0450 /* Video Path Control */
+#define VP_CTRL_MSF(v) FLD_VAL(v, 0, 0) /* Magic square in RGB666 */
+#define VP_CTRL_VTGEN(v) FLD_VAL(v, 4, 4) /* Use chip clock for timing */
+#define VP_CTRL_EVTMODE(v) FLD_VAL(v, 5, 5) /* Event mode */
+#define VP_CTRL_RGB888(v) FLD_VAL(v, 8, 8) /* RGB888 mode */
+#define VP_CTRL_VSDELAY(v) FLD_VAL(v, 31, 20) /* VSYNC delay */
+#define VP_CTRL_HSPOL BIT(17) /* Polarity of HSYNC signal */
+#define VP_CTRL_DEPOL BIT(18) /* Polarity of DE signal */
+#define VP_CTRL_VSPOL BIT(19) /* Polarity of VSYNC signal */
+#define VP_HTIM1 0x0454 /* Horizontal Timing Control 1 */
+#define VP_HTIM1_HBP(v) FLD_VAL(v, 24, 16)
+#define VP_HTIM1_HSYNC(v) FLD_VAL(v, 8, 0)
+#define VP_HTIM2 0x0458 /* Horizontal Timing Control 2 */
+#define VP_HTIM2_HFP(v) FLD_VAL(v, 24, 16)
+#define VP_HTIM2_HACT(v) FLD_VAL(v, 10, 0)
+#define VP_VTIM1 0x045C /* Vertical Timing Control 1 */
+#define VP_VTIM1_VBP(v) FLD_VAL(v, 23, 16)
+#define VP_VTIM1_VSYNC(v) FLD_VAL(v, 7, 0)
+#define VP_VTIM2 0x0460 /* Vertical Timing Control 2 */
+#define VP_VTIM2_VFP(v) FLD_VAL(v, 23, 16)
+#define VP_VTIM2_VACT(v) FLD_VAL(v, 10, 0)
+#define VP_VFUEN 0x0464 /* Video Frame Timing Update Enable */
+
+/* LVDS registers */
+#define LV_MX0003 0x0480 /* Mux input bit 0 to 3 */
+#define LV_MX0407 0x0484 /* Mux input bit 4 to 7 */
+#define LV_MX0811 0x0488 /* Mux input bit 8 to 11 */
+#define LV_MX1215 0x048C /* Mux input bit 12 to 15 */
+#define LV_MX1619 0x0490 /* Mux input bit 16 to 19 */
+#define LV_MX2023 0x0494 /* Mux input bit 20 to 23 */
+#define LV_MX2427 0x0498 /* Mux input bit 24 to 27 */
+#define LV_MX(b0, b1, b2, b3) (FLD_VAL(b0, 4, 0) | FLD_VAL(b1, 12, 8) | \
+ FLD_VAL(b2, 20, 16) | FLD_VAL(b3, 28, 24))
+
+/* Input bit numbers used in mux registers */
+enum {
+ LVI_R0,
+ LVI_R1,
+ LVI_R2,
+ LVI_R3,
+ LVI_R4,
+ LVI_R5,
+ LVI_R6,
+ LVI_R7,
+ LVI_G0,
+ LVI_G1,
+ LVI_G2,
+ LVI_G3,
+ LVI_G4,
+ LVI_G5,
+ LVI_G6,
+ LVI_G7,
+ LVI_B0,
+ LVI_B1,
+ LVI_B2,
+ LVI_B3,
+ LVI_B4,
+ LVI_B5,
+ LVI_B6,
+ LVI_B7,
+ LVI_HS,
+ LVI_VS,
+ LVI_DE,
+ LVI_L0
+};
+
+#define LV_CFG 0x049C /* LVDS Configuration */
+#define LV_PHY0 0x04A0 /* LVDS PHY 0 */
+#define LV_PHY0_RST(v) FLD_VAL(v, 22, 22) /* PHY reset */
+#define LV_PHY0_IS(v) FLD_VAL(v, 15, 14)
+#define LV_PHY0_ND(v) FLD_VAL(v, 4, 0) /* Frequency range select */
+#define LV_PHY0_PRBS_ON(v) FLD_VAL(v, 20, 16) /* Clock/Data Flag pins */
+
+/* System registers */
+#define SYS_RST 0x0504 /* System Reset */
+#define SYS_ID 0x0580 /* System ID */
+
+#define SYS_RST_I2CS BIT(0) /* Reset I2C-Slave controller */
+#define SYS_RST_I2CM BIT(1) /* Reset I2C-Master controller */
+#define SYS_RST_LCD BIT(2) /* Reset LCD controller */
+#define SYS_RST_BM BIT(3) /* Reset Bus Management controller */
+#define SYS_RST_DSIRX BIT(4) /* Reset DSI-RX and App controller */
+#define SYS_RST_REG BIT(5) /* Reset Register module */
+
+#define LPX_PERIOD 2
+#define TTA_SURE 3
+#define TTA_GET 0x20000
+
+/* Lane enable PPI and DSI register bits */
+#define LANEENABLE_CLEN BIT(0)
+#define LANEENABLE_L0EN BIT(1)
+#define LANEENABLE_L1EN BIT(2)
+#define LANEENABLE_L2EN BIT(3)
+#define LANEENABLE_L3EN BIT(4)
+
+/* LVCFG fields */
+#define LV_CFG_LVEN BIT(0)
+#define LV_CFG_LVDLINK BIT(1)
+#define LV_CFG_CLKPOL1 BIT(2)
+#define LV_CFG_CLKPOL2 BIT(3)
+
+static const char * const tc358764_supplies[] = {
+ "vddc", "vddio", "vddlvds"
+};
+
+struct tc358764 {
+ struct device *dev;
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(tc358764_supplies)];
+ struct gpio_desc *gpio_reset;
+ struct drm_panel *panel;
+ int error;
+};
+
+static int tc358764_clear_error(struct tc358764 *ctx)
+{
+ int ret = ctx->error;
+
+ ctx->error = 0;
+ return ret;
+}
+
+static void tc358764_read(struct tc358764 *ctx, u16 addr, u32 *val)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ ssize_t ret;
+
+ if (ctx->error)
+ return;
+
+ cpu_to_le16s(&addr);
+ ret = mipi_dsi_generic_read(dsi, &addr, sizeof(addr), val, sizeof(*val));
+ if (ret >= 0)
+ le32_to_cpus(val);
+
+ dev_dbg(ctx->dev, "read: addr: %d, val: %d\n", addr, *val);
+}
+
+static void tc358764_write(struct tc358764 *ctx, u16 addr, u32 val)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ ssize_t ret;
+ u8 data[6];
+
+ if (ctx->error)
+ return;
+
+ data[0] = addr;
+ data[1] = addr >> 8;
+ data[2] = val;
+ data[3] = val >> 8;
+ data[4] = val >> 16;
+ data[5] = val >> 24;
+
+ ret = mipi_dsi_generic_write(dsi, data, sizeof(data));
+ if (ret < 0)
+ ctx->error = ret;
+}
+
+static inline struct tc358764 *bridge_to_tc358764(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct tc358764, bridge);
+}
+
+static inline
+struct tc358764 *connector_to_tc358764(struct drm_connector *connector)
+{
+ return container_of(connector, struct tc358764, connector);
+}
+
+static int tc358764_init(struct tc358764 *ctx)
+{
+ u32 v = 0;
+
+ tc358764_read(ctx, SYS_ID, &v);
+ if (ctx->error)
+ return tc358764_clear_error(ctx);
+ dev_info(ctx->dev, "ID: %#x\n", v);
+
+ /* configure PPI counters */
+ tc358764_write(ctx, PPI_TX_RX_TA, TTA_GET | TTA_SURE);
+ tc358764_write(ctx, PPI_LPTXTIMECNT, LPX_PERIOD);
+ tc358764_write(ctx, PPI_D0S_CLRSIPOCOUNT, 5);
+ tc358764_write(ctx, PPI_D1S_CLRSIPOCOUNT, 5);
+ tc358764_write(ctx, PPI_D2S_CLRSIPOCOUNT, 5);
+ tc358764_write(ctx, PPI_D3S_CLRSIPOCOUNT, 5);
+
+ /* enable four data lanes and clock lane */
+ tc358764_write(ctx, PPI_LANEENABLE, LANEENABLE_L3EN | LANEENABLE_L2EN |
+ LANEENABLE_L1EN | LANEENABLE_L0EN | LANEENABLE_CLEN);
+ tc358764_write(ctx, DSI_LANEENABLE, LANEENABLE_L3EN | LANEENABLE_L2EN |
+ LANEENABLE_L1EN | LANEENABLE_L0EN | LANEENABLE_CLEN);
+
+ /* start */
+ tc358764_write(ctx, PPI_STARTPPI, PPI_START_FUNCTION);
+ tc358764_write(ctx, DSI_STARTDSI, DSI_RX_START);
+
+ /* configure video path */
+ tc358764_write(ctx, VP_CTRL, VP_CTRL_VSDELAY(15) | VP_CTRL_RGB888(1) |
+ VP_CTRL_EVTMODE(1) | VP_CTRL_HSPOL | VP_CTRL_VSPOL);
+
+ /* reset PHY */
+ tc358764_write(ctx, LV_PHY0, LV_PHY0_RST(1) |
+ LV_PHY0_PRBS_ON(4) | LV_PHY0_IS(2) | LV_PHY0_ND(6));
+ tc358764_write(ctx, LV_PHY0, LV_PHY0_PRBS_ON(4) | LV_PHY0_IS(2) |
+ LV_PHY0_ND(6));
+
+ /* reset bridge */
+ tc358764_write(ctx, SYS_RST, SYS_RST_LCD);
+
+ /* set bit order */
+ tc358764_write(ctx, LV_MX0003, LV_MX(LVI_R0, LVI_R1, LVI_R2, LVI_R3));
+ tc358764_write(ctx, LV_MX0407, LV_MX(LVI_R4, LVI_R7, LVI_R5, LVI_G0));
+ tc358764_write(ctx, LV_MX0811, LV_MX(LVI_G1, LVI_G2, LVI_G6, LVI_G7));
+ tc358764_write(ctx, LV_MX1215, LV_MX(LVI_G3, LVI_G4, LVI_G5, LVI_B0));
+ tc358764_write(ctx, LV_MX1619, LV_MX(LVI_B6, LVI_B7, LVI_B1, LVI_B2));
+ tc358764_write(ctx, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0));
+ tc358764_write(ctx, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R6));
+ tc358764_write(ctx, LV_CFG, LV_CFG_CLKPOL2 | LV_CFG_CLKPOL1 |
+ LV_CFG_LVEN);
+
+ return tc358764_clear_error(ctx);
+}
+
+static void tc358764_reset(struct tc358764 *ctx)
+{
+ gpiod_set_value(ctx->gpio_reset, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value(ctx->gpio_reset, 0);
+ usleep_range(1000, 2000);
+}
+
+static int tc358764_get_modes(struct drm_connector *connector)
+{
+ struct tc358764 *ctx = connector_to_tc358764(connector);
+
+ return drm_panel_get_modes(ctx->panel);
+}
+
+static const
+struct drm_connector_helper_funcs tc358764_connector_helper_funcs = {
+ .get_modes = tc358764_get_modes,
+};
+
+static const struct drm_connector_funcs tc358764_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static void tc358764_disable(struct drm_bridge *bridge)
+{
+ struct tc358764 *ctx = bridge_to_tc358764(bridge);
+ int ret = drm_panel_disable(bridge_to_tc358764(bridge)->panel);
+
+ if (ret < 0)
+ dev_err(ctx->dev, "error disabling panel (%d)\n", ret);
+}
+
+static void tc358764_post_disable(struct drm_bridge *bridge)
+{
+ struct tc358764 *ctx = bridge_to_tc358764(bridge);
+ int ret;
+
+ ret = drm_panel_unprepare(ctx->panel);
+ if (ret < 0)
+ dev_err(ctx->dev, "error unpreparing panel (%d)\n", ret);
+ tc358764_reset(ctx);
+ usleep_range(10000, 15000);
+ ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ dev_err(ctx->dev, "error disabling regulators (%d)\n", ret);
+}
+
+static void tc358764_pre_enable(struct drm_bridge *bridge)
+{
+ struct tc358764 *ctx = bridge_to_tc358764(bridge);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ dev_err(ctx->dev, "error enabling regulators (%d)\n", ret);
+ usleep_range(10000, 15000);
+ tc358764_reset(ctx);
+ ret = tc358764_init(ctx);
+ if (ret < 0)
+ dev_err(ctx->dev, "error initializing bridge (%d)\n", ret);
+ ret = drm_panel_prepare(ctx->panel);
+ if (ret < 0)
+ dev_err(ctx->dev, "error preparing panel (%d)\n", ret);
+}
+
+static void tc358764_enable(struct drm_bridge *bridge)
+{
+ struct tc358764 *ctx = bridge_to_tc358764(bridge);
+ int ret = drm_panel_enable(ctx->panel);
+
+ if (ret < 0)
+ dev_err(ctx->dev, "error enabling panel (%d)\n", ret);
+}
+
+static int tc358764_attach(struct drm_bridge *bridge)
+{
+ struct tc358764 *ctx = bridge_to_tc358764(bridge);
+ struct drm_device *drm = bridge->dev;
+ int ret;
+
+ ctx->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ ret = drm_connector_init(drm, &ctx->connector,
+ &tc358764_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(&ctx->connector,
+ &tc358764_connector_helper_funcs);
+ drm_connector_attach_encoder(&ctx->connector, bridge->encoder);
+ drm_panel_attach(ctx->panel, &ctx->connector);
+ ctx->connector.funcs->reset(&ctx->connector);
+ drm_fb_helper_add_one_connector(drm->fb_helper, &ctx->connector);
+ drm_connector_register(&ctx->connector);
+
+ return 0;
+}
+
+static void tc358764_detach(struct drm_bridge *bridge)
+{
+ struct tc358764 *ctx = bridge_to_tc358764(bridge);
+ struct drm_device *drm = bridge->dev;
+
+ drm_connector_unregister(&ctx->connector);
+ drm_fb_helper_remove_one_connector(drm->fb_helper, &ctx->connector);
+ drm_panel_detach(ctx->panel);
+ ctx->panel = NULL;
+ drm_connector_unreference(&ctx->connector);
+}
+
+static const struct drm_bridge_funcs tc358764_bridge_funcs = {
+ .disable = tc358764_disable,
+ .post_disable = tc358764_post_disable,
+ .enable = tc358764_enable,
+ .pre_enable = tc358764_pre_enable,
+ .attach = tc358764_attach,
+ .detach = tc358764_detach,
+};
+
+static int tc358764_parse_dt(struct tc358764 *ctx)
+{
+ struct device *dev = ctx->dev;
+ int ret;
+
+ ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->gpio_reset)) {
+ dev_err(dev, "no reset GPIO pin provided\n");
+ return PTR_ERR(ctx->gpio_reset);
+ }
+
+ ret = drm_of_find_panel_or_bridge(ctx->dev->of_node, 1, 0, &ctx->panel,
+ NULL);
+ if (ret && ret != -EPROBE_DEFER)
+ dev_err(dev, "cannot find panel (%d)\n", ret);
+
+ return ret;
+}
+
+static int tc358764_configure_regulators(struct tc358764 *ctx)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); ++i)
+ ctx->supplies[i].supply = tc358764_supplies[i];
+
+ ret = devm_regulator_bulk_get(ctx->dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ dev_err(ctx->dev, "failed to get regulators: %d\n", ret);
+
+ return ret;
+}
+
+static int tc358764_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct tc358764 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(struct tc358764), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST
+ | MIPI_DSI_MODE_VIDEO_AUTO_VERT | MIPI_DSI_MODE_LPM;
+
+ ret = tc358764_parse_dt(ctx);
+ if (ret < 0)
+ return ret;
+
+ ret = tc358764_configure_regulators(ctx);
+ if (ret < 0)
+ return ret;
+
+ ctx->bridge.funcs = &tc358764_bridge_funcs;
+ ctx->bridge.of_node = dev->of_node;
+
+ drm_bridge_add(&ctx->bridge);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_bridge_remove(&ctx->bridge);
+ dev_err(dev, "failed to attach dsi\n");
+ }
+
+ return ret;
+}
+
+static int tc358764_remove(struct mipi_dsi_device *dsi)
+{
+ struct tc358764 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_bridge_remove(&ctx->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id tc358764_of_match[] = {
+ { .compatible = "toshiba,tc358764" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tc358764_of_match);
+
+static struct mipi_dsi_driver tc358764_driver = {
+ .probe = tc358764_probe,
+ .remove = tc358764_remove,
+ .driver = {
+ .name = "tc358764",
+ .owner = THIS_MODULE,
+ .of_match_table = tc358764_of_match,
+ },
+};
+module_mipi_dsi_driver(tc358764_driver);
+
+MODULE_AUTHOR("Andrzej Hajda <[email protected]>");
+MODULE_AUTHOR("Maciej Purski <[email protected]>");
+MODULE_DESCRIPTION("MIPI-DSI based Driver for TC358764 DSI/LVDS Bridge");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
new file mode 100644
index 000000000000..f8a931cf3665
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/iopoll.h>
+#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#define SN_DEVICE_REV_REG 0x08
+#define SN_DPPLL_SRC_REG 0x0A
+#define DPPLL_CLK_SRC_DSICLK BIT(0)
+#define REFCLK_FREQ_MASK GENMASK(3, 1)
+#define REFCLK_FREQ(x) ((x) << 1)
+#define DPPLL_SRC_DP_PLL_LOCK BIT(7)
+#define SN_PLL_ENABLE_REG 0x0D
+#define SN_DSI_LANES_REG 0x10
+#define CHA_DSI_LANES_MASK GENMASK(4, 3)
+#define CHA_DSI_LANES(x) ((x) << 3)
+#define SN_DSIA_CLK_FREQ_REG 0x12
+#define SN_CHA_ACTIVE_LINE_LENGTH_LOW_REG 0x20
+#define SN_CHA_VERTICAL_DISPLAY_SIZE_LOW_REG 0x24
+#define SN_CHA_HSYNC_PULSE_WIDTH_LOW_REG 0x2C
+#define SN_CHA_HSYNC_PULSE_WIDTH_HIGH_REG 0x2D
+#define CHA_HSYNC_POLARITY BIT(7)
+#define SN_CHA_VSYNC_PULSE_WIDTH_LOW_REG 0x30
+#define SN_CHA_VSYNC_PULSE_WIDTH_HIGH_REG 0x31
+#define CHA_VSYNC_POLARITY BIT(7)
+#define SN_CHA_HORIZONTAL_BACK_PORCH_REG 0x34
+#define SN_CHA_VERTICAL_BACK_PORCH_REG 0x36
+#define SN_CHA_HORIZONTAL_FRONT_PORCH_REG 0x38
+#define SN_CHA_VERTICAL_FRONT_PORCH_REG 0x3A
+#define SN_ENH_FRAME_REG 0x5A
+#define VSTREAM_ENABLE BIT(3)
+#define SN_DATA_FORMAT_REG 0x5B
+#define SN_HPD_DISABLE_REG 0x5C
+#define HPD_DISABLE BIT(0)
+#define SN_AUX_WDATA_REG(x) (0x64 + (x))
+#define SN_AUX_ADDR_19_16_REG 0x74
+#define SN_AUX_ADDR_15_8_REG 0x75
+#define SN_AUX_ADDR_7_0_REG 0x76
+#define SN_AUX_LENGTH_REG 0x77
+#define SN_AUX_CMD_REG 0x78
+#define AUX_CMD_SEND BIT(1)
+#define AUX_CMD_REQ(x) ((x) << 4)
+#define SN_AUX_RDATA_REG(x) (0x79 + (x))
+#define SN_SSC_CONFIG_REG 0x93
+#define DP_NUM_LANES_MASK GENMASK(5, 4)
+#define DP_NUM_LANES(x) ((x) << 4)
+#define SN_DATARATE_CONFIG_REG 0x94
+#define DP_DATARATE_MASK GENMASK(7, 5)
+#define DP_DATARATE(x) ((x) << 5)
+#define SN_ML_TX_MODE_REG 0x96
+#define ML_TX_MAIN_LINK_OFF 0
+#define ML_TX_NORMAL_MODE BIT(0)
+#define SN_AUX_CMD_STATUS_REG 0xF4
+#define AUX_IRQ_STATUS_AUX_RPLY_TOUT BIT(3)
+#define AUX_IRQ_STATUS_AUX_SHORT BIT(5)
+#define AUX_IRQ_STATUS_NAT_I2C_FAIL BIT(6)
+
+#define MIN_DSI_CLK_FREQ_MHZ 40
+
+/* fudge factor required to account for 8b/10b encoding */
+#define DP_CLK_FUDGE_NUM 10
+#define DP_CLK_FUDGE_DEN 8
+
+/* Matches DP_AUX_MAX_PAYLOAD_BYTES (for now) */
+#define SN_AUX_MAX_PAYLOAD_BYTES 16
+
+#define SN_REGULATOR_SUPPLY_NUM 4
+
+struct ti_sn_bridge {
+ struct device *dev;
+ struct regmap *regmap;
+ struct drm_dp_aux aux;
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+ struct device_node *host_node;
+ struct mipi_dsi_device *dsi;
+ struct clk *refclk;
+ struct drm_panel *panel;
+ struct gpio_desc *enable_gpio;
+ struct regulator_bulk_data supplies[SN_REGULATOR_SUPPLY_NUM];
+};
+
+static const struct regmap_range ti_sn_bridge_volatile_ranges[] = {
+ { .range_min = 0, .range_max = 0xFF },
+};
+
+static const struct regmap_access_table ti_sn_bridge_volatile_table = {
+ .yes_ranges = ti_sn_bridge_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ti_sn_bridge_volatile_ranges),
+};
+
+static const struct regmap_config ti_sn_bridge_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_table = &ti_sn_bridge_volatile_table,
+ .cache_type = REGCACHE_NONE,
+};
+
+static void ti_sn_bridge_write_u16(struct ti_sn_bridge *pdata,
+ unsigned int reg, u16 val)
+{
+ regmap_write(pdata->regmap, reg, val & 0xFF);
+ regmap_write(pdata->regmap, reg + 1, val >> 8);
+}
+
+static int __maybe_unused ti_sn_bridge_resume(struct device *dev)
+{
+ struct ti_sn_bridge *pdata = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_bulk_enable(SN_REGULATOR_SUPPLY_NUM, pdata->supplies);
+ if (ret) {
+ DRM_ERROR("failed to enable supplies %d\n", ret);
+ return ret;
+ }
+
+ gpiod_set_value(pdata->enable_gpio, 1);
+
+ return ret;
+}
+
+static int __maybe_unused ti_sn_bridge_suspend(struct device *dev)
+{
+ struct ti_sn_bridge *pdata = dev_get_drvdata(dev);
+ int ret;
+
+ gpiod_set_value(pdata->enable_gpio, 0);
+
+ ret = regulator_bulk_disable(SN_REGULATOR_SUPPLY_NUM, pdata->supplies);
+ if (ret)
+ DRM_ERROR("failed to disable supplies %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops ti_sn_bridge_pm_ops = {
+ SET_RUNTIME_PM_OPS(ti_sn_bridge_suspend, ti_sn_bridge_resume, NULL)
+};
+
+/* Connector funcs */
+static struct ti_sn_bridge *
+connector_to_ti_sn_bridge(struct drm_connector *connector)
+{
+ return container_of(connector, struct ti_sn_bridge, connector);
+}
+
+static int ti_sn_bridge_connector_get_modes(struct drm_connector *connector)
+{
+ struct ti_sn_bridge *pdata = connector_to_ti_sn_bridge(connector);
+
+ return drm_panel_get_modes(pdata->panel);
+}
+
+static enum drm_mode_status
+ti_sn_bridge_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* maximum supported resolution is 4K at 60 fps */
+ if (mode->clock > 594000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static struct drm_connector_helper_funcs ti_sn_bridge_connector_helper_funcs = {
+ .get_modes = ti_sn_bridge_connector_get_modes,
+ .mode_valid = ti_sn_bridge_connector_mode_valid,
+};
+
+static enum drm_connector_status
+ti_sn_bridge_connector_detect(struct drm_connector *connector, bool force)
+{
+ /*
+ * TODO: Currently, if a drm_panel is present, always report the
+ * connector as connected. Support for detecting the device state
+ * in hot-pluggable scenarios still needs to be added.
+ */
+ return connector_status_connected;
+}
+
+static const struct drm_connector_funcs ti_sn_bridge_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = ti_sn_bridge_connector_detect,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static struct ti_sn_bridge *bridge_to_ti_sn_bridge(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct ti_sn_bridge, bridge);
+}
+
+static int ti_sn_bridge_parse_regulators(struct ti_sn_bridge *pdata)
+{
+ unsigned int i;
+ const char * const ti_sn_bridge_supply_names[] = {
+ "vcca", "vcc", "vccio", "vpll",
+ };
+
+ for (i = 0; i < SN_REGULATOR_SUPPLY_NUM; i++)
+ pdata->supplies[i].supply = ti_sn_bridge_supply_names[i];
+
+ return devm_regulator_bulk_get(pdata->dev, SN_REGULATOR_SUPPLY_NUM,
+ pdata->supplies);
+}
+
+static int ti_sn_bridge_attach(struct drm_bridge *bridge)
+{
+ int ret, val;
+ struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+ struct mipi_dsi_host *host;
+ struct mipi_dsi_device *dsi;
+ const struct mipi_dsi_device_info info = { .type = "ti_sn_bridge",
+ .channel = 0,
+ .node = NULL,
+ };
+
+ ret = drm_connector_init(bridge->dev, &pdata->connector,
+ &ti_sn_bridge_connector_funcs,
+ DRM_MODE_CONNECTOR_eDP);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(&pdata->connector,
+ &ti_sn_bridge_connector_helper_funcs);
+ drm_connector_attach_encoder(&pdata->connector, bridge->encoder);
+
+ /*
+ * TODO: Ideally, finding the host resource and registering the DSI
+ * device should be done in the bridge probe. But some existing DSI
+ * host drivers wait for a drm_bridge/drm_panel to be added to the
+ * global bridge/panel list before completing their probe. If we
+ * registered the DSI device in the bridge probe, before adding the
+ * bridge to the global list, we would deadlock: the DSI host probe
+ * would never complete, and neither would our bridge probe. Keeping
+ * the registration here satisfies most of the existing host drivers.
+ * Once the host drivers are fixed, this code can safely move to the
+ * bridge probe.
+ */
+ host = of_find_mipi_dsi_host_by_node(pdata->host_node);
+ if (!host) {
+ DRM_ERROR("failed to find dsi host\n");
+ ret = -ENODEV;
+ goto err_dsi_host;
+ }
+
+ dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(dsi)) {
+ DRM_ERROR("failed to create dsi device\n");
+ ret = PTR_ERR(dsi);
+ goto err_dsi_host;
+ }
+
+ /* TODO: setting to 4 lanes always for now */
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE;
+
+ /* check if continuous dsi clock is required or not */
+ pm_runtime_get_sync(pdata->dev);
+ regmap_read(pdata->regmap, SN_DPPLL_SRC_REG, &val);
+ pm_runtime_put(pdata->dev);
+ if (!(val & DPPLL_CLK_SRC_DSICLK))
+ dsi->mode_flags |= MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ DRM_ERROR("failed to attach dsi to host\n");
+ goto err_dsi_attach;
+ }
+ pdata->dsi = dsi;
+
+ /* attach panel to bridge */
+ drm_panel_attach(pdata->panel, &pdata->connector);
+
+ return 0;
+
+err_dsi_attach:
+ mipi_dsi_device_unregister(dsi);
+err_dsi_host:
+ drm_connector_cleanup(&pdata->connector);
+ return ret;
+}
+
+static void ti_sn_bridge_disable(struct drm_bridge *bridge)
+{
+ struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+
+ drm_panel_disable(pdata->panel);
+
+ /* disable video stream */
+ regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE, 0);
+ /* semi auto link training mode OFF */
+ regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0);
+ /* disable DP PLL */
+ regmap_write(pdata->regmap, SN_PLL_ENABLE_REG, 0);
+
+ drm_panel_unprepare(pdata->panel);
+}
+
+static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn_bridge *pdata)
+{
+ u32 bit_rate_khz, clk_freq_khz;
+ struct drm_display_mode *mode =
+ &pdata->bridge.encoder->crtc->state->adjusted_mode;
+
+ bit_rate_khz = mode->clock *
+ mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
+ clk_freq_khz = bit_rate_khz / (pdata->dsi->lanes * 2);
+
+ return clk_freq_khz;
+}
+
+/* clock frequencies (in Hz) supported by the bridge when derived from the REFCLK pin */
+static const u32 ti_sn_bridge_refclk_lut[] = {
+ 12000000,
+ 19200000,
+ 26000000,
+ 27000000,
+ 38400000,
+};
+
+/* clock frequencies (in Hz) supported by the bridge when derived from the DACP/N pin */
+static const u32 ti_sn_bridge_dsiclk_lut[] = {
+ 468000000,
+ 384000000,
+ 416000000,
+ 486000000,
+ 460800000,
+};
+
+static void ti_sn_bridge_set_refclk_freq(struct ti_sn_bridge *pdata)
+{
+ int i;
+ u32 refclk_rate;
+ const u32 *refclk_lut;
+ size_t refclk_lut_size;
+
+ if (pdata->refclk) {
+ refclk_rate = clk_get_rate(pdata->refclk);
+ refclk_lut = ti_sn_bridge_refclk_lut;
+ refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_refclk_lut);
+ clk_prepare_enable(pdata->refclk);
+ } else {
+ refclk_rate = ti_sn_bridge_get_dsi_freq(pdata) * 1000;
+ refclk_lut = ti_sn_bridge_dsiclk_lut;
+ refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_dsiclk_lut);
+ }
+
+ /* i == refclk_lut_size selects the default frequency */
+ for (i = 0; i < refclk_lut_size; i++)
+ if (refclk_lut[i] == refclk_rate)
+ break;
+
+ regmap_update_bits(pdata->regmap, SN_DPPLL_SRC_REG, REFCLK_FREQ_MASK,
+ REFCLK_FREQ(i));
+}
+
+/*
+ * The LUT index corresponds to the register value, and the LUT values
+ * correspond to the DP data rates supported by the bridge, in Mbps.
+ */
+static const unsigned int ti_sn_bridge_dp_rate_lut[] = {
+ 0, 1620, 2160, 2430, 2700, 3240, 4320, 5400
+};
+
+static void ti_sn_bridge_set_dsi_dp_rate(struct ti_sn_bridge *pdata)
+{
+ unsigned int bit_rate_mhz, clk_freq_mhz, dp_rate_mhz;
+ unsigned int val, i;
+ struct drm_display_mode *mode =
+ &pdata->bridge.encoder->crtc->state->adjusted_mode;
+
+ /* set DSIA clk frequency */
+ bit_rate_mhz = (mode->clock / 1000) *
+ mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
+ clk_freq_mhz = bit_rate_mhz / (pdata->dsi->lanes * 2);
+
+ /* for each increment in val, frequency increases by 5MHz */
+ val = (MIN_DSI_CLK_FREQ_MHZ / 5) +
+ (((clk_freq_mhz - MIN_DSI_CLK_FREQ_MHZ) / 5) & 0xFF);
+ regmap_write(pdata->regmap, SN_DSIA_CLK_FREQ_REG, val);
+
+ /* set DP data rate */
+ dp_rate_mhz = ((bit_rate_mhz / pdata->dsi->lanes) * DP_CLK_FUDGE_NUM) /
+ DP_CLK_FUDGE_DEN;
+ for (i = 0; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut) - 1; i++)
+ if (ti_sn_bridge_dp_rate_lut[i] > dp_rate_mhz)
+ break;
+
+ regmap_update_bits(pdata->regmap, SN_DATARATE_CONFIG_REG,
+ DP_DATARATE_MASK, DP_DATARATE(i));
+}
+
+static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata)
+{
+ struct drm_display_mode *mode =
+ &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ u8 hsync_polarity = 0, vsync_polarity = 0;
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ hsync_polarity = CHA_HSYNC_POLARITY;
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ vsync_polarity = CHA_VSYNC_POLARITY;
+
+ ti_sn_bridge_write_u16(pdata, SN_CHA_ACTIVE_LINE_LENGTH_LOW_REG,
+ mode->hdisplay);
+ ti_sn_bridge_write_u16(pdata, SN_CHA_VERTICAL_DISPLAY_SIZE_LOW_REG,
+ mode->vdisplay);
+ regmap_write(pdata->regmap, SN_CHA_HSYNC_PULSE_WIDTH_LOW_REG,
+ (mode->hsync_end - mode->hsync_start) & 0xFF);
+ regmap_write(pdata->regmap, SN_CHA_HSYNC_PULSE_WIDTH_HIGH_REG,
+ (((mode->hsync_end - mode->hsync_start) >> 8) & 0x7F) |
+ hsync_polarity);
+ regmap_write(pdata->regmap, SN_CHA_VSYNC_PULSE_WIDTH_LOW_REG,
+ (mode->vsync_end - mode->vsync_start) & 0xFF);
+ regmap_write(pdata->regmap, SN_CHA_VSYNC_PULSE_WIDTH_HIGH_REG,
+ (((mode->vsync_end - mode->vsync_start) >> 8) & 0x7F) |
+ vsync_polarity);
+
+ regmap_write(pdata->regmap, SN_CHA_HORIZONTAL_BACK_PORCH_REG,
+ (mode->htotal - mode->hsync_end) & 0xFF);
+ regmap_write(pdata->regmap, SN_CHA_VERTICAL_BACK_PORCH_REG,
+ (mode->vtotal - mode->vsync_end) & 0xFF);
+
+ regmap_write(pdata->regmap, SN_CHA_HORIZONTAL_FRONT_PORCH_REG,
+ (mode->hsync_start - mode->hdisplay) & 0xFF);
+ regmap_write(pdata->regmap, SN_CHA_VERTICAL_FRONT_PORCH_REG,
+ (mode->vsync_start - mode->vdisplay) & 0xFF);
+
+ usleep_range(10000, 10500); /* 10ms delay recommended by spec */
+}
+
+static void ti_sn_bridge_enable(struct drm_bridge *bridge)
+{
+ struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+ unsigned int val;
+ int ret;
+
+ /*
+ * FIXME:
+ * This 70ms was found necessary by experimentation. If it's not
+ * present, link training fails. It seems like it can go anywhere from
+ * pre_enable() up to semi-auto link training initiation below.
+ *
+ * Neither the datasheet for the bridge nor the panel tested mention a
+ * delay of this magnitude in the timing requirements. So for now, add
+ * the mystery delay until someone figures out a better fix.
+ */
+ msleep(70);
+
+ /* DSI_A lane config */
+ val = CHA_DSI_LANES(4 - pdata->dsi->lanes);
+ regmap_update_bits(pdata->regmap, SN_DSI_LANES_REG,
+ CHA_DSI_LANES_MASK, val);
+
+ /* DP lane config */
+ val = DP_NUM_LANES(pdata->dsi->lanes - 1);
+ regmap_update_bits(pdata->regmap, SN_SSC_CONFIG_REG, DP_NUM_LANES_MASK,
+ val);
+
+ /* set dsi/dp clk frequency value */
+ ti_sn_bridge_set_dsi_dp_rate(pdata);
+
+ /* enable DP PLL */
+ regmap_write(pdata->regmap, SN_PLL_ENABLE_REG, 1);
+
+ ret = regmap_read_poll_timeout(pdata->regmap, SN_DPPLL_SRC_REG, val,
+ val & DPPLL_SRC_DP_PLL_LOCK, 1000,
+ 50 * 1000);
+ if (ret) {
+ DRM_ERROR("DP_PLL_LOCK polling failed (%d)\n", ret);
+ return;
+ }
+
+ /*
+ * The SN65DSI86 only supports the ASSR Display Authentication method,
+ * which is enabled by default. The eDP panel must support this method
+ * as well; enable it in the panel at DisplayPort address 0x0010A prior
+ * to link training.
+ */
+ drm_dp_dpcd_writeb(&pdata->aux, DP_EDP_CONFIGURATION_SET,
+ DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
+
+ /* Semi auto link training mode */
+ regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
+ ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
+ val == ML_TX_MAIN_LINK_OFF ||
+ val == ML_TX_NORMAL_MODE, 1000,
+ 500 * 1000);
+ if (ret) {
+ DRM_ERROR("Training complete polling failed (%d)\n", ret);
+ return;
+ } else if (val == ML_TX_MAIN_LINK_OFF) {
+ DRM_ERROR("Link training failed, link is off\n");
+ return;
+ }
+
+ /* config video parameters */
+ ti_sn_bridge_set_video_timings(pdata);
+
+ /* enable video stream */
+ regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE,
+ VSTREAM_ENABLE);
+
+ drm_panel_enable(pdata->panel);
+}
+
+static void ti_sn_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+
+ pm_runtime_get_sync(pdata->dev);
+
+ /* configure bridge ref_clk */
+ ti_sn_bridge_set_refclk_freq(pdata);
+
+ /* HPD is not supported when a drm_panel is connected */
+ regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
+ HPD_DISABLE);
+
+ drm_panel_prepare(pdata->panel);
+}
+
+static void ti_sn_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+
+ if (pdata->refclk)
+ clk_disable_unprepare(pdata->refclk);
+
+ pm_runtime_put_sync(pdata->dev);
+}
+
+static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
+ .attach = ti_sn_bridge_attach,
+ .pre_enable = ti_sn_bridge_pre_enable,
+ .enable = ti_sn_bridge_enable,
+ .disable = ti_sn_bridge_disable,
+ .post_disable = ti_sn_bridge_post_disable,
+};
+
+static struct ti_sn_bridge *aux_to_ti_sn_bridge(struct drm_dp_aux *aux)
+{
+ return container_of(aux, struct ti_sn_bridge, aux);
+}
+
+static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct ti_sn_bridge *pdata = aux_to_ti_sn_bridge(aux);
+ u32 request = msg->request & ~DP_AUX_I2C_MOT;
+ u32 request_val = AUX_CMD_REQ(msg->request);
+ u8 *buf = (u8 *)msg->buffer;
+ unsigned int val;
+ int ret, i;
+
+ if (msg->size > SN_AUX_MAX_PAYLOAD_BYTES)
+ return -EINVAL;
+
+ switch (request) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE:
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_write(pdata->regmap, SN_AUX_ADDR_19_16_REG,
+ (msg->address >> 16) & 0xF);
+ regmap_write(pdata->regmap, SN_AUX_ADDR_15_8_REG,
+ (msg->address >> 8) & 0xFF);
+ regmap_write(pdata->regmap, SN_AUX_ADDR_7_0_REG, msg->address & 0xFF);
+
+ regmap_write(pdata->regmap, SN_AUX_LENGTH_REG, msg->size);
+
+ if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE) {
+ for (i = 0; i < msg->size; i++)
+ regmap_write(pdata->regmap, SN_AUX_WDATA_REG(i),
+ buf[i]);
+ }
+
+ regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val | AUX_CMD_SEND);
+
+ ret = regmap_read_poll_timeout(pdata->regmap, SN_AUX_CMD_REG, val,
+ !(val & AUX_CMD_SEND), 200,
+ 50 * 1000);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(pdata->regmap, SN_AUX_CMD_STATUS_REG, &val);
+ if (ret)
+ return ret;
+ else if ((val & AUX_IRQ_STATUS_NAT_I2C_FAIL)
+ || (val & AUX_IRQ_STATUS_AUX_RPLY_TOUT)
+ || (val & AUX_IRQ_STATUS_AUX_SHORT))
+ return -ENXIO;
+
+ if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE)
+ return msg->size;
+
+ for (i = 0; i < msg->size; i++) {
+ unsigned int val;
+ ret = regmap_read(pdata->regmap, SN_AUX_RDATA_REG(i),
+ &val);
+ if (ret)
+ return ret;
+
+ WARN_ON(val & ~0xFF);
+ buf[i] = (u8)(val & 0xFF);
+ }
+
+ return msg->size;
+}
+
+static int ti_sn_bridge_parse_dsi_host(struct ti_sn_bridge *pdata)
+{
+ struct device_node *np = pdata->dev->of_node;
+
+ pdata->host_node = of_graph_get_remote_node(np, 0, 0);
+
+ if (!pdata->host_node) {
+ DRM_ERROR("remote dsi host node not found\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int ti_sn_bridge_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ti_sn_bridge *pdata;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ DRM_ERROR("device doesn't support I2C\n");
+ return -ENODEV;
+ }
+
+ pdata = devm_kzalloc(&client->dev, sizeof(struct ti_sn_bridge),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->regmap = devm_regmap_init_i2c(client,
+ &ti_sn_bridge_regmap_config);
+ if (IS_ERR(pdata->regmap)) {
+ DRM_ERROR("regmap i2c init failed\n");
+ return PTR_ERR(pdata->regmap);
+ }
+
+ pdata->dev = &client->dev;
+
+ ret = drm_of_find_panel_or_bridge(pdata->dev->of_node, 1, 0,
+ &pdata->panel, NULL);
+ if (ret) {
+ DRM_ERROR("could not find any panel node\n");
+ return ret;
+ }
+
+ dev_set_drvdata(&client->dev, pdata);
+
+ pdata->enable_gpio = devm_gpiod_get(pdata->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(pdata->enable_gpio)) {
+ DRM_ERROR("failed to get enable gpio from DT\n");
+ ret = PTR_ERR(pdata->enable_gpio);
+ return ret;
+ }
+
+ ret = ti_sn_bridge_parse_regulators(pdata);
+ if (ret) {
+ DRM_ERROR("failed to parse regulators\n");
+ return ret;
+ }
+
+ pdata->refclk = devm_clk_get(pdata->dev, "refclk");
+ if (IS_ERR(pdata->refclk)) {
+ ret = PTR_ERR(pdata->refclk);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ DRM_DEBUG_KMS("refclk not found\n");
+ pdata->refclk = NULL;
+ }
+
+ ret = ti_sn_bridge_parse_dsi_host(pdata);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(pdata->dev);
+
+ i2c_set_clientdata(client, pdata);
+
+ pdata->aux.name = "ti-sn65dsi86-aux";
+ pdata->aux.dev = pdata->dev;
+ pdata->aux.transfer = ti_sn_aux_transfer;
+ drm_dp_aux_register(&pdata->aux);
+
+ pdata->bridge.funcs = &ti_sn_bridge_funcs;
+ pdata->bridge.of_node = client->dev.of_node;
+
+ drm_bridge_add(&pdata->bridge);
+
+ return 0;
+}
+
+static int ti_sn_bridge_remove(struct i2c_client *client)
+{
+ struct ti_sn_bridge *pdata = i2c_get_clientdata(client);
+
+ if (!pdata)
+ return -EINVAL;
+
+ of_node_put(pdata->host_node);
+
+ pm_runtime_disable(pdata->dev);
+
+ if (pdata->dsi) {
+ mipi_dsi_detach(pdata->dsi);
+ mipi_dsi_device_unregister(pdata->dsi);
+ }
+
+ drm_bridge_remove(&pdata->bridge);
+
+ return 0;
+}
+
+static struct i2c_device_id ti_sn_bridge_id[] = {
+ { "ti,sn65dsi86", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, ti_sn_bridge_id);
+
+static const struct of_device_id ti_sn_bridge_match_table[] = {
+ {.compatible = "ti,sn65dsi86"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ti_sn_bridge_match_table);
+
+static struct i2c_driver ti_sn_bridge_driver = {
+ .driver = {
+ .name = "ti_sn65dsi86",
+ .of_match_table = ti_sn_bridge_match_table,
+ .pm = &ti_sn_bridge_pm_ops,
+ },
+ .probe = ti_sn_bridge_probe,
+ .remove = ti_sn_bridge_remove,
+ .id_table = ti_sn_bridge_id,
+};
+module_i2c_driver(ti_sn_bridge_driver);
+
+MODULE_AUTHOR("Sandeep Panda <[email protected]>");
+MODULE_DESCRIPTION("sn65dsi86 DSI to eDP bridge driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 69c4e352dd78..db40b77c7f7c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -16,11 +16,11 @@
#include "cirrus_drv.h"
int cirrus_modeset = -1;
-int cirrus_bpp = 24;
+int cirrus_bpp = 16;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, cirrus_modeset, int, 0400);
-MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:24)");
+MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:16)");
module_param_named(bpp, cirrus_bpp, int, 0400);
/*
@@ -42,33 +42,12 @@ static const struct pci_device_id pciidlist[] = {
};
-static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
-{
- struct apertures_struct *ap;
- bool primary = false;
-
- ap = alloc_apertures(1);
- if (!ap)
- return -ENOMEM;
-
- ap->ranges[0].base = pci_resource_start(pdev, 0);
- ap->ranges[0].size = pci_resource_len(pdev, 0);
-
-#ifdef CONFIG_X86
- primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-#endif
- drm_fb_helper_remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
- kfree(ap);
-
- return 0;
-}
-
static int cirrus_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int ret;
- ret = cirrus_kick_out_firmware_fb(pdev);
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "cirrusdrmfb");
if (ret)
return ret;
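drm_fb_helper_remove_conflicting_pci_framebuffers() replaces the open-coded apertures_struct dance: it derives the aperture from the given PCI BAR and handles the shadow-ROM primary check internally. The usual probe-time shape, sketched for a hypothetical PCI DRM driver (foo_* names are illustrative; drm_get_pci_dev() is the same legacy registration path cirrus uses):

	static int foo_pci_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
	{
		int ret;

		/* Kick out vesafb/efifb claiming BAR 0 before taking over. */
		ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0,
									"foodrmfb");
		if (ret)
			return ret;

		/* foo_driver: this driver's struct drm_driver (assumed elsewhere) */
		return drm_get_pci_dev(pdev, ent, &foo_driver);
	}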
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index ce9db7aab225..a29f87e98d9d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -146,7 +146,7 @@ struct cirrus_device {
struct cirrus_fbdev {
struct drm_fb_helper helper;
- struct drm_framebuffer gfb;
+ struct drm_framebuffer *gfb;
void *sysram;
int size;
int x1, y1, x2, y2; /* dirty rect */
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index b643ac92801c..68ab1821e15b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -22,14 +22,14 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
struct drm_gem_object *obj;
struct cirrus_bo *bo;
int src_offset, dst_offset;
- int bpp = afbdev->gfb.format->cpp[0];
+ int bpp = afbdev->gfb->format->cpp[0];
int ret = -EBUSY;
bool unmap = false;
bool store_for_later = false;
int x2, y2;
unsigned long flags;
- obj = afbdev->gfb.obj[0];
+ obj = afbdev->gfb->obj[0];
bo = gem_to_cirrus_bo(obj);
/*
@@ -82,7 +82,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
}
for (i = y; i < y + height; i++) {
/* assume equal stride for now */
- src_offset = dst_offset = i * afbdev->gfb.pitches[0] + (x * bpp);
+ src_offset = dst_offset = i * afbdev->gfb->pitches[0] + (x * bpp);
memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
}
@@ -192,23 +192,26 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
return -ENOMEM;
info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info))
- return PTR_ERR(info);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+ goto err_vfree;
+ }
info->par = gfbdev;
- ret = cirrus_framebuffer_init(cdev->dev, &gfbdev->gfb, &mode_cmd, gobj);
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb) {
+ ret = -ENOMEM;
+ goto err_drm_gem_object_put_unlocked;
+ }
+
+ ret = cirrus_framebuffer_init(cdev->dev, fb, &mode_cmd, gobj);
if (ret)
- return ret;
+ goto err_kfree;
gfbdev->sysram = sysram;
gfbdev->size = size;
-
- fb = &gfbdev->gfb;
- if (!fb) {
- DRM_INFO("fb is NULL\n");
- return -EINVAL;
- }
+ gfbdev->gfb = fb;
/* setup helper */
gfbdev->helper.fb = fb;
@@ -241,24 +244,27 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
return 0;
+
+err_kfree:
+ kfree(fb);
+err_drm_gem_object_put_unlocked:
+ drm_gem_object_put_unlocked(gobj);
+err_vfree:
+ vfree(sysram);
+ return ret;
}
static int cirrus_fbdev_destroy(struct drm_device *dev,
struct cirrus_fbdev *gfbdev)
{
- struct drm_framebuffer *gfb = &gfbdev->gfb;
+ struct drm_framebuffer *gfb = gfbdev->gfb;
drm_fb_helper_unregister_fbi(&gfbdev->helper);
- if (gfb->obj[0]) {
- drm_gem_object_put_unlocked(gfb->obj[0]);
- gfb->obj[0] = NULL;
- }
-
vfree(gfbdev->sysram);
drm_fb_helper_fini(&gfbdev->helper);
- drm_framebuffer_unregister_private(gfb);
- drm_framebuffer_cleanup(gfb);
+ if (gfb)
+ drm_framebuffer_put(gfb);
return 0;
}
@@ -271,7 +277,6 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
{
struct cirrus_fbdev *gfbdev;
int ret;
- int bpp_sel = 24;
/*bpp_sel = 8;*/
gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL);
@@ -296,7 +301,7 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(cdev->dev);
- return drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
+ return drm_fb_helper_initial_config(&gfbdev->helper, cirrus_bpp);
}
void cirrus_fbdev_fini(struct cirrus_device *cdev)
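The reworked cirrusfb_create() error handling above is the standard kernel goto-unwind ladder: acquire resources top to bottom, release them bottom to top, one label per step. Reduced to a hypothetical three-step helper for clarity (all foo_* names are illustrative):

	#include <linux/vmalloc.h>

	static int foo_setup(struct foo_device *foo)
	{
		int ret;

		foo->sysram = vmalloc(foo->size);
		if (!foo->sysram)
			return -ENOMEM;

		ret = foo_pin_gem(foo);		/* hypothetical step 2 */
		if (ret)
			goto err_vfree;

		ret = foo_register_fb(foo);	/* hypothetical step 3 */
		if (ret)
			goto err_unpin;

		return 0;

	err_unpin:
		foo_unpin_gem(foo);
	err_vfree:
		vfree(foo->sysram);
		return ret;
	}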
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 60d54e10a34d..57f8fe6d020b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -269,7 +269,7 @@ static void cirrus_bo_unref(struct cirrus_bo **bo)
return;
tbo = &((*bo)->bo);
- ttm_bo_unref(&tbo);
+ ttm_bo_put(tbo);
*bo = NULL;
}
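ttm_bo_unref() took a double pointer and cleared it for the caller; ttm_bo_put() only drops the reference, which is why the hunk above keeps the explicit *bo = NULL. The same unref helper written out against the new API, with a hypothetical foo_bo that embeds the TTM object:

	#include <drm/ttm/ttm_bo_api.h>

	struct foo_bo {
		struct ttm_buffer_object bo;
		/* driver-private fields */
	};

	static void foo_bo_unref(struct foo_bo **bo)
	{
		if (!*bo)
			return;

		ttm_bo_put(&(*bo)->bo);	/* drop the TTM reference */
		*bo = NULL;		/* no longer done by the helper */
	}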
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 336bfda40125..ed7dcf212a34 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -127,7 +127,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
return ret;
}
- if (&cdev->mode_info.gfbdev->gfb == crtc->primary->fb) {
+ if (cdev->mode_info.gfbdev->gfb == crtc->primary->fb) {
/* if pushing console in kmap it */
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret)
@@ -512,7 +512,7 @@ int cirrus_modeset_init(struct cirrus_device *cdev)
cdev->dev->mode_config.max_height = CIRRUS_MAX_FB_HEIGHT;
cdev->dev->mode_config.fb_base = cdev->mc.vram_base;
- cdev->dev->mode_config.preferred_depth = 24;
+ cdev->dev->mode_config.preferred_depth = cirrus_bpp;
/* don't prefer a shadow on virt GPU */
cdev->dev->mode_config.prefer_shadow = 0;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3eb061e11e2e..7ada75919756 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -28,6 +28,7 @@
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
#include <drm/drm_writeback.h>
@@ -309,350 +310,6 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
-static void set_out_fence_for_crtc(struct drm_atomic_state *state,
- struct drm_crtc *crtc, s32 __user *fence_ptr)
-{
- state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
-}
-
-static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
- struct drm_crtc *crtc)
-{
- s32 __user *fence_ptr;
-
- fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
- state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
-
- return fence_ptr;
-}
-
-static int set_out_fence_for_connector(struct drm_atomic_state *state,
- struct drm_connector *connector,
- s32 __user *fence_ptr)
-{
- unsigned int index = drm_connector_index(connector);
-
- if (!fence_ptr)
- return 0;
-
- if (put_user(-1, fence_ptr))
- return -EFAULT;
-
- state->connectors[index].out_fence_ptr = fence_ptr;
-
- return 0;
-}
-
-static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
- struct drm_connector *connector)
-{
- unsigned int index = drm_connector_index(connector);
- s32 __user *fence_ptr;
-
- fence_ptr = state->connectors[index].out_fence_ptr;
- state->connectors[index].out_fence_ptr = NULL;
-
- return fence_ptr;
-}
-
-/**
- * drm_atomic_set_mode_for_crtc - set mode for CRTC
- * @state: the CRTC whose incoming state to update
- * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
- *
- * Set a mode (originating from the kernel) on the desired CRTC state and update
- * the enable property.
- *
- * RETURNS:
- * Zero on success, error code on failure. Cannot return -EDEADLK.
- */
-int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
- const struct drm_display_mode *mode)
-{
- struct drm_crtc *crtc = state->crtc;
- struct drm_mode_modeinfo umode;
-
- /* Early return for no change. */
- if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
- return 0;
-
- drm_property_blob_put(state->mode_blob);
- state->mode_blob = NULL;
-
- if (mode) {
- drm_mode_convert_to_umode(&umode, mode);
- state->mode_blob =
- drm_property_create_blob(state->crtc->dev,
- sizeof(umode),
- &umode);
- if (IS_ERR(state->mode_blob))
- return PTR_ERR(state->mode_blob);
-
- drm_mode_copy(&state->mode, mode);
- state->enable = true;
- DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
- mode->name, crtc->base.id, crtc->name, state);
- } else {
- memset(&state->mode, 0, sizeof(state->mode));
- state->enable = false;
- DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
- crtc->base.id, crtc->name, state);
- }
-
- return 0;
-}
-EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
-
-/**
- * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
- * @state: the CRTC whose incoming state to update
- * @blob: pointer to blob property to use for mode
- *
- * Set a mode (originating from a blob property) on the desired CRTC state.
- * This function will take a reference on the blob property for the CRTC state,
- * and release the reference held on the state's existing mode property, if any
- * was set.
- *
- * RETURNS:
- * Zero on success, error code on failure. Cannot return -EDEADLK.
- */
-int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
- struct drm_property_blob *blob)
-{
- struct drm_crtc *crtc = state->crtc;
-
- if (blob == state->mode_blob)
- return 0;
-
- drm_property_blob_put(state->mode_blob);
- state->mode_blob = NULL;
-
- memset(&state->mode, 0, sizeof(state->mode));
-
- if (blob) {
- int ret;
-
- if (blob->length != sizeof(struct drm_mode_modeinfo)) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n",
- crtc->base.id, crtc->name,
- blob->length);
- return -EINVAL;
- }
-
- ret = drm_mode_convert_umode(crtc->dev,
- &state->mode, blob->data);
- if (ret) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
- crtc->base.id, crtc->name,
- ret, drm_get_mode_status_name(state->mode.status));
- drm_mode_debug_printmodeline(&state->mode);
- return -EINVAL;
- }
-
- state->mode_blob = drm_property_blob_get(blob);
- state->enable = true;
- DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
- state->mode.name, crtc->base.id, crtc->name,
- state);
- } else {
- state->enable = false;
- DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
- crtc->base.id, crtc->name, state);
- }
-
- return 0;
-}
-EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
-
-/**
- * drm_atomic_replace_property_blob_from_id - lookup the new blob and replace the old one with it
- * @dev: DRM device
- * @blob: a pointer to the member blob to be replaced
- * @blob_id: ID of the new blob
- * @expected_size: total expected size of the blob data (in bytes)
- * @expected_elem_size: expected element size of the blob data (in bytes)
- * @replaced: did the blob get replaced?
- *
- * Replace @blob with another blob with the ID @blob_id. If @blob_id is zero
- * @blob becomes NULL.
- *
- * If @expected_size is positive the new blob length is expected to be equal
- * to @expected_size bytes. If @expected_elem_size is positive the new blob
- * length is expected to be a multiple of @expected_elem_size bytes. Otherwise
- * an error is returned.
- *
- * @replaced will indicate to the caller whether the blob was replaced or not.
- * If the old and new blobs were in fact the same blob @replaced will be false
- * otherwise it will be true.
- *
- * RETURNS:
- * Zero on success, error code on failure.
- */
-static int
-drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
- struct drm_property_blob **blob,
- uint64_t blob_id,
- ssize_t expected_size,
- ssize_t expected_elem_size,
- bool *replaced)
-{
- struct drm_property_blob *new_blob = NULL;
-
- if (blob_id != 0) {
- new_blob = drm_property_lookup_blob(dev, blob_id);
- if (new_blob == NULL)
- return -EINVAL;
-
- if (expected_size > 0 &&
- new_blob->length != expected_size) {
- drm_property_blob_put(new_blob);
- return -EINVAL;
- }
- if (expected_elem_size > 0 &&
- new_blob->length % expected_elem_size != 0) {
- drm_property_blob_put(new_blob);
- return -EINVAL;
- }
- }
-
- *replaced |= drm_property_replace_blob(blob, new_blob);
- drm_property_blob_put(new_blob);
-
- return 0;
-}
-
-/**
- * drm_atomic_crtc_set_property - set property on CRTC
- * @crtc: the drm CRTC to set a property on
- * @state: the state object to update with the new property value
- * @property: the property to set
- * @val: the new property value
- *
- * This function handles generic/core properties and calls out to driver's
- * &drm_crtc_funcs.atomic_set_property for driver properties. To ensure
- * consistent behavior you must call this function rather than the driver hook
- * directly.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
- struct drm_crtc_state *state, struct drm_property *property,
- uint64_t val)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *config = &dev->mode_config;
- bool replaced = false;
- int ret;
-
- if (property == config->prop_active)
- state->active = val;
- else if (property == config->prop_mode_id) {
- struct drm_property_blob *mode =
- drm_property_lookup_blob(dev, val);
- ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
- drm_property_blob_put(mode);
- return ret;
- } else if (property == config->degamma_lut_property) {
- ret = drm_atomic_replace_property_blob_from_id(dev,
- &state->degamma_lut,
- val,
- -1, sizeof(struct drm_color_lut),
- &replaced);
- state->color_mgmt_changed |= replaced;
- return ret;
- } else if (property == config->ctm_property) {
- ret = drm_atomic_replace_property_blob_from_id(dev,
- &state->ctm,
- val,
- sizeof(struct drm_color_ctm), -1,
- &replaced);
- state->color_mgmt_changed |= replaced;
- return ret;
- } else if (property == config->gamma_lut_property) {
- ret = drm_atomic_replace_property_blob_from_id(dev,
- &state->gamma_lut,
- val,
- -1, sizeof(struct drm_color_lut),
- &replaced);
- state->color_mgmt_changed |= replaced;
- return ret;
- } else if (property == config->prop_out_fence_ptr) {
- s32 __user *fence_ptr = u64_to_user_ptr(val);
-
- if (!fence_ptr)
- return 0;
-
- if (put_user(-1, fence_ptr))
- return -EFAULT;
-
- set_out_fence_for_crtc(state->state, crtc, fence_ptr);
- } else if (crtc->funcs->atomic_set_property) {
- return crtc->funcs->atomic_set_property(crtc, state, property, val);
- } else {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n",
- crtc->base.id, crtc->name,
- property->base.id, property->name);
- return -EINVAL;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(drm_atomic_crtc_set_property);
-
-/**
- * drm_atomic_crtc_get_property - get property value from CRTC state
- * @crtc: the drm CRTC to set a property on
- * @state: the state object to get the property value from
- * @property: the property to set
- * @val: return location for the property value
- *
- * This function handles generic/core properties and calls out to driver's
- * &drm_crtc_funcs.atomic_get_property for driver properties. To ensure
- * consistent behavior you must call this function rather than the driver hook
- * directly.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-static int
-drm_atomic_crtc_get_property(struct drm_crtc *crtc,
- const struct drm_crtc_state *state,
- struct drm_property *property, uint64_t *val)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *config = &dev->mode_config;
-
- if (property == config->prop_active)
- *val = state->active;
- else if (property == config->prop_mode_id)
- *val = (state->mode_blob) ? state->mode_blob->base.id : 0;
- else if (property == config->degamma_lut_property)
- *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
- else if (property == config->ctm_property)
- *val = (state->ctm) ? state->ctm->base.id : 0;
- else if (property == config->gamma_lut_property)
- *val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
- else if (property == config->prop_out_fence_ptr)
- *val = 0;
- else if (crtc->funcs->atomic_get_property)
- return crtc->funcs->atomic_get_property(crtc, state, property, val);
- else
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * drm_atomic_crtc_check - check crtc state
- * @crtc: crtc to check
- * @state: crtc state to check
- *
- * Provides core sanity checks for crtc state.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
static int drm_atomic_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -728,16 +385,6 @@ static void drm_atomic_crtc_print_state(struct drm_printer *p,
crtc->funcs->atomic_print_state(p, state);
}
-/**
- * drm_atomic_connector_check - check connector state
- * @connector: connector to check
- * @state: connector state to check
- *
- * Provides core sanity checks for connector state.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
static int drm_atomic_connector_check(struct drm_connector *connector,
struct drm_connector_state *state)
{
@@ -836,155 +483,6 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);
-/**
- * drm_atomic_plane_set_property - set property on plane
- * @plane: the drm plane to set a property on
- * @state: the state object to update with the new property value
- * @property: the property to set
- * @val: the new property value
- *
- * This function handles generic/core properties and calls out to driver's
- * &drm_plane_funcs.atomic_set_property for driver properties. To ensure
- * consistent behavior you must call this function rather than the driver hook
- * directly.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-static int drm_atomic_plane_set_property(struct drm_plane *plane,
- struct drm_plane_state *state, struct drm_property *property,
- uint64_t val)
-{
- struct drm_device *dev = plane->dev;
- struct drm_mode_config *config = &dev->mode_config;
-
- if (property == config->prop_fb_id) {
- struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
- drm_atomic_set_fb_for_plane(state, fb);
- if (fb)
- drm_framebuffer_put(fb);
- } else if (property == config->prop_in_fence_fd) {
- if (state->fence)
- return -EINVAL;
-
- if (U642I64(val) == -1)
- return 0;
-
- state->fence = sync_file_get_fence(val);
- if (!state->fence)
- return -EINVAL;
-
- } else if (property == config->prop_crtc_id) {
- struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
- return drm_atomic_set_crtc_for_plane(state, crtc);
- } else if (property == config->prop_crtc_x) {
- state->crtc_x = U642I64(val);
- } else if (property == config->prop_crtc_y) {
- state->crtc_y = U642I64(val);
- } else if (property == config->prop_crtc_w) {
- state->crtc_w = val;
- } else if (property == config->prop_crtc_h) {
- state->crtc_h = val;
- } else if (property == config->prop_src_x) {
- state->src_x = val;
- } else if (property == config->prop_src_y) {
- state->src_y = val;
- } else if (property == config->prop_src_w) {
- state->src_w = val;
- } else if (property == config->prop_src_h) {
- state->src_h = val;
- } else if (property == plane->alpha_property) {
- state->alpha = val;
- } else if (property == plane->rotation_property) {
- if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) {
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n",
- plane->base.id, plane->name, val);
- return -EINVAL;
- }
- state->rotation = val;
- } else if (property == plane->zpos_property) {
- state->zpos = val;
- } else if (property == plane->color_encoding_property) {
- state->color_encoding = val;
- } else if (property == plane->color_range_property) {
- state->color_range = val;
- } else if (plane->funcs->atomic_set_property) {
- return plane->funcs->atomic_set_property(plane, state,
- property, val);
- } else {
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n",
- plane->base.id, plane->name,
- property->base.id, property->name);
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * drm_atomic_plane_get_property - get property value from plane state
- * @plane: the drm plane to set a property on
- * @state: the state object to get the property value from
- * @property: the property to set
- * @val: return location for the property value
- *
- * This function handles generic/core properties and calls out to driver's
- * &drm_plane_funcs.atomic_get_property for driver properties. To ensure
- * consistent behavior you must call this function rather than the driver hook
- * directly.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-static int
-drm_atomic_plane_get_property(struct drm_plane *plane,
- const struct drm_plane_state *state,
- struct drm_property *property, uint64_t *val)
-{
- struct drm_device *dev = plane->dev;
- struct drm_mode_config *config = &dev->mode_config;
-
- if (property == config->prop_fb_id) {
- *val = (state->fb) ? state->fb->base.id : 0;
- } else if (property == config->prop_in_fence_fd) {
- *val = -1;
- } else if (property == config->prop_crtc_id) {
- *val = (state->crtc) ? state->crtc->base.id : 0;
- } else if (property == config->prop_crtc_x) {
- *val = I642U64(state->crtc_x);
- } else if (property == config->prop_crtc_y) {
- *val = I642U64(state->crtc_y);
- } else if (property == config->prop_crtc_w) {
- *val = state->crtc_w;
- } else if (property == config->prop_crtc_h) {
- *val = state->crtc_h;
- } else if (property == config->prop_src_x) {
- *val = state->src_x;
- } else if (property == config->prop_src_y) {
- *val = state->src_y;
- } else if (property == config->prop_src_w) {
- *val = state->src_w;
- } else if (property == config->prop_src_h) {
- *val = state->src_h;
- } else if (property == plane->alpha_property) {
- *val = state->alpha;
- } else if (property == plane->rotation_property) {
- *val = state->rotation;
- } else if (property == plane->zpos_property) {
- *val = state->zpos;
- } else if (property == plane->color_encoding_property) {
- *val = state->color_encoding;
- } else if (property == plane->color_range_property) {
- *val = state->color_range;
- } else if (plane->funcs->atomic_get_property) {
- return plane->funcs->atomic_get_property(plane, state, property, val);
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-
static bool
plane_switching_crtc(struct drm_atomic_state *state,
struct drm_plane *plane,
@@ -1324,111 +822,6 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);
-/**
- * drm_atomic_connector_set_property - set property on connector.
- * @connector: the drm connector to set a property on
- * @state: the state object to update with the new property value
- * @property: the property to set
- * @val: the new property value
- *
- * This function handles generic/core properties and calls out to driver's
- * &drm_connector_funcs.atomic_set_property for driver properties. To ensure
- * consistent behavior you must call this function rather than the driver hook
- * directly.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-static int drm_atomic_connector_set_property(struct drm_connector *connector,
- struct drm_connector_state *state, struct drm_property *property,
- uint64_t val)
-{
- struct drm_device *dev = connector->dev;
- struct drm_mode_config *config = &dev->mode_config;
-
- if (property == config->prop_crtc_id) {
- struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
- return drm_atomic_set_crtc_for_connector(state, crtc);
- } else if (property == config->dpms_property) {
- /* setting DPMS property requires special handling, which
- * is done in legacy setprop path for us. Disallow (for
- * now?) atomic writes to DPMS property:
- */
- return -EINVAL;
- } else if (property == config->tv_select_subconnector_property) {
- state->tv.subconnector = val;
- } else if (property == config->tv_left_margin_property) {
- state->tv.margins.left = val;
- } else if (property == config->tv_right_margin_property) {
- state->tv.margins.right = val;
- } else if (property == config->tv_top_margin_property) {
- state->tv.margins.top = val;
- } else if (property == config->tv_bottom_margin_property) {
- state->tv.margins.bottom = val;
- } else if (property == config->tv_mode_property) {
- state->tv.mode = val;
- } else if (property == config->tv_brightness_property) {
- state->tv.brightness = val;
- } else if (property == config->tv_contrast_property) {
- state->tv.contrast = val;
- } else if (property == config->tv_flicker_reduction_property) {
- state->tv.flicker_reduction = val;
- } else if (property == config->tv_overscan_property) {
- state->tv.overscan = val;
- } else if (property == config->tv_saturation_property) {
- state->tv.saturation = val;
- } else if (property == config->tv_hue_property) {
- state->tv.hue = val;
- } else if (property == config->link_status_property) {
- /* Never downgrade from GOOD to BAD on userspace's request here,
- * only hw issues can do that.
- *
- * For an atomic property the userspace doesn't need to be able
- * to understand all the properties, but needs to be able to
- * restore the state it wants on VT switch. So if the userspace
- * tries to change the link_status from GOOD to BAD, driver
- * silently rejects it and returns a 0. This prevents userspace
- * from accidently breaking the display when it restores the
- * state.
- */
- if (state->link_status != DRM_LINK_STATUS_GOOD)
- state->link_status = val;
- } else if (property == config->aspect_ratio_property) {
- state->picture_aspect_ratio = val;
- } else if (property == config->content_type_property) {
- state->content_type = val;
- } else if (property == connector->scaling_mode_property) {
- state->scaling_mode = val;
- } else if (property == connector->content_protection_property) {
- if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
- DRM_DEBUG_KMS("only drivers can set CP Enabled\n");
- return -EINVAL;
- }
- state->content_protection = val;
- } else if (property == config->writeback_fb_id_property) {
- struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
- int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
- if (fb)
- drm_framebuffer_put(fb);
- return ret;
- } else if (property == config->writeback_out_fence_ptr_property) {
- s32 __user *fence_ptr = u64_to_user_ptr(val);
-
- return set_out_fence_for_connector(state->state, connector,
- fence_ptr);
- } else if (connector->funcs->atomic_set_property) {
- return connector->funcs->atomic_set_property(connector,
- state, property, val);
- } else {
- DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]]\n",
- connector->base.id, connector->name,
- property->base.id, property->name);
- return -EINVAL;
- }
-
- return 0;
-}
-
static void drm_atomic_connector_print_state(struct drm_printer *p,
const struct drm_connector_state *state)
{
@@ -1446,360 +839,6 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
}
/**
- * drm_atomic_connector_get_property - get property value from connector state
- * @connector: the drm connector to set a property on
- * @state: the state object to get the property value from
- * @property: the property to set
- * @val: return location for the property value
- *
- * This function handles generic/core properties and calls out to driver's
- * &drm_connector_funcs.atomic_get_property for driver properties. To ensure
- * consistent behavior you must call this function rather than the driver hook
- * directly.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-static int
-drm_atomic_connector_get_property(struct drm_connector *connector,
- const struct drm_connector_state *state,
- struct drm_property *property, uint64_t *val)
-{
- struct drm_device *dev = connector->dev;
- struct drm_mode_config *config = &dev->mode_config;
-
- if (property == config->prop_crtc_id) {
- *val = (state->crtc) ? state->crtc->base.id : 0;
- } else if (property == config->dpms_property) {
- *val = connector->dpms;
- } else if (property == config->tv_select_subconnector_property) {
- *val = state->tv.subconnector;
- } else if (property == config->tv_left_margin_property) {
- *val = state->tv.margins.left;
- } else if (property == config->tv_right_margin_property) {
- *val = state->tv.margins.right;
- } else if (property == config->tv_top_margin_property) {
- *val = state->tv.margins.top;
- } else if (property == config->tv_bottom_margin_property) {
- *val = state->tv.margins.bottom;
- } else if (property == config->tv_mode_property) {
- *val = state->tv.mode;
- } else if (property == config->tv_brightness_property) {
- *val = state->tv.brightness;
- } else if (property == config->tv_contrast_property) {
- *val = state->tv.contrast;
- } else if (property == config->tv_flicker_reduction_property) {
- *val = state->tv.flicker_reduction;
- } else if (property == config->tv_overscan_property) {
- *val = state->tv.overscan;
- } else if (property == config->tv_saturation_property) {
- *val = state->tv.saturation;
- } else if (property == config->tv_hue_property) {
- *val = state->tv.hue;
- } else if (property == config->link_status_property) {
- *val = state->link_status;
- } else if (property == config->aspect_ratio_property) {
- *val = state->picture_aspect_ratio;
- } else if (property == config->content_type_property) {
- *val = state->content_type;
- } else if (property == connector->scaling_mode_property) {
- *val = state->scaling_mode;
- } else if (property == connector->content_protection_property) {
- *val = state->content_protection;
- } else if (property == config->writeback_fb_id_property) {
- /* Writeback framebuffer is one-shot, write and forget */
- *val = 0;
- } else if (property == config->writeback_out_fence_ptr_property) {
- *val = 0;
- } else if (connector->funcs->atomic_get_property) {
- return connector->funcs->atomic_get_property(connector,
- state, property, val);
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-
-int drm_atomic_get_property(struct drm_mode_object *obj,
- struct drm_property *property, uint64_t *val)
-{
- struct drm_device *dev = property->dev;
- int ret;
-
- switch (obj->type) {
- case DRM_MODE_OBJECT_CONNECTOR: {
- struct drm_connector *connector = obj_to_connector(obj);
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- ret = drm_atomic_connector_get_property(connector,
- connector->state, property, val);
- break;
- }
- case DRM_MODE_OBJECT_CRTC: {
- struct drm_crtc *crtc = obj_to_crtc(obj);
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
- ret = drm_atomic_crtc_get_property(crtc,
- crtc->state, property, val);
- break;
- }
- case DRM_MODE_OBJECT_PLANE: {
- struct drm_plane *plane = obj_to_plane(obj);
- WARN_ON(!drm_modeset_is_locked(&plane->mutex));
- ret = drm_atomic_plane_get_property(plane,
- plane->state, property, val);
- break;
- }
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-/**
- * drm_atomic_set_crtc_for_plane - set crtc for plane
- * @plane_state: the plane whose incoming state to update
- * @crtc: crtc to use for the plane
- *
- * Changing the assigned crtc for a plane requires us to grab the lock and state
- * for the new crtc, as needed. This function takes care of all these details
- * besides updating the pointer in the state object itself.
- *
- * Returns:
- * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
- * then the w/w mutex code has detected a deadlock and the entire atomic
- * sequence must be restarted. All other errors are fatal.
- */
-int
-drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
- struct drm_crtc *crtc)
-{
- struct drm_plane *plane = plane_state->plane;
- struct drm_crtc_state *crtc_state;
- /* Nothing to do for same crtc*/
- if (plane_state->crtc == crtc)
- return 0;
- if (plane_state->crtc) {
- crtc_state = drm_atomic_get_crtc_state(plane_state->state,
- plane_state->crtc);
- if (WARN_ON(IS_ERR(crtc_state)))
- return PTR_ERR(crtc_state);
-
- crtc_state->plane_mask &= ~drm_plane_mask(plane);
- }
-
- plane_state->crtc = crtc;
-
- if (crtc) {
- crtc_state = drm_atomic_get_crtc_state(plane_state->state,
- crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- crtc_state->plane_mask |= drm_plane_mask(plane);
- }
-
- if (crtc)
- DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n",
- plane->base.id, plane->name, plane_state,
- crtc->base.id, crtc->name);
- else
- DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n",
- plane->base.id, plane->name, plane_state);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
-
-/**
- * drm_atomic_set_fb_for_plane - set framebuffer for plane
- * @plane_state: atomic state object for the plane
- * @fb: fb to use for the plane
- *
- * Changing the assigned framebuffer for a plane requires us to grab a reference
- * to the new fb and drop the reference to the old fb, if there is one. This
- * function takes care of all these details besides updating the pointer in the
- * state object itself.
- */
-void
-drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
- struct drm_framebuffer *fb)
-{
- struct drm_plane *plane = plane_state->plane;
-
- if (fb)
- DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n",
- fb->base.id, plane->base.id, plane->name,
- plane_state);
- else
- DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n",
- plane->base.id, plane->name, plane_state);
-
- drm_framebuffer_assign(&plane_state->fb, fb);
-}
-EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
-
-/**
- * drm_atomic_set_fence_for_plane - set fence for plane
- * @plane_state: atomic state object for the plane
- * @fence: dma_fence to use for the plane
- *
- * Helper to setup the plane_state fence in case it is not set yet.
- * By using this drivers doesn't need to worry if the user choose
- * implicit or explicit fencing.
- *
- * This function will not set the fence to the state if it was set
- * via explicit fencing interfaces on the atomic ioctl. In that case it will
- * drop the reference to the fence as we are not storing it anywhere.
- * Otherwise, if &drm_plane_state.fence is not set this function we just set it
- * with the received implicit fence. In both cases this function consumes a
- * reference for @fence.
- *
- * This way explicit fencing can be used to overrule implicit fencing, which is
- * important to make explicit fencing use-cases work: One example is using one
- * buffer for 2 screens with different refresh rates. Implicit fencing will
- * clamp rendering to the refresh rate of the slower screen, whereas explicit
- * fence allows 2 independent render and display loops on a single buffer. If a
- * driver allows obeys both implicit and explicit fences for plane updates, then
- * it will break all the benefits of explicit fencing.
- */
-void
-drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
- struct dma_fence *fence)
-{
- if (plane_state->fence) {
- dma_fence_put(fence);
- return;
- }
-
- plane_state->fence = fence;
-}
-EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
-
-/**
- * drm_atomic_set_crtc_for_connector - set crtc for connector
- * @conn_state: atomic state object for the connector
- * @crtc: crtc to use for the connector
- *
- * Changing the assigned crtc for a connector requires us to grab the lock and
- * state for the new crtc, as needed. This function takes care of all these
- * details besides updating the pointer in the state object itself.
- *
- * Returns:
- * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
- * then the w/w mutex code has detected a deadlock and the entire atomic
- * sequence must be restarted. All other errors are fatal.
- */
-int
-drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
- struct drm_crtc *crtc)
-{
- struct drm_connector *connector = conn_state->connector;
- struct drm_crtc_state *crtc_state;
-
- if (conn_state->crtc == crtc)
- return 0;
-
- if (conn_state->crtc) {
- crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
- conn_state->crtc);
-
- crtc_state->connector_mask &=
- ~drm_connector_mask(conn_state->connector);
-
- drm_connector_put(conn_state->connector);
- conn_state->crtc = NULL;
- }
-
- if (crtc) {
- crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
-
- crtc_state->connector_mask |=
- drm_connector_mask(conn_state->connector);
-
- drm_connector_get(conn_state->connector);
- conn_state->crtc = crtc;
-
- DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n",
- connector->base.id, connector->name,
- conn_state, crtc->base.id, crtc->name);
- } else {
- DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n",
- connector->base.id, connector->name,
- conn_state);
- }
-
- return 0;
-}
-EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
-
-/*
- * drm_atomic_get_writeback_job - return or allocate a writeback job
- * @conn_state: Connector state to get the job for
- *
- * Writeback jobs have a different lifetime to the atomic state they are
- * associated with. This convenience function takes care of allocating a job
- * if there isn't yet one associated with the connector state, otherwise
- * it just returns the existing job.
- *
- * Returns: The writeback job for the given connector state
- */
-static struct drm_writeback_job *
-drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
-{
- WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
-
- if (!conn_state->writeback_job)
- conn_state->writeback_job =
- kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
-
- return conn_state->writeback_job;
-}
-
-/**
- * drm_atomic_set_writeback_fb_for_connector - set writeback framebuffer
- * @conn_state: atomic state object for the connector
- * @fb: fb to use for the connector
- *
- * This is used to set the framebuffer for a writeback connector, which outputs
- * to a buffer instead of an actual physical connector.
- * Changing the assigned framebuffer requires us to grab a reference to the new
- * fb and drop the reference to the old fb, if there is one. This function
- * takes care of all these details besides updating the pointer in the
- * state object itself.
- *
- * Note: The only way conn_state can already have an fb set is if the commit
- * sets the property more than once.
- *
- * See also: drm_writeback_connector_init()
- *
- * Returns: 0 on success
- */
-int drm_atomic_set_writeback_fb_for_connector(
- struct drm_connector_state *conn_state,
- struct drm_framebuffer *fb)
-{
- struct drm_writeback_job *job =
- drm_atomic_get_writeback_job(conn_state);
- if (!job)
- return -ENOMEM;
-
- drm_framebuffer_assign(&job->fb, fb);
-
- if (fb)
- DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
- fb->base.id, conn_state);
- else
- DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n",
- conn_state);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_atomic_set_writeback_fb_for_connector);
-
-/**
* drm_atomic_add_affected_connectors - add connectors for crtc
* @state: atomic state
* @crtc: DRM crtc
@@ -2035,7 +1074,7 @@ int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
}
EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
-static void drm_atomic_print_state(const struct drm_atomic_state *state)
+void drm_atomic_print_state(const struct drm_atomic_state *state)
{
struct drm_printer p = drm_info_printer(state->dev->dev);
struct drm_plane *plane;
@@ -2142,544 +1181,3 @@ int drm_atomic_debugfs_init(struct drm_minor *minor)
}
#endif
-/*
- * The big monster ioctl
- */
-
-static struct drm_pending_vblank_event *create_vblank_event(
- struct drm_crtc *crtc, uint64_t user_data)
-{
- struct drm_pending_vblank_event *e = NULL;
-
- e = kzalloc(sizeof *e, GFP_KERNEL);
- if (!e)
- return NULL;
-
- e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
- e->event.base.length = sizeof(e->event);
- e->event.vbl.crtc_id = crtc->base.id;
- e->event.vbl.user_data = user_data;
-
- return e;
-}
-
-int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
- struct drm_connector *connector,
- int mode)
-{
- struct drm_connector *tmp_connector;
- struct drm_connector_state *new_conn_state;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- int i, ret, old_mode = connector->dpms;
- bool active = false;
-
- ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
- state->acquire_ctx);
- if (ret)
- return ret;
-
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
- connector->dpms = mode;
-
- crtc = connector->state->crtc;
- if (!crtc)
- goto out;
- ret = drm_atomic_add_affected_connectors(state, crtc);
- if (ret)
- goto out;
-
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state)) {
- ret = PTR_ERR(crtc_state);
- goto out;
- }
-
- for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) {
- if (new_conn_state->crtc != crtc)
- continue;
- if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
- active = true;
- break;
- }
- }
-
- crtc_state->active = active;
- ret = drm_atomic_commit(state);
-out:
- if (ret != 0)
- connector->dpms = old_mode;
- return ret;
-}
-
-int drm_atomic_set_property(struct drm_atomic_state *state,
- struct drm_mode_object *obj,
- struct drm_property *prop,
- uint64_t prop_value)
-{
- struct drm_mode_object *ref;
- int ret;
-
- if (!drm_property_change_valid_get(prop, prop_value, &ref))
- return -EINVAL;
-
- switch (obj->type) {
- case DRM_MODE_OBJECT_CONNECTOR: {
- struct drm_connector *connector = obj_to_connector(obj);
- struct drm_connector_state *connector_state;
-
- connector_state = drm_atomic_get_connector_state(state, connector);
- if (IS_ERR(connector_state)) {
- ret = PTR_ERR(connector_state);
- break;
- }
-
- ret = drm_atomic_connector_set_property(connector,
- connector_state, prop, prop_value);
- break;
- }
- case DRM_MODE_OBJECT_CRTC: {
- struct drm_crtc *crtc = obj_to_crtc(obj);
- struct drm_crtc_state *crtc_state;
-
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state)) {
- ret = PTR_ERR(crtc_state);
- break;
- }
-
- ret = drm_atomic_crtc_set_property(crtc,
- crtc_state, prop, prop_value);
- break;
- }
- case DRM_MODE_OBJECT_PLANE: {
- struct drm_plane *plane = obj_to_plane(obj);
- struct drm_plane_state *plane_state;
-
- plane_state = drm_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state)) {
- ret = PTR_ERR(plane_state);
- break;
- }
-
- ret = drm_atomic_plane_set_property(plane,
- plane_state, prop, prop_value);
- break;
- }
- default:
- ret = -EINVAL;
- break;
- }
-
- drm_property_change_valid_put(prop, ref);
- return ret;
-}
-
-/**
- * DOC: explicit fencing properties
- *
- * Explicit fencing allows userspace to control the buffer synchronization
- * between devices. A Fence or a group of fences are transfered to/from
- * userspace using Sync File fds and there are two DRM properties for that.
- * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
- * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
- *
- * As a contrast, with implicit fencing the kernel keeps track of any
- * ongoing rendering, and automatically ensures that the atomic update waits
- * for any pending rendering to complete. For shared buffers represented with
- * a &struct dma_buf this is tracked in &struct reservation_object.
- * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
- * whereas explicit fencing is what Android wants.
- *
- * "IN_FENCE_FD”:
- * Use this property to pass a fence that DRM should wait on before
- * proceeding with the Atomic Commit request and show the framebuffer for
- * the plane on the screen. The fence can be either a normal fence or a
- * merged one, the sync_file framework will handle both cases and use a
- * fence_array if a merged fence is received. Passing -1 here means no
- * fences to wait on.
- *
- * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
- * it will only check if the Sync File is a valid one.
- *
- * On the driver side the fence is stored on the @fence parameter of
- * &struct drm_plane_state. Drivers which also support implicit fencing
- * should set the implicit fence using drm_atomic_set_fence_for_plane(),
- * to make sure there's consistent behaviour between drivers in precedence
- * of implicit vs. explicit fencing.
- *
- * "OUT_FENCE_PTR”:
- * Use this property to pass a file descriptor pointer to DRM. Once the
- * Atomic Commit request call returns OUT_FENCE_PTR will be filled with
- * the file descriptor number of a Sync File. This Sync File contains the
- * CRTC fence that will be signaled when all framebuffers present on the
- * Atomic Commit * request for that given CRTC are scanned out on the
- * screen.
- *
- * The Atomic Commit request fails if a invalid pointer is passed. If the
- * Atomic Commit request fails for any other reason the out fence fd
- * returned will be -1. On a Atomic Commit with the
- * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
- *
- * Note that out-fences don't have a special interface to drivers and are
- * internally represented by a &struct drm_pending_vblank_event in struct
- * &drm_crtc_state, which is also used by the nonblocking atomic commit
- * helpers and for the DRM event handling for existing userspace.
- */
-
-struct drm_out_fence_state {
- s32 __user *out_fence_ptr;
- struct sync_file *sync_file;
- int fd;
-};
-
-static int setup_out_fence(struct drm_out_fence_state *fence_state,
- struct dma_fence *fence)
-{
- fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
- if (fence_state->fd < 0)
- return fence_state->fd;
-
- if (put_user(fence_state->fd, fence_state->out_fence_ptr))
- return -EFAULT;
-
- fence_state->sync_file = sync_file_create(fence);
- if (!fence_state->sync_file)
- return -ENOMEM;
-
- return 0;
-}
-
-static int prepare_signaling(struct drm_device *dev,
- struct drm_atomic_state *state,
- struct drm_mode_atomic *arg,
- struct drm_file *file_priv,
- struct drm_out_fence_state **fence_state,
- unsigned int *num_fences)
-{
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- struct drm_connector *conn;
- struct drm_connector_state *conn_state;
- int i, c = 0, ret;
-
- if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
- return 0;
-
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- s32 __user *fence_ptr;
-
- fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
-
- if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
- struct drm_pending_vblank_event *e;
-
- e = create_vblank_event(crtc, arg->user_data);
- if (!e)
- return -ENOMEM;
-
- crtc_state->event = e;
- }
-
- if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
- struct drm_pending_vblank_event *e = crtc_state->event;
-
- if (!file_priv)
- continue;
-
- ret = drm_event_reserve_init(dev, file_priv, &e->base,
- &e->event.base);
- if (ret) {
- kfree(e);
- crtc_state->event = NULL;
- return ret;
- }
- }
-
- if (fence_ptr) {
- struct dma_fence *fence;
- struct drm_out_fence_state *f;
-
- f = krealloc(*fence_state, sizeof(**fence_state) *
- (*num_fences + 1), GFP_KERNEL);
- if (!f)
- return -ENOMEM;
-
- memset(&f[*num_fences], 0, sizeof(*f));
-
- f[*num_fences].out_fence_ptr = fence_ptr;
- *fence_state = f;
-
- fence = drm_crtc_create_fence(crtc);
- if (!fence)
- return -ENOMEM;
-
- ret = setup_out_fence(&f[(*num_fences)++], fence);
- if (ret) {
- dma_fence_put(fence);
- return ret;
- }
-
- crtc_state->event->base.fence = fence;
- }
-
- c++;
- }
-
- for_each_new_connector_in_state(state, conn, conn_state, i) {
- struct drm_writeback_connector *wb_conn;
- struct drm_writeback_job *job;
- struct drm_out_fence_state *f;
- struct dma_fence *fence;
- s32 __user *fence_ptr;
-
- fence_ptr = get_out_fence_for_connector(state, conn);
- if (!fence_ptr)
- continue;
-
- job = drm_atomic_get_writeback_job(conn_state);
- if (!job)
- return -ENOMEM;
-
- f = krealloc(*fence_state, sizeof(**fence_state) *
- (*num_fences + 1), GFP_KERNEL);
- if (!f)
- return -ENOMEM;
-
- memset(&f[*num_fences], 0, sizeof(*f));
-
- f[*num_fences].out_fence_ptr = fence_ptr;
- *fence_state = f;
-
- wb_conn = drm_connector_to_writeback(conn);
- fence = drm_writeback_get_out_fence(wb_conn);
- if (!fence)
- return -ENOMEM;
-
- ret = setup_out_fence(&f[(*num_fences)++], fence);
- if (ret) {
- dma_fence_put(fence);
- return ret;
- }
-
- job->out_fence = fence;
- }
-
- /*
- * Having this flag means user mode pends on event which will never
- * reach due to lack of at least one CRTC for signaling
- */
- if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
- return -EINVAL;
-
- return 0;
-}
-
-static void complete_signaling(struct drm_device *dev,
- struct drm_atomic_state *state,
- struct drm_out_fence_state *fence_state,
- unsigned int num_fences,
- bool install_fds)
-{
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- int i;
-
- if (install_fds) {
- for (i = 0; i < num_fences; i++)
- fd_install(fence_state[i].fd,
- fence_state[i].sync_file->file);
-
- kfree(fence_state);
- return;
- }
-
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- struct drm_pending_vblank_event *event = crtc_state->event;
- /*
- * Free the allocated event. drm_atomic_helper_setup_commit
- * can allocate an event too, so only free it if it's ours
- * to prevent a double free in drm_atomic_state_clear.
- */
- if (event && (event->base.fence || event->base.file_priv)) {
- drm_event_cancel_free(dev, &event->base);
- crtc_state->event = NULL;
- }
- }
-
- if (!fence_state)
- return;
-
- for (i = 0; i < num_fences; i++) {
- if (fence_state[i].sync_file)
- fput(fence_state[i].sync_file->file);
- if (fence_state[i].fd >= 0)
- put_unused_fd(fence_state[i].fd);
-
- /* If this fails log error to the user */
- if (fence_state[i].out_fence_ptr &&
- put_user(-1, fence_state[i].out_fence_ptr))
- DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
- }
-
- kfree(fence_state);
-}
-
-int drm_mode_atomic_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
-{
- struct drm_mode_atomic *arg = data;
- uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
- uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
- uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
- uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
- unsigned int copied_objs, copied_props;
- struct drm_atomic_state *state;
- struct drm_modeset_acquire_ctx ctx;
- struct drm_out_fence_state *fence_state;
- int ret = 0;
- unsigned int i, j, num_fences;
-
- /* disallow for drivers not supporting atomic: */
- if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
- return -EINVAL;
-
- /* disallow for userspace that has not enabled atomic cap (even
- * though this may be a bit overkill, since legacy userspace
- * wouldn't know how to call this ioctl)
- */
- if (!file_priv->atomic)
- return -EINVAL;
-
- if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
- return -EINVAL;
-
- if (arg->reserved)
- return -EINVAL;
-
- if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
- !dev->mode_config.async_page_flip)
- return -EINVAL;
-
- /* can't test and expect an event at the same time. */
- if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
- (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
- return -EINVAL;
-
- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-
- state = drm_atomic_state_alloc(dev);
- if (!state)
- return -ENOMEM;
-
- state->acquire_ctx = &ctx;
- state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
-
-retry:
- copied_objs = 0;
- copied_props = 0;
- fence_state = NULL;
- num_fences = 0;
-
- for (i = 0; i < arg->count_objs; i++) {
- uint32_t obj_id, count_props;
- struct drm_mode_object *obj;
-
- if (get_user(obj_id, objs_ptr + copied_objs)) {
- ret = -EFAULT;
- goto out;
- }
-
- obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY);
- if (!obj) {
- ret = -ENOENT;
- goto out;
- }
-
- if (!obj->properties) {
- drm_mode_object_put(obj);
- ret = -ENOENT;
- goto out;
- }
-
- if (get_user(count_props, count_props_ptr + copied_objs)) {
- drm_mode_object_put(obj);
- ret = -EFAULT;
- goto out;
- }
-
- copied_objs++;
-
- for (j = 0; j < count_props; j++) {
- uint32_t prop_id;
- uint64_t prop_value;
- struct drm_property *prop;
-
- if (get_user(prop_id, props_ptr + copied_props)) {
- drm_mode_object_put(obj);
- ret = -EFAULT;
- goto out;
- }
-
- prop = drm_mode_obj_find_prop_id(obj, prop_id);
- if (!prop) {
- drm_mode_object_put(obj);
- ret = -ENOENT;
- goto out;
- }
-
- if (copy_from_user(&prop_value,
- prop_values_ptr + copied_props,
- sizeof(prop_value))) {
- drm_mode_object_put(obj);
- ret = -EFAULT;
- goto out;
- }
-
- ret = drm_atomic_set_property(state, obj, prop,
- prop_value);
- if (ret) {
- drm_mode_object_put(obj);
- goto out;
- }
-
- copied_props++;
- }
-
- drm_mode_object_put(obj);
- }
-
- ret = prepare_signaling(dev, state, arg, file_priv, &fence_state,
- &num_fences);
- if (ret)
- goto out;
-
- if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
- ret = drm_atomic_check_only(state);
- } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
- ret = drm_atomic_nonblocking_commit(state);
- } else {
- if (unlikely(drm_debug & DRM_UT_STATE))
- drm_atomic_print_state(state);
-
- ret = drm_atomic_commit(state);
- }
-
-out:
- complete_signaling(dev, state, fence_state, num_fences, !ret);
-
- if (ret == -EDEADLK) {
- drm_atomic_state_clear(state);
- ret = drm_modeset_backoff(&ctx);
- if (!ret)
- goto retry;
- }
-
- drm_atomic_state_put(state);
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- return ret;
-}
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 80be74df7ba6..3cf1aa132778 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -27,6 +27,7 @@
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
@@ -3555,6 +3556,29 @@ void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
/**
+ * __drm_atomic_helper_plane_reset - resets plane state to default values
+ * @plane: plane object, must not be NULL
+ * @state: atomic plane state, must not be NULL
+ *
+ * Initializes plane state to default. This is useful for drivers that subclass
+ * the plane state.
+ */
+void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ state->plane = plane;
+ state->rotation = DRM_MODE_ROTATE_0;
+
+ /* Reset the alpha value to fully opaque if it matters */
+ if (plane->alpha_property)
+ state->alpha = plane->alpha_property->values[1];
+ state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
+
+ plane->state = state;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_reset);
+
+/**
* drm_atomic_helper_plane_reset - default &drm_plane_funcs.reset hook for planes
* @plane: drm plane
*
@@ -3568,15 +3592,8 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane)
kfree(plane->state);
plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
-
- if (plane->state) {
- plane->state->plane = plane;
- plane->state->rotation = DRM_MODE_ROTATE_0;
-
- /* Reset the alpha value to fully opaque if it matters */
- if (plane->alpha_property)
- plane->state->alpha = plane->alpha_property->values[1];
- }
+ if (plane->state)
+ __drm_atomic_helper_plane_reset(plane, plane->state);
}
EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
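A minimal sketch of how a driver that subclasses &drm_plane_state might use the new __drm_atomic_helper_plane_reset() from its &drm_plane_funcs.reset hook; my_plane_state and its sharpness member are made-up placeholders:

struct my_plane_state {
	struct drm_plane_state base;
	u32 sharpness;				/* driver-private example knob */
};

static void my_plane_reset(struct drm_plane *plane)
{
	struct my_plane_state *state;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return;

	/* Fills in the base defaults (rotation, alpha, blend mode) and
	 * points plane->state at &state->base. */
	__drm_atomic_helper_plane_reset(plane, &state->base);

	state->sharpness = 0;			/* driver-specific default */
}
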
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
new file mode 100644
index 000000000000..d5b7f315098c
--- /dev/null
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -0,0 +1,1393 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <[email protected]>
+ * Daniel Vetter <[email protected]>
+ */
+
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_print.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_writeback.h>
+#include <drm/drm_vblank.h>
+
+#include <linux/dma-fence.h>
+#include <linux/uaccess.h>
+#include <linux/sync_file.h>
+#include <linux/file.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * This file contains the marshalling and demarshalling glue for the atomic UAPI
+ * in all its forms: the monster ATOMIC IOCTL itself, code for GET_PROPERTY and
+ * SET_PROPERTY IOCTLs, plus interface functions for compatibility helpers and
+ * drivers which have special needs to construct their own atomic updates, e.g.
+ * for load detect or similar.
+ */
+
+/**
+ * drm_atomic_set_mode_for_crtc - set mode for CRTC
+ * @state: the CRTC whose incoming state to update
+ * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
+ *
+ * Set a mode (originating from the kernel) on the desired CRTC state and update
+ * the enable property.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure. Cannot return -EDEADLK.
+ */
+int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
+ const struct drm_display_mode *mode)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_mode_modeinfo umode;
+
+ /* Early return for no change. */
+ if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
+ return 0;
+
+ drm_property_blob_put(state->mode_blob);
+ state->mode_blob = NULL;
+
+ if (mode) {
+ drm_mode_convert_to_umode(&umode, mode);
+ state->mode_blob =
+ drm_property_create_blob(state->crtc->dev,
+ sizeof(umode),
+ &umode);
+ if (IS_ERR(state->mode_blob))
+ return PTR_ERR(state->mode_blob);
+
+ drm_mode_copy(&state->mode, mode);
+ state->enable = true;
+ DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
+ mode->name, crtc->base.id, crtc->name, state);
+ } else {
+ memset(&state->mode, 0, sizeof(state->mode));
+ state->enable = false;
+ DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
+ crtc->base.id, crtc->name, state);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
+
+/**
+ * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
+ * @state: the CRTC whose incoming state to update
+ * @blob: pointer to blob property to use for mode
+ *
+ * Set a mode (originating from a blob property) on the desired CRTC state.
+ * This function will take a reference on the blob property for the CRTC state,
+ * and release the reference held on the state's existing mode property, if any
+ * was set.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure. Cannot return -EDEADLK.
+ */
+int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
+ struct drm_property_blob *blob)
+{
+ struct drm_crtc *crtc = state->crtc;
+
+ if (blob == state->mode_blob)
+ return 0;
+
+ drm_property_blob_put(state->mode_blob);
+ state->mode_blob = NULL;
+
+ memset(&state->mode, 0, sizeof(state->mode));
+
+ if (blob) {
+ int ret;
+
+ if (blob->length != sizeof(struct drm_mode_modeinfo)) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n",
+ crtc->base.id, crtc->name,
+ blob->length);
+ return -EINVAL;
+ }
+
+ ret = drm_mode_convert_umode(crtc->dev,
+ &state->mode, blob->data);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
+ crtc->base.id, crtc->name,
+ ret, drm_get_mode_status_name(state->mode.status));
+ drm_mode_debug_printmodeline(&state->mode);
+ return -EINVAL;
+ }
+
+ state->mode_blob = drm_property_blob_get(blob);
+ state->enable = true;
+ DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
+ state->mode.name, crtc->base.id, crtc->name,
+ state);
+ } else {
+ state->enable = false;
+ DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
+ crtc->base.id, crtc->name, state);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
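A hedged sketch of how a driver-built atomic update (load detect, for example) might use drm_atomic_set_mode_for_crtc() to light up a CRTC; my_enable_crtc() is hypothetical and the caller is assumed to hold an acquire context:

static int my_enable_crtc(struct drm_atomic_state *state,
			  struct drm_crtc *crtc,
			  const struct drm_display_mode *mode)
{
	struct drm_crtc_state *crtc_state;
	int ret;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);	/* may be -EDEADLK, caller retries */

	/* Duplicates the mode into a blob and sets crtc_state->enable. */
	ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
	if (ret)
		return ret;

	crtc_state->active = true;
	return 0;
}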
+
+/**
+ * drm_atomic_set_crtc_for_plane - set crtc for plane
+ * @plane_state: the plane whose incoming state to update
+ * @crtc: crtc to use for the plane
+ *
+ * Changing the assigned crtc for a plane requires us to grab the lock and state
+ * for the new crtc, as needed. This function takes care of all these details
+ * besides updating the pointer in the state object itself.
+ *
+ * Returns:
+ * 0 on success; can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
+ * then the w/w mutex code has detected a deadlock and the entire atomic
+ * sequence must be restarted. All other errors are fatal.
+ */
+int
+drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
+ struct drm_crtc *crtc)
+{
+ struct drm_plane *plane = plane_state->plane;
+ struct drm_crtc_state *crtc_state;
+	/* Nothing to do for same crtc */
+ if (plane_state->crtc == crtc)
+ return 0;
+ if (plane_state->crtc) {
+ crtc_state = drm_atomic_get_crtc_state(plane_state->state,
+ plane_state->crtc);
+ if (WARN_ON(IS_ERR(crtc_state)))
+ return PTR_ERR(crtc_state);
+
+ crtc_state->plane_mask &= ~drm_plane_mask(plane);
+ }
+
+ plane_state->crtc = crtc;
+
+ if (crtc) {
+ crtc_state = drm_atomic_get_crtc_state(plane_state->state,
+ crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ crtc_state->plane_mask |= drm_plane_mask(plane);
+ }
+
+ if (crtc)
+ DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n",
+ plane->base.id, plane->name, plane_state,
+ crtc->base.id, crtc->name);
+ else
+ DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n",
+ plane->base.id, plane->name, plane_state);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
+
+/**
+ * drm_atomic_set_fb_for_plane - set framebuffer for plane
+ * @plane_state: atomic state object for the plane
+ * @fb: fb to use for the plane
+ *
+ * Changing the assigned framebuffer for a plane requires us to grab a reference
+ * to the new fb and drop the reference to the old fb, if there is one. This
+ * function takes care of all these details besides updating the pointer in the
+ * state object itself.
+ */
+void
+drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
+ struct drm_framebuffer *fb)
+{
+ struct drm_plane *plane = plane_state->plane;
+
+ if (fb)
+ DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n",
+ fb->base.id, plane->base.id, plane->name,
+ plane_state);
+ else
+ DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n",
+ plane->base.id, plane->name, plane_state);
+
+ drm_framebuffer_assign(&plane_state->fb, fb);
+}
+EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
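Together with drm_atomic_get_plane_state(), the two setters above are the usual building blocks when a driver assembles its own plane update. A hedged sketch, assuming the caller holds an acquire context and a reference on fb:

static int my_oneshot_plane_update(struct drm_atomic_state *state,
				   struct drm_plane *plane,
				   struct drm_crtc *crtc,
				   struct drm_framebuffer *fb)
{
	struct drm_plane_state *plane_state;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	/* Links the plane into the CRTC's plane_mask; -EDEADLK means the
	 * caller has to back off and retry the whole update. */
	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
	if (ret)
		return ret;

	/* Grabs a reference on fb and drops the one on the old fb. */
	drm_atomic_set_fb_for_plane(plane_state, fb);

	return drm_atomic_commit(state);
}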
+
+/**
+ * drm_atomic_set_fence_for_plane - set fence for plane
+ * @plane_state: atomic state object for the plane
+ * @fence: dma_fence to use for the plane
+ *
+ * Helper to setup the plane_state fence in case it is not set yet.
+ * By using this, drivers don't need to worry about whether the user chose
+ * implicit or explicit fencing.
+ *
+ * This function will not set the fence to the state if it was set
+ * via explicit fencing interfaces on the atomic ioctl. In that case it will
+ * drop the reference to the fence as we are not storing it anywhere.
+ * Otherwise, if &drm_plane_state.fence is not set, this function just sets it
+ * to the received implicit fence. In both cases this function consumes a
+ * reference for @fence.
+ *
+ * This way explicit fencing can be used to overrule implicit fencing, which is
+ * important to make explicit fencing use-cases work: One example is using one
+ * buffer for 2 screens with different refresh rates. Implicit fencing will
+ * clamp rendering to the refresh rate of the slower screen, whereas explicit
+ * fencing allows 2 independent render and display loops on a single buffer. If
+ * a driver obeys both implicit and explicit fences for plane updates, then it
+ * will break all the benefits of explicit fencing.
+ */
+void
+drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
+ struct dma_fence *fence)
+{
+ if (plane_state->fence) {
+ dma_fence_put(fence);
+ return;
+ }
+
+ plane_state->fence = fence;
+}
+EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
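A typical caller of drm_atomic_set_fence_for_plane() is a driver's &drm_plane_helper_funcs.prepare_fb hook, which feeds in the buffer's implicit fence so that an explicit IN_FENCE_FD, when supplied, keeps precedence. A sketch where my_get_implicit_fence() stands in for however the driver looks that fence up (typically from the buffer's reservation object):

static int my_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *state)
{
	struct dma_fence *fence;

	if (!state->fb)
		return 0;

	/* Hypothetical helper returning a reference to the buffer's
	 * implicit (exclusive) fence, or NULL if there is none. */
	fence = my_get_implicit_fence(state->fb);

	/* Consumes the reference: the fence is stored unless userspace
	 * already set one via IN_FENCE_FD, in which case it is dropped. */
	drm_atomic_set_fence_for_plane(state, fence);

	return 0;
}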
+
+/**
+ * drm_atomic_set_crtc_for_connector - set crtc for connector
+ * @conn_state: atomic state object for the connector
+ * @crtc: crtc to use for the connector
+ *
+ * Changing the assigned crtc for a connector requires us to grab the lock and
+ * state for the new crtc, as needed. This function takes care of all these
+ * details besides updating the pointer in the state object itself.
+ *
+ * Returns:
+ * 0 on success; can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
+ * then the w/w mutex code has detected a deadlock and the entire atomic
+ * sequence must be restarted. All other errors are fatal.
+ */
+int
+drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
+ struct drm_crtc *crtc)
+{
+ struct drm_connector *connector = conn_state->connector;
+ struct drm_crtc_state *crtc_state;
+
+ if (conn_state->crtc == crtc)
+ return 0;
+
+ if (conn_state->crtc) {
+ crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
+ conn_state->crtc);
+
+ crtc_state->connector_mask &=
+ ~drm_connector_mask(conn_state->connector);
+
+ drm_connector_put(conn_state->connector);
+ conn_state->crtc = NULL;
+ }
+
+ if (crtc) {
+ crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ crtc_state->connector_mask |=
+ drm_connector_mask(conn_state->connector);
+
+ drm_connector_get(conn_state->connector);
+ conn_state->crtc = crtc;
+
+ DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n",
+ connector->base.id, connector->name,
+ conn_state, crtc->base.id, crtc->name);
+ } else {
+ DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n",
+ connector->base.id, connector->name,
+ conn_state);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
+
+static void set_out_fence_for_crtc(struct drm_atomic_state *state,
+ struct drm_crtc *crtc, s32 __user *fence_ptr)
+{
+ state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
+}
+
+static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ s32 __user *fence_ptr;
+
+ fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
+ state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
+
+ return fence_ptr;
+}
+
+static int set_out_fence_for_connector(struct drm_atomic_state *state,
+ struct drm_connector *connector,
+ s32 __user *fence_ptr)
+{
+ unsigned int index = drm_connector_index(connector);
+
+ if (!fence_ptr)
+ return 0;
+
+ if (put_user(-1, fence_ptr))
+ return -EFAULT;
+
+ state->connectors[index].out_fence_ptr = fence_ptr;
+
+ return 0;
+}
+
+static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
+ struct drm_connector *connector)
+{
+ unsigned int index = drm_connector_index(connector);
+ s32 __user *fence_ptr;
+
+ fence_ptr = state->connectors[index].out_fence_ptr;
+ state->connectors[index].out_fence_ptr = NULL;
+
+ return fence_ptr;
+}
+
+static int
+drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
+ struct drm_property_blob **blob,
+ uint64_t blob_id,
+ ssize_t expected_size,
+ ssize_t expected_elem_size,
+ bool *replaced)
+{
+ struct drm_property_blob *new_blob = NULL;
+
+ if (blob_id != 0) {
+ new_blob = drm_property_lookup_blob(dev, blob_id);
+ if (new_blob == NULL)
+ return -EINVAL;
+
+ if (expected_size > 0 &&
+ new_blob->length != expected_size) {
+ drm_property_blob_put(new_blob);
+ return -EINVAL;
+ }
+ if (expected_elem_size > 0 &&
+ new_blob->length % expected_elem_size != 0) {
+ drm_property_blob_put(new_blob);
+ return -EINVAL;
+ }
+ }
+
+ *replaced |= drm_property_replace_blob(blob, new_blob);
+ drm_property_blob_put(new_blob);
+
+ return 0;
+}
+
+static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_crtc_state *state, struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+ bool replaced = false;
+ int ret;
+
+ if (property == config->prop_active)
+ state->active = val;
+ else if (property == config->prop_mode_id) {
+ struct drm_property_blob *mode =
+ drm_property_lookup_blob(dev, val);
+ ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
+ drm_property_blob_put(mode);
+ return ret;
+ } else if (property == config->degamma_lut_property) {
+ ret = drm_atomic_replace_property_blob_from_id(dev,
+ &state->degamma_lut,
+ val,
+ -1, sizeof(struct drm_color_lut),
+ &replaced);
+ state->color_mgmt_changed |= replaced;
+ return ret;
+ } else if (property == config->ctm_property) {
+ ret = drm_atomic_replace_property_blob_from_id(dev,
+ &state->ctm,
+ val,
+ sizeof(struct drm_color_ctm), -1,
+ &replaced);
+ state->color_mgmt_changed |= replaced;
+ return ret;
+ } else if (property == config->gamma_lut_property) {
+ ret = drm_atomic_replace_property_blob_from_id(dev,
+ &state->gamma_lut,
+ val,
+ -1, sizeof(struct drm_color_lut),
+ &replaced);
+ state->color_mgmt_changed |= replaced;
+ return ret;
+ } else if (property == config->prop_out_fence_ptr) {
+ s32 __user *fence_ptr = u64_to_user_ptr(val);
+
+ if (!fence_ptr)
+ return 0;
+
+ if (put_user(-1, fence_ptr))
+ return -EFAULT;
+
+ set_out_fence_for_crtc(state->state, crtc, fence_ptr);
+ } else if (crtc->funcs->atomic_set_property) {
+ return crtc->funcs->atomic_set_property(crtc, state, property, val);
+ } else {
+		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]\n",
+ crtc->base.id, crtc->name,
+ property->base.id, property->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+drm_atomic_crtc_get_property(struct drm_crtc *crtc,
+ const struct drm_crtc_state *state,
+ struct drm_property *property, uint64_t *val)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (property == config->prop_active)
+ *val = state->active;
+ else if (property == config->prop_mode_id)
+ *val = (state->mode_blob) ? state->mode_blob->base.id : 0;
+ else if (property == config->degamma_lut_property)
+ *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
+ else if (property == config->ctm_property)
+ *val = (state->ctm) ? state->ctm->base.id : 0;
+ else if (property == config->gamma_lut_property)
+ *val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
+ else if (property == config->prop_out_fence_ptr)
+ *val = 0;
+ else if (crtc->funcs->atomic_get_property)
+ return crtc->funcs->atomic_get_property(crtc, state, property, val);
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int drm_atomic_plane_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state, struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (property == config->prop_fb_id) {
+ struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
+ drm_atomic_set_fb_for_plane(state, fb);
+ if (fb)
+ drm_framebuffer_put(fb);
+ } else if (property == config->prop_in_fence_fd) {
+ if (state->fence)
+ return -EINVAL;
+
+ if (U642I64(val) == -1)
+ return 0;
+
+ state->fence = sync_file_get_fence(val);
+ if (!state->fence)
+ return -EINVAL;
+
+ } else if (property == config->prop_crtc_id) {
+ struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
+ return drm_atomic_set_crtc_for_plane(state, crtc);
+ } else if (property == config->prop_crtc_x) {
+ state->crtc_x = U642I64(val);
+ } else if (property == config->prop_crtc_y) {
+ state->crtc_y = U642I64(val);
+ } else if (property == config->prop_crtc_w) {
+ state->crtc_w = val;
+ } else if (property == config->prop_crtc_h) {
+ state->crtc_h = val;
+ } else if (property == config->prop_src_x) {
+ state->src_x = val;
+ } else if (property == config->prop_src_y) {
+ state->src_y = val;
+ } else if (property == config->prop_src_w) {
+ state->src_w = val;
+ } else if (property == config->prop_src_h) {
+ state->src_h = val;
+ } else if (property == plane->alpha_property) {
+ state->alpha = val;
+ } else if (property == plane->blend_mode_property) {
+ state->pixel_blend_mode = val;
+ } else if (property == plane->rotation_property) {
+ if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n",
+ plane->base.id, plane->name, val);
+ return -EINVAL;
+ }
+ state->rotation = val;
+ } else if (property == plane->zpos_property) {
+ state->zpos = val;
+ } else if (property == plane->color_encoding_property) {
+ state->color_encoding = val;
+ } else if (property == plane->color_range_property) {
+ state->color_range = val;
+ } else if (plane->funcs->atomic_set_property) {
+ return plane->funcs->atomic_set_property(plane, state,
+ property, val);
+ } else {
+		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
+ plane->base.id, plane->name,
+ property->base.id, property->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+drm_atomic_plane_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property, uint64_t *val)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (property == config->prop_fb_id) {
+ *val = (state->fb) ? state->fb->base.id : 0;
+ } else if (property == config->prop_in_fence_fd) {
+ *val = -1;
+ } else if (property == config->prop_crtc_id) {
+ *val = (state->crtc) ? state->crtc->base.id : 0;
+ } else if (property == config->prop_crtc_x) {
+ *val = I642U64(state->crtc_x);
+ } else if (property == config->prop_crtc_y) {
+ *val = I642U64(state->crtc_y);
+ } else if (property == config->prop_crtc_w) {
+ *val = state->crtc_w;
+ } else if (property == config->prop_crtc_h) {
+ *val = state->crtc_h;
+ } else if (property == config->prop_src_x) {
+ *val = state->src_x;
+ } else if (property == config->prop_src_y) {
+ *val = state->src_y;
+ } else if (property == config->prop_src_w) {
+ *val = state->src_w;
+ } else if (property == config->prop_src_h) {
+ *val = state->src_h;
+ } else if (property == plane->alpha_property) {
+ *val = state->alpha;
+ } else if (property == plane->blend_mode_property) {
+ *val = state->pixel_blend_mode;
+ } else if (property == plane->rotation_property) {
+ *val = state->rotation;
+ } else if (property == plane->zpos_property) {
+ *val = state->zpos;
+ } else if (property == plane->color_encoding_property) {
+ *val = state->color_encoding;
+ } else if (property == plane->color_range_property) {
+ *val = state->color_range;
+ } else if (plane->funcs->atomic_get_property) {
+ return plane->funcs->atomic_get_property(plane, state, property, val);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct drm_writeback_job *
+drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
+{
+ WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
+
+ if (!conn_state->writeback_job)
+ conn_state->writeback_job =
+ kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
+
+ return conn_state->writeback_job;
+}
+
+static int drm_atomic_set_writeback_fb_for_connector(
+ struct drm_connector_state *conn_state,
+ struct drm_framebuffer *fb)
+{
+ struct drm_writeback_job *job =
+ drm_atomic_get_writeback_job(conn_state);
+ if (!job)
+ return -ENOMEM;
+
+ drm_framebuffer_assign(&job->fb, fb);
+
+ if (fb)
+ DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
+ fb->base.id, conn_state);
+ else
+ DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n",
+ conn_state);
+
+ return 0;
+}
+
+static int drm_atomic_connector_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state, struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (property == config->prop_crtc_id) {
+ struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
+ return drm_atomic_set_crtc_for_connector(state, crtc);
+ } else if (property == config->dpms_property) {
+ /* setting DPMS property requires special handling, which
+ * is done in legacy setprop path for us. Disallow (for
+ * now?) atomic writes to DPMS property:
+ */
+ return -EINVAL;
+ } else if (property == config->tv_select_subconnector_property) {
+ state->tv.subconnector = val;
+ } else if (property == config->tv_left_margin_property) {
+ state->tv.margins.left = val;
+ } else if (property == config->tv_right_margin_property) {
+ state->tv.margins.right = val;
+ } else if (property == config->tv_top_margin_property) {
+ state->tv.margins.top = val;
+ } else if (property == config->tv_bottom_margin_property) {
+ state->tv.margins.bottom = val;
+ } else if (property == config->tv_mode_property) {
+ state->tv.mode = val;
+ } else if (property == config->tv_brightness_property) {
+ state->tv.brightness = val;
+ } else if (property == config->tv_contrast_property) {
+ state->tv.contrast = val;
+ } else if (property == config->tv_flicker_reduction_property) {
+ state->tv.flicker_reduction = val;
+ } else if (property == config->tv_overscan_property) {
+ state->tv.overscan = val;
+ } else if (property == config->tv_saturation_property) {
+ state->tv.saturation = val;
+ } else if (property == config->tv_hue_property) {
+ state->tv.hue = val;
+ } else if (property == config->link_status_property) {
+		/* Never downgrade from GOOD to BAD on userspace's request here;
+		 * only hw issues can do that.
+		 *
+		 * For an atomic property the userspace doesn't need to be able
+		 * to understand all the properties, but needs to be able to
+		 * restore the state it wants on VT switch. So if the userspace
+		 * tries to change the link_status from GOOD to BAD, the driver
+		 * silently rejects it and returns 0. This prevents userspace
+		 * from accidentally breaking the display when it restores the
+		 * state.
+ */
+ if (state->link_status != DRM_LINK_STATUS_GOOD)
+ state->link_status = val;
+ } else if (property == config->aspect_ratio_property) {
+ state->picture_aspect_ratio = val;
+ } else if (property == config->content_type_property) {
+ state->content_type = val;
+ } else if (property == connector->scaling_mode_property) {
+ state->scaling_mode = val;
+ } else if (property == connector->content_protection_property) {
+ if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ DRM_DEBUG_KMS("only drivers can set CP Enabled\n");
+ return -EINVAL;
+ }
+ state->content_protection = val;
+ } else if (property == config->writeback_fb_id_property) {
+ struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
+ int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
+ if (fb)
+ drm_framebuffer_put(fb);
+ return ret;
+ } else if (property == config->writeback_out_fence_ptr_property) {
+ s32 __user *fence_ptr = u64_to_user_ptr(val);
+
+ return set_out_fence_for_connector(state->state, connector,
+ fence_ptr);
+ } else if (connector->funcs->atomic_set_property) {
+ return connector->funcs->atomic_set_property(connector,
+ state, property, val);
+ } else {
+		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]\n",
+ connector->base.id, connector->name,
+ property->base.id, property->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+drm_atomic_connector_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property, uint64_t *val)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (property == config->prop_crtc_id) {
+ *val = (state->crtc) ? state->crtc->base.id : 0;
+ } else if (property == config->dpms_property) {
+ *val = connector->dpms;
+ } else if (property == config->tv_select_subconnector_property) {
+ *val = state->tv.subconnector;
+ } else if (property == config->tv_left_margin_property) {
+ *val = state->tv.margins.left;
+ } else if (property == config->tv_right_margin_property) {
+ *val = state->tv.margins.right;
+ } else if (property == config->tv_top_margin_property) {
+ *val = state->tv.margins.top;
+ } else if (property == config->tv_bottom_margin_property) {
+ *val = state->tv.margins.bottom;
+ } else if (property == config->tv_mode_property) {
+ *val = state->tv.mode;
+ } else if (property == config->tv_brightness_property) {
+ *val = state->tv.brightness;
+ } else if (property == config->tv_contrast_property) {
+ *val = state->tv.contrast;
+ } else if (property == config->tv_flicker_reduction_property) {
+ *val = state->tv.flicker_reduction;
+ } else if (property == config->tv_overscan_property) {
+ *val = state->tv.overscan;
+ } else if (property == config->tv_saturation_property) {
+ *val = state->tv.saturation;
+ } else if (property == config->tv_hue_property) {
+ *val = state->tv.hue;
+ } else if (property == config->link_status_property) {
+ *val = state->link_status;
+ } else if (property == config->aspect_ratio_property) {
+ *val = state->picture_aspect_ratio;
+ } else if (property == config->content_type_property) {
+ *val = state->content_type;
+ } else if (property == connector->scaling_mode_property) {
+ *val = state->scaling_mode;
+ } else if (property == connector->content_protection_property) {
+ *val = state->content_protection;
+ } else if (property == config->writeback_fb_id_property) {
+ /* Writeback framebuffer is one-shot, write and forget */
+ *val = 0;
+ } else if (property == config->writeback_out_fence_ptr_property) {
+ *val = 0;
+ } else if (connector->funcs->atomic_get_property) {
+ return connector->funcs->atomic_get_property(connector,
+ state, property, val);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int drm_atomic_get_property(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t *val)
+{
+ struct drm_device *dev = property->dev;
+ int ret;
+
+ switch (obj->type) {
+ case DRM_MODE_OBJECT_CONNECTOR: {
+ struct drm_connector *connector = obj_to_connector(obj);
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ ret = drm_atomic_connector_get_property(connector,
+ connector->state, property, val);
+ break;
+ }
+ case DRM_MODE_OBJECT_CRTC: {
+ struct drm_crtc *crtc = obj_to_crtc(obj);
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+ ret = drm_atomic_crtc_get_property(crtc,
+ crtc->state, property, val);
+ break;
+ }
+ case DRM_MODE_OBJECT_PLANE: {
+ struct drm_plane *plane = obj_to_plane(obj);
+ WARN_ON(!drm_modeset_is_locked(&plane->mutex));
+ ret = drm_atomic_plane_get_property(plane,
+ plane->state, property, val);
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * The big monster ioctl
+ */
+
+static struct drm_pending_vblank_event *create_vblank_event(
+ struct drm_crtc *crtc, uint64_t user_data)
+{
+ struct drm_pending_vblank_event *e = NULL;
+
+ e = kzalloc(sizeof *e, GFP_KERNEL);
+ if (!e)
+ return NULL;
+
+ e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+ e->event.base.length = sizeof(e->event);
+ e->event.vbl.crtc_id = crtc->base.id;
+ e->event.vbl.user_data = user_data;
+
+ return e;
+}
+
+int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
+ struct drm_connector *connector,
+ int mode)
+{
+ struct drm_connector *tmp_connector;
+ struct drm_connector_state *new_conn_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i, ret, old_mode = connector->dpms;
+ bool active = false;
+
+ ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
+ state->acquire_ctx);
+ if (ret)
+ return ret;
+
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+ connector->dpms = mode;
+
+ crtc = connector->state->crtc;
+ if (!crtc)
+ goto out;
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ goto out;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto out;
+ }
+
+ for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) {
+ if (new_conn_state->crtc != crtc)
+ continue;
+ if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
+ active = true;
+ break;
+ }
+ }
+
+ crtc_state->active = active;
+ ret = drm_atomic_commit(state);
+out:
+ if (ret != 0)
+ connector->dpms = old_mode;
+ return ret;
+}
+
+int drm_atomic_set_property(struct drm_atomic_state *state,
+ struct drm_mode_object *obj,
+ struct drm_property *prop,
+ uint64_t prop_value)
+{
+ struct drm_mode_object *ref;
+ int ret;
+
+ if (!drm_property_change_valid_get(prop, prop_value, &ref))
+ return -EINVAL;
+
+ switch (obj->type) {
+ case DRM_MODE_OBJECT_CONNECTOR: {
+ struct drm_connector *connector = obj_to_connector(obj);
+ struct drm_connector_state *connector_state;
+
+ connector_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(connector_state)) {
+ ret = PTR_ERR(connector_state);
+ break;
+ }
+
+ ret = drm_atomic_connector_set_property(connector,
+ connector_state, prop, prop_value);
+ break;
+ }
+ case DRM_MODE_OBJECT_CRTC: {
+ struct drm_crtc *crtc = obj_to_crtc(obj);
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ break;
+ }
+
+ ret = drm_atomic_crtc_set_property(crtc,
+ crtc_state, prop, prop_value);
+ break;
+ }
+ case DRM_MODE_OBJECT_PLANE: {
+ struct drm_plane *plane = obj_to_plane(obj);
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ break;
+ }
+
+ ret = drm_atomic_plane_set_property(plane,
+ plane_state, prop, prop_value);
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ drm_property_change_valid_put(prop, ref);
+ return ret;
+}
+
+/**
+ * DOC: explicit fencing properties
+ *
+ * Explicit fencing allows userspace to control the buffer synchronization
+ * between devices. A fence or a group of fences is transferred to/from
+ * userspace using Sync File fds, and there are two DRM properties for that.
+ * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
+ * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
+ *
+ * As a contrast, with implicit fencing the kernel keeps track of any
+ * ongoing rendering, and automatically ensures that the atomic update waits
+ * for any pending rendering to complete. For shared buffers represented with
+ * a &struct dma_buf this is tracked in &struct reservation_object.
+ * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
+ * whereas explicit fencing is what Android wants.
+ *
+ * "IN_FENCE_FD”:
+ * Use this property to pass a fence that DRM should wait on before
+ * proceeding with the Atomic Commit request and show the framebuffer for
+ * the plane on the screen. The fence can be either a normal fence or a
+ * merged one, the sync_file framework will handle both cases and use a
+ * fence_array if a merged fence is received. Passing -1 here means no
+ * fences to wait on.
+ *
+ * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
+ * it will only check if the Sync File is a valid one.
+ *
+ * On the driver side the fence is stored on the @fence parameter of
+ * &struct drm_plane_state. Drivers which also support implicit fencing
+ * should set the implicit fence using drm_atomic_set_fence_for_plane(),
+ * to make sure there's consistent behaviour between drivers in precedence
+ * of implicit vs. explicit fencing.
+ *
+ * "OUT_FENCE_PTR”:
+ * Use this property to pass a file descriptor pointer to DRM. Once the
+ * Atomic Commit request call returns OUT_FENCE_PTR will be filled with
+ * the file descriptor number of a Sync File. This Sync File contains the
+ * CRTC fence that will be signaled when all framebuffers present on the
+ * Atomic Commit * request for that given CRTC are scanned out on the
+ * screen.
+ *
+ * The Atomic Commit request fails if a invalid pointer is passed. If the
+ * Atomic Commit request fails for any other reason the out fence fd
+ * returned will be -1. On a Atomic Commit with the
+ * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
+ *
+ * Note that out-fences don't have a special interface to drivers and are
+ * internally represented by a &struct drm_pending_vblank_event in struct
+ * &drm_crtc_state, which is also used by the nonblocking atomic commit
+ * helpers and for the DRM event handling for existing userspace.
+ */
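Seen from userspace, both properties ride along an ordinary atomic commit. A hedged libdrm-style sketch, where the object and property IDs are assumed to have been looked up beforehand:

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int commit_with_fences(int fd, uint32_t plane_id, uint32_t crtc_id,
		       uint32_t in_fence_fd_prop, uint32_t out_fence_ptr_prop,
		       int render_done_fd)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int out_fence_fd = -1;
	int ret;

	if (!req)
		return -ENOMEM;

	/* Wait for the renderer before scanning out this plane. */
	drmModeAtomicAddProperty(req, plane_id, in_fence_fd_prop,
				 (uint64_t)render_done_fd);

	/* Ask the kernel to return a sync_file fd for this CRTC. */
	drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop,
				 (uint64_t)(uintptr_t)&out_fence_fd);

	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
	drmModeAtomicFree(req);

	/* On success out_fence_fd now holds the sync_file fd, otherwise -1. */
	return ret ? ret : out_fence_fd;
}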
+
+struct drm_out_fence_state {
+ s32 __user *out_fence_ptr;
+ struct sync_file *sync_file;
+ int fd;
+};
+
+static int setup_out_fence(struct drm_out_fence_state *fence_state,
+ struct dma_fence *fence)
+{
+ fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fence_state->fd < 0)
+ return fence_state->fd;
+
+ if (put_user(fence_state->fd, fence_state->out_fence_ptr))
+ return -EFAULT;
+
+ fence_state->sync_file = sync_file_create(fence);
+ if (!fence_state->sync_file)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int prepare_signaling(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_mode_atomic *arg,
+ struct drm_file *file_priv,
+ struct drm_out_fence_state **fence_state,
+ unsigned int *num_fences)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_state;
+ int i, c = 0, ret;
+
+ if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
+ return 0;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ s32 __user *fence_ptr;
+
+ fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
+
+ if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
+ struct drm_pending_vblank_event *e;
+
+ e = create_vblank_event(crtc, arg->user_data);
+ if (!e)
+ return -ENOMEM;
+
+ crtc_state->event = e;
+ }
+
+ if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ struct drm_pending_vblank_event *e = crtc_state->event;
+
+ if (!file_priv)
+ continue;
+
+ ret = drm_event_reserve_init(dev, file_priv, &e->base,
+ &e->event.base);
+ if (ret) {
+ kfree(e);
+ crtc_state->event = NULL;
+ return ret;
+ }
+ }
+
+ if (fence_ptr) {
+ struct dma_fence *fence;
+ struct drm_out_fence_state *f;
+
+ f = krealloc(*fence_state, sizeof(**fence_state) *
+ (*num_fences + 1), GFP_KERNEL);
+ if (!f)
+ return -ENOMEM;
+
+ memset(&f[*num_fences], 0, sizeof(*f));
+
+ f[*num_fences].out_fence_ptr = fence_ptr;
+ *fence_state = f;
+
+ fence = drm_crtc_create_fence(crtc);
+ if (!fence)
+ return -ENOMEM;
+
+ ret = setup_out_fence(&f[(*num_fences)++], fence);
+ if (ret) {
+ dma_fence_put(fence);
+ return ret;
+ }
+
+ crtc_state->event->base.fence = fence;
+ }
+
+ c++;
+ }
+
+ for_each_new_connector_in_state(state, conn, conn_state, i) {
+ struct drm_writeback_connector *wb_conn;
+ struct drm_writeback_job *job;
+ struct drm_out_fence_state *f;
+ struct dma_fence *fence;
+ s32 __user *fence_ptr;
+
+ fence_ptr = get_out_fence_for_connector(state, conn);
+ if (!fence_ptr)
+ continue;
+
+ job = drm_atomic_get_writeback_job(conn_state);
+ if (!job)
+ return -ENOMEM;
+
+ f = krealloc(*fence_state, sizeof(**fence_state) *
+ (*num_fences + 1), GFP_KERNEL);
+ if (!f)
+ return -ENOMEM;
+
+ memset(&f[*num_fences], 0, sizeof(*f));
+
+ f[*num_fences].out_fence_ptr = fence_ptr;
+ *fence_state = f;
+
+ wb_conn = drm_connector_to_writeback(conn);
+ fence = drm_writeback_get_out_fence(wb_conn);
+ if (!fence)
+ return -ENOMEM;
+
+ ret = setup_out_fence(&f[(*num_fences)++], fence);
+ if (ret) {
+ dma_fence_put(fence);
+ return ret;
+ }
+
+ job->out_fence = fence;
+ }
+
+ /*
+	 * Having this flag means userspace is waiting for an event that will
+	 * never arrive, because there is no CRTC that could signal it.
+ */
+ if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void complete_signaling(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_out_fence_state *fence_state,
+ unsigned int num_fences,
+ bool install_fds)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i;
+
+ if (install_fds) {
+ for (i = 0; i < num_fences; i++)
+ fd_install(fence_state[i].fd,
+ fence_state[i].sync_file->file);
+
+ kfree(fence_state);
+ return;
+ }
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ struct drm_pending_vblank_event *event = crtc_state->event;
+ /*
+ * Free the allocated event. drm_atomic_helper_setup_commit
+ * can allocate an event too, so only free it if it's ours
+ * to prevent a double free in drm_atomic_state_clear.
+ */
+ if (event && (event->base.fence || event->base.file_priv)) {
+ drm_event_cancel_free(dev, &event->base);
+ crtc_state->event = NULL;
+ }
+ }
+
+ if (!fence_state)
+ return;
+
+ for (i = 0; i < num_fences; i++) {
+ if (fence_state[i].sync_file)
+ fput(fence_state[i].sync_file->file);
+ if (fence_state[i].fd >= 0)
+ put_unused_fd(fence_state[i].fd);
+
+ /* If this fails log error to the user */
+ if (fence_state[i].out_fence_ptr &&
+ put_user(-1, fence_state[i].out_fence_ptr))
+ DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
+ }
+
+ kfree(fence_state);
+}
+
+int drm_mode_atomic_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_atomic *arg = data;
+ uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
+ uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
+ uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
+ uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
+ unsigned int copied_objs, copied_props;
+ struct drm_atomic_state *state;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_out_fence_state *fence_state;
+ int ret = 0;
+ unsigned int i, j, num_fences;
+
+ /* disallow for drivers not supporting atomic: */
+ if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
+ return -EOPNOTSUPP;
+
+ /* disallow for userspace that has not enabled atomic cap (even
+ * though this may be a bit overkill, since legacy userspace
+ * wouldn't know how to call this ioctl)
+ */
+ if (!file_priv->atomic)
+ return -EINVAL;
+
+ if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
+ return -EINVAL;
+
+ if (arg->reserved)
+ return -EINVAL;
+
+ if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
+ !dev->mode_config.async_page_flip)
+ return -EINVAL;
+
+ /* can't test and expect an event at the same time. */
+ if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
+ (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
+ return -EINVAL;
+
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = &ctx;
+ state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
+
+retry:
+ copied_objs = 0;
+ copied_props = 0;
+ fence_state = NULL;
+ num_fences = 0;
+
+ for (i = 0; i < arg->count_objs; i++) {
+ uint32_t obj_id, count_props;
+ struct drm_mode_object *obj;
+
+ if (get_user(obj_id, objs_ptr + copied_objs)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY);
+ if (!obj) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ if (!obj->properties) {
+ drm_mode_object_put(obj);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ if (get_user(count_props, count_props_ptr + copied_objs)) {
+ drm_mode_object_put(obj);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ copied_objs++;
+
+ for (j = 0; j < count_props; j++) {
+ uint32_t prop_id;
+ uint64_t prop_value;
+ struct drm_property *prop;
+
+ if (get_user(prop_id, props_ptr + copied_props)) {
+ drm_mode_object_put(obj);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ prop = drm_mode_obj_find_prop_id(obj, prop_id);
+ if (!prop) {
+ drm_mode_object_put(obj);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ if (copy_from_user(&prop_value,
+ prop_values_ptr + copied_props,
+ sizeof(prop_value))) {
+ drm_mode_object_put(obj);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = drm_atomic_set_property(state, obj, prop,
+ prop_value);
+ if (ret) {
+ drm_mode_object_put(obj);
+ goto out;
+ }
+
+ copied_props++;
+ }
+
+ drm_mode_object_put(obj);
+ }
+
+ ret = prepare_signaling(dev, state, arg, file_priv, &fence_state,
+ &num_fences);
+ if (ret)
+ goto out;
+
+ if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
+ ret = drm_atomic_check_only(state);
+ } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
+ ret = drm_atomic_nonblocking_commit(state);
+ } else {
+ if (unlikely(drm_debug & DRM_UT_STATE))
+ drm_atomic_print_state(state);
+
+ ret = drm_atomic_commit(state);
+ }
+
+out:
+ complete_signaling(dev, state, fence_state, num_fences, !ret);
+
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
+
+ drm_atomic_state_put(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+}
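For reference, the four user pointers walked by the ioctl above each describe one flat array. A hedged userspace sketch of a single CRTC carrying two properties, sent through the raw uapi structure (the property and blob IDs are placeholders obtained elsewhere):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

int raw_atomic_modeset(int fd, uint32_t crtc_id, uint32_t active_prop,
		       uint32_t mode_id_prop, uint32_t mode_blob_id)
{
	uint32_t objs[]        = { crtc_id };
	uint32_t count_props[] = { 2 };		/* two props for this object */
	uint32_t props[]       = { active_prop, mode_id_prop };
	uint64_t values[]      = { 1, mode_blob_id };
	struct drm_mode_atomic arg;

	memset(&arg, 0, sizeof(arg));
	arg.flags           = DRM_MODE_ATOMIC_ALLOW_MODESET;
	arg.count_objs      = 1;
	arg.objs_ptr        = (uintptr_t)objs;
	arg.count_props_ptr = (uintptr_t)count_props;
	arg.props_ptr       = (uintptr_t)props;
	arg.prop_values_ptr = (uintptr_t)values;

	return ioctl(fd, DRM_IOCTL_MODE_ATOMIC, &arg);
}
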
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index a16a74d7e15e..0c78ca386cbe 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -101,12 +101,80 @@
* Without this property the rectangle is only scaled, but not rotated or
* reflected.
*
+ * Possible values:
+ *
+ * "rotate-<degrees>":
+ *	Signals that a drm plane is rotated <degrees> degrees in the
+ *	counterclockwise direction.
+ *
+ * "reflect-<axis>":
+ *	Signals that the contents of a drm plane are reflected along the
+ *	<axis> axis, in the same way as mirroring.
+ *
+ * reflect-x::
+ *
+ * |o | | o|
+ * | | -> | |
+ * | v| |v |
+ *
+ * reflect-y::
+ *
+ * |o | | ^|
+ * | | -> | |
+ * | v| |o |
+ *
* zpos:
* Z position is set up with drm_plane_create_zpos_immutable_property() and
* drm_plane_create_zpos_property(). It controls the visibility of overlapping
* planes. Without this property the primary plane is always below the cursor
* plane, and ordering between all other planes is undefined.
*
+ * pixel blend mode:
+ * Pixel blend mode is set up with drm_plane_create_blend_mode_property().
+ * It adds a blend mode for alpha blending equation selection, describing
+ * how the pixels from the current plane are composited with the
+ * background.
+ *
+ * Three alpha blending equations are defined:
+ *
+ * "None":
+ * Blend formula that ignores the pixel alpha::
+ *
+ * out.rgb = plane_alpha * fg.rgb +
+ * (1 - plane_alpha) * bg.rgb
+ *
+ * "Pre-multiplied":
+ * Blend formula that assumes the pixel color values
+ * have been already pre-multiplied with the alpha
+ * channel values::
+ *
+ * out.rgb = plane_alpha * fg.rgb +
+ * (1 - (plane_alpha * fg.alpha)) * bg.rgb
+ *
+ * "Coverage":
+ * Blend formula that assumes the pixel color values have not
+ * been pre-multiplied and will do so when blending them to the
+ * background color values::
+ *
+ * out.rgb = plane_alpha * fg.alpha * fg.rgb +
+ * (1 - (plane_alpha * fg.alpha)) * bg.rgb
+ *
+ * Using the following symbols:
+ *
+ * "fg.rgb":
+ * Each of the RGB component values from the plane's pixel
+ * "fg.alpha":
+ * Alpha component value from the plane's pixel. If the plane's
+ * pixel format has no alpha component, then this is assumed to be
+ * 1.0. In these cases, this property has no effect, as all three
+ * equations become equivalent.
+ * "bg.rgb":
+ * Each of the RGB component values from the background
+ * "plane_alpha":
+ * Plane alpha value set by the plane "alpha" property. If the
+ * plane does not expose the "alpha" property, then this is
+ * assumed to be 1.0
+ *
* Note that all the property extensions described here apply either to the
* plane or the CRTC (e.g. for the background color, which currently is not
* exposed and assumed to be black).
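The three equations reduce to simple per-channel arithmetic. The sketch below spells them out in normalized floating point purely to make the formulas concrete; real planes of course operate on integer pixel data:

/* One colour channel; all inputs are normalized to [0.0, 1.0]. */
static float blend_channel(float fg, float fg_alpha, float bg,
			   float plane_alpha, unsigned int mode)
{
	switch (mode) {
	case DRM_MODE_BLEND_PIXEL_NONE:
		/* "None": the per-pixel alpha is ignored. */
		return plane_alpha * fg + (1.0f - plane_alpha) * bg;
	case DRM_MODE_BLEND_PREMULTI:
		/* "Pre-multiplied": fg already carries its alpha. */
		return plane_alpha * fg +
		       (1.0f - plane_alpha * fg_alpha) * bg;
	case DRM_MODE_BLEND_COVERAGE:
	default:
		/* "Coverage": multiply fg_alpha in while blending. */
		return plane_alpha * fg_alpha * fg +
		       (1.0f - plane_alpha * fg_alpha) * bg;
	}
}
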
@@ -448,3 +516,80 @@ int drm_atomic_normalize_zpos(struct drm_device *dev,
return 0;
}
EXPORT_SYMBOL(drm_atomic_normalize_zpos);
+
+/**
+ * drm_plane_create_blend_mode_property - create a new blend mode property
+ * @plane: drm plane
+ * @supported_modes: bitmask of supported modes, must include
+ * BIT(DRM_MODE_BLEND_PREMULTI). Current DRM assumption is
+ * that alpha is premultiplied, and old userspace can break if
+ * the property defaults to anything else.
+ *
+ * This creates a new property describing the blend mode.
+ *
+ * The property exposed to userspace is an enumeration property (see
+ * drm_property_create_enum()) called "pixel blend mode" and has the
+ * following enumeration values:
+ *
+ * "None":
+ * Blend formula that ignores the pixel alpha.
+ *
+ * "Pre-multiplied":
+ * Blend formula that assumes the pixel color values have been already
+ * pre-multiplied with the alpha channel values.
+ *
+ * "Coverage":
+ * Blend formula that assumes the pixel color values have not been
+ * pre-multiplied and will do so when blending them to the background color
+ * values.
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+int drm_plane_create_blend_mode_property(struct drm_plane *plane,
+ unsigned int supported_modes)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_property *prop;
+ static const struct drm_prop_enum_list props[] = {
+ { DRM_MODE_BLEND_PIXEL_NONE, "None" },
+ { DRM_MODE_BLEND_PREMULTI, "Pre-multiplied" },
+ { DRM_MODE_BLEND_COVERAGE, "Coverage" },
+ };
+ unsigned int valid_mode_mask = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE);
+ int i;
+
+ if (WARN_ON((supported_modes & ~valid_mode_mask) ||
+ ((supported_modes & BIT(DRM_MODE_BLEND_PREMULTI)) == 0)))
+ return -EINVAL;
+
+ prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "pixel blend mode",
+ hweight32(supported_modes));
+ if (!prop)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ int ret;
+
+ if (!(BIT(props[i].type) & supported_modes))
+ continue;
+
+ ret = drm_property_add_enum(prop, props[i].type,
+ props[i].name);
+
+ if (ret) {
+ drm_property_destroy(dev, prop);
+
+ return ret;
+ }
+ }
+
+ drm_object_attach_property(&plane->base, prop, DRM_MODE_BLEND_PREMULTI);
+ plane->blend_mode_property = prop;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_create_blend_mode_property);
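Driver usage boils down to one call at plane init time. A hedged sketch; the supported mask depends on the hardware but must always include DRM_MODE_BLEND_PREMULTI:

static int my_plane_init_blending(struct drm_plane *plane)
{
	/* "Pre-multiplied" stays the default, the other two are optional. */
	return drm_plane_create_blend_mode_property(plane,
					BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					BIT(DRM_MODE_BLEND_PREMULTI) |
					BIT(DRM_MODE_BLEND_COVERAGE));
}
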
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 1638bfe9627c..ba7025041e46 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -104,6 +104,10 @@ EXPORT_SYMBOL(drm_bridge_remove);
* If non-NULL the previous bridge must be already attached by a call to this
* function.
*
+ * Note that bridges attached to encoders are auto-detached during encoder
+ * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally
+ * *not* be balanced with a drm_bridge_detach() in driver code.
+ *
* RETURNS:
* Zero on success, error code on failure
*/
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index ba8cfe65c65b..7412acaf3cde 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -398,7 +398,7 @@ int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
err = drm_addmap_core(dev, map->offset, map->size, map->type,
map->flags, &maplist);
@@ -444,7 +444,7 @@ int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
idx = map->offset;
if (idx < 0)
@@ -596,7 +596,7 @@ int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(r_list, &dev->maplist, head) {
@@ -860,7 +860,7 @@ int drm_legacy_addbufs_pci(struct drm_device *dev,
struct drm_buf **temp_buflist;
if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (!dma)
return -EINVAL;
@@ -1064,7 +1064,7 @@ static int drm_legacy_addbufs_sg(struct drm_device *dev,
struct drm_buf **temp_buflist;
if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (!dma)
return -EINVAL;
@@ -1221,10 +1221,10 @@ int drm_legacy_addbufs(struct drm_device *dev, void *data,
int ret;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EINVAL;
+ return -EOPNOTSUPP;
#if IS_ENABLED(CONFIG_AGP)
if (request->flags & _DRM_AGP_BUFFER)
@@ -1267,10 +1267,10 @@ int __drm_legacy_infobufs(struct drm_device *dev,
int count;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (!dma)
return -EINVAL;
@@ -1352,10 +1352,10 @@ int drm_legacy_markbufs(struct drm_device *dev, void *data,
struct drm_buf_entry *entry;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (!dma)
return -EINVAL;
@@ -1400,10 +1400,10 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
struct drm_buf *buf;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (!dma)
return -EINVAL;
@@ -1455,10 +1455,10 @@ int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
int i;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (!dma)
return -EINVAL;
@@ -1545,7 +1545,7 @@ int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (dev->driver->dma_ioctl)
return dev->driver->dma_ioctl(dev, data, file_priv);
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index baff50a4c234..17d9a64e885e 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -82,7 +82,7 @@ int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
if (!drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev->driver->dumb_create || !dev->driver->gem_prime_vmap)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (funcs && !try_module_get(funcs->owner))
return -ENODEV;
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index b97e2de2c029..581cc3788223 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -242,7 +242,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
crtc = drm_crtc_find(dev, file_priv, crtc_lut->crtc_id);
if (!crtc)
@@ -320,7 +320,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
crtc = drm_crtc_find(dev, file_priv, crtc_lut->crtc_id);
if (!crtc)
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 6011d769d50b..1e40e5decbe9 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -20,11 +20,15 @@
* OF THIS SOFTWARE.
*/
-#include <drm/drmP.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_utils.h>
+#include <drm/drm_print.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+
+#include <linux/uaccess.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -1721,7 +1725,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
LIST_HEAD(export_list);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index f973d287696a..506663c69b0a 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -178,7 +178,7 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
mutex_lock(&dev->struct_mutex);
@@ -226,7 +226,7 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(r_list, &dev->maplist, head) {
@@ -330,7 +330,7 @@ int drm_legacy_resctx(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (res->count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
@@ -364,7 +364,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
ctx->handle = drm_legacy_ctxbitmap_next(dev);
if (ctx->handle == DRM_KERNEL_CONTEXT) {
@@ -411,7 +411,7 @@ int drm_legacy_getctx(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
/* This is 0, because we don't handle any context flags */
ctx->flags = 0;
@@ -437,7 +437,7 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
DRM_DEBUG("%d\n", ctx->handle);
return drm_context_switch(dev, dev->last_context, ctx->handle);
@@ -461,7 +461,7 @@ int drm_legacy_newctx(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
DRM_DEBUG("%d\n", ctx->handle);
drm_context_switch_complete(dev, file_priv, ctx->handle);
@@ -487,7 +487,7 @@ int drm_legacy_rmctx(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle != DRM_KERNEL_CONTEXT) {
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index bae43938c8f6..5f488aa80bcd 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -34,7 +34,7 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dma-fence.h>
-#include <drm/drmP.h>
+#include <linux/uaccess.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
@@ -42,6 +42,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_auth.h>
#include <drm/drm_debugfs_crc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
+#include <drm/drm_file.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -402,7 +405,7 @@ int drm_mode_getcrtc(struct drm_device *dev,
struct drm_plane *plane;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
crtc = drm_crtc_find(dev, file_priv, crtc_resp->crtc_id);
if (!crtc)
@@ -577,7 +580,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
int i;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
/*
* Universal plane src offsets are only 16.16, prevent havoc for
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 5a84c3bc915d..ce75e9506e85 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -35,6 +35,7 @@
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index b61322763394..86893448f486 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -31,6 +31,14 @@
* and are not exported to drivers.
*/
+enum drm_mode_status;
+enum drm_connector_force;
+
+struct drm_display_mode;
+struct work_struct;
+struct drm_connector;
+struct drm_bridge;
+struct edid;
/* drm_crtc.c */
int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
@@ -174,6 +182,8 @@ void drm_fb_release(struct drm_file *file_priv);
int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or,
struct drm_file *file_priv);
+int drm_mode_addfb2(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
struct drm_file *file_priv);
@@ -181,8 +191,8 @@ int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
/* IOCTL */
int drm_mode_addfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-int drm_mode_addfb2(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
+int drm_mode_addfb2_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
int drm_mode_rmfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
int drm_mode_getfb(struct drm_device *dev,
@@ -196,6 +206,9 @@ struct drm_minor;
int drm_atomic_debugfs_init(struct drm_minor *minor);
#endif
+void drm_atomic_print_state(const struct drm_atomic_state *state);
+
+/* drm_atomic_uapi.c */
int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
struct drm_connector *connector,
int mode);
@@ -205,6 +218,8 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
uint64_t prop_value);
int drm_atomic_get_property(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val);
+
+/* IOCTL */
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index 99961192bf03..00e743153e94 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -68,8 +68,29 @@ static int crc_control_show(struct seq_file *m, void *data)
{
struct drm_crtc *crtc = m->private;
- seq_printf(m, "%s\n", crtc->crc.source);
+ if (crtc->funcs->get_crc_sources) {
+ size_t count;
+ const char *const *sources = crtc->funcs->get_crc_sources(crtc,
+ &count);
+ size_t values_cnt;
+ int i;
+
+ if (count == 0 || !sources)
+ goto out;
+
+ for (i = 0; i < count; i++)
+ if (!crtc->funcs->verify_crc_source(crtc, sources[i],
+ &values_cnt)) {
+ if (strcmp(sources[i], crtc->crc.source))
+ seq_printf(m, "%s\n", sources[i]);
+ else
+ seq_printf(m, "%s*\n", sources[i]);
+ }
+ }
+ return 0;
+out:
+ seq_printf(m, "%s*\n", crtc->crc.source);
return 0;
}
@@ -87,6 +108,8 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
struct drm_crtc *crtc = m->private;
struct drm_crtc_crc *crc = &crtc->crc;
char *source;
+ size_t values_cnt;
+ int ret;
if (len == 0)
return 0;
@@ -104,6 +127,10 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
if (source[len] == '\n')
source[len] = '\0';
+ ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
+ if (ret)
+ return ret;
+
spin_lock_irq(&crc->lock);
if (crc->opened) {
@@ -168,57 +195,41 @@ static int crtc_crc_open(struct inode *inode, struct file *filep)
return ret;
}
- spin_lock_irq(&crc->lock);
- if (!crc->opened)
- crc->opened = true;
- else
- ret = -EBUSY;
- spin_unlock_irq(&crc->lock);
-
+ ret = crtc->funcs->verify_crc_source(crtc, crc->source, &values_cnt);
if (ret)
return ret;
- ret = crtc->funcs->set_crc_source(crtc, crc->source, &values_cnt);
- if (ret)
- goto err;
-
- if (WARN_ON(values_cnt > DRM_MAX_CRC_NR)) {
- ret = -EINVAL;
- goto err_disable;
- }
+ if (WARN_ON(values_cnt > DRM_MAX_CRC_NR))
+ return -EINVAL;
- if (WARN_ON(values_cnt == 0)) {
- ret = -EINVAL;
- goto err_disable;
- }
+ if (WARN_ON(values_cnt == 0))
+ return -EINVAL;
entries = kcalloc(DRM_CRC_ENTRIES_NR, sizeof(*entries), GFP_KERNEL);
- if (!entries) {
- ret = -ENOMEM;
- goto err_disable;
- }
+ if (!entries)
+ return -ENOMEM;
spin_lock_irq(&crc->lock);
- crc->entries = entries;
- crc->values_cnt = values_cnt;
-
- /*
- * Only return once we got a first frame, so userspace doesn't have to
- * guess when this particular piece of HW will be ready to start
- * generating CRCs.
- */
- ret = wait_event_interruptible_lock_irq(crc->wq,
- crtc_crc_data_count(crc),
- crc->lock);
+ if (!crc->opened) {
+ crc->opened = true;
+ crc->entries = entries;
+ crc->values_cnt = values_cnt;
+ } else {
+ ret = -EBUSY;
+ }
spin_unlock_irq(&crc->lock);
+ if (ret) {
+ kfree(entries);
+ return ret;
+ }
+
+ ret = crtc->funcs->set_crc_source(crtc, crc->source);
if (ret)
- goto err_disable;
+ goto err;
return 0;
-err_disable:
- crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
err:
spin_lock_irq(&crc->lock);
crtc_crc_cleanup(crc);
@@ -230,9 +241,8 @@ static int crtc_crc_release(struct inode *inode, struct file *filep)
{
struct drm_crtc *crtc = filep->f_inode->i_private;
struct drm_crtc_crc *crc = &crtc->crc;
- size_t values_cnt;
- crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
+ crtc->funcs->set_crc_source(crtc, NULL);
spin_lock_irq(&crc->lock);
crtc_crc_cleanup(crc);
@@ -338,7 +348,7 @@ int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
{
struct dentry *crc_ent, *ent;
- if (!crtc->funcs->set_crc_source)
+ if (!crtc->funcs->set_crc_source || !crtc->funcs->verify_crc_source)
return 0;
crc_ent = debugfs_create_dir("crc", crtc->debugfs_entry);
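For context, a minimal sketch of the driver side of the reworked CRC hooks used above: set_crc_source() loses its values_cnt argument, and the new verify_crc_source()/get_crc_sources() callbacks report validity and the available source list up front (the debugfs "crc" directory is only created when both set_crc_source and verify_crc_source are implemented). Names prefixed foo_ are hypothetical; this is not part of the patch.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <drm/drm_crtc.h>

static const char * const foo_crc_sources[] = { "auto", "pipe" };

static const char *const *foo_get_crc_sources(struct drm_crtc *crtc,
					      size_t *count)
{
	*count = ARRAY_SIZE(foo_crc_sources);
	return foo_crc_sources;
}

static int foo_verify_crc_source(struct drm_crtc *crtc, const char *source,
				 size_t *values_cnt)
{
	/* NULL means "disable", which is always acceptable */
	if (source && strcmp(source, "auto") && strcmp(source, "pipe"))
		return -EINVAL;

	*values_cnt = 1;
	return 0;
}

static int foo_set_crc_source(struct drm_crtc *crtc, const char *source)
{
	/* program (or, for source == NULL, disable) CRC generation here */
	return 0;
}

static const struct drm_crtc_funcs foo_crtc_funcs = {
	/* ...the usual modeset hooks... */
	.set_crc_source = foo_set_crc_source,
	.verify_crc_source = foo_verify_crc_source,
	.get_crc_sources = foo_get_crc_sources,
};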
diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c
index 988513346e9c..8a718f85079a 100644
--- a/drivers/gpu/drm/drm_dp_cec.c
+++ b/drivers/gpu/drm/drm_dp_cec.c
@@ -16,7 +16,9 @@
* here. Quite a few active (mini-)DP-to-HDMI or USB-C-to-HDMI adapters
* have a converter chip that supports CEC-Tunneling-over-AUX (usually the
* Parade PS176), but they do not wire up the CEC pin, thus making CEC
- * useless.
+ * useless. Note that MegaChips 2900-based adapters appear to have good
+ * support for CEC tunneling. Those adapters that I have tested using
+ * this chipset all have the CEC line connected.
*
* Sadly there is no way for this driver to know this. What happens is
* that a /dev/cecX device is created that is isolated and unable to see
@@ -238,6 +240,10 @@ void drm_dp_cec_irq(struct drm_dp_aux *aux)
u8 cec_irq;
int ret;
+ /* No transfer function was set, so not a DP connector */
+ if (!aux->transfer)
+ return;
+
mutex_lock(&aux->cec.lock);
if (!aux->cec.adap)
goto unlock;
@@ -293,6 +299,10 @@ void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
unsigned int num_las = 1;
u8 cap;
+ /* No transfer function was set, so not a DP connector */
+ if (!aux->transfer)
+ return;
+
#ifndef CONFIG_MEDIA_CEC_RC
/*
* CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
@@ -361,6 +371,10 @@ EXPORT_SYMBOL(drm_dp_cec_set_edid);
*/
void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
{
+ /* No transfer function was set, so not a DP connector */
+ if (!aux->transfer)
+ return;
+
cancel_delayed_work_sync(&aux->cec.unregister_work);
mutex_lock(&aux->cec.lock);
@@ -404,6 +418,8 @@ void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
struct device *parent)
{
WARN_ON(aux->cec.adap);
+ if (WARN_ON(!aux->transfer))
+ return;
aux->cec.name = name;
aux->cec.parent = parent;
INIT_DELAYED_WORK(&aux->cec.unregister_work,
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 0cccbcb2d03e..37c01b6076ec 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -850,7 +850,8 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return ret;
case DP_AUX_I2C_REPLY_NACK:
- DRM_DEBUG_KMS("I2C nack (result=%d, size=%zu\n", ret, msg->size);
+ DRM_DEBUG_KMS("I2C nack (result=%d, size=%zu)\n",
+ ret, msg->size);
aux->i2c_nack_count++;
return -EREMOTEIO;
@@ -1256,15 +1257,22 @@ EXPORT_SYMBOL(drm_dp_stop_crc);
struct dpcd_quirk {
u8 oui[3];
+ u8 device_id[6];
bool is_branch;
u32 quirks;
};
#define OUI(first, second, third) { (first), (second), (third) }
+#define DEVICE_ID(first, second, third, fourth, fifth, sixth) \
+ { (first), (second), (third), (fourth), (fifth), (sixth) }
+
+#define DEVICE_ID_ANY DEVICE_ID(0, 0, 0, 0, 0, 0)
static const struct dpcd_quirk dpcd_quirk_list[] = {
/* Analogix 7737 needs reduced M and N at HBR2 link rates */
- { OUI(0x00, 0x22, 0xb9), true, BIT(DP_DPCD_QUIRK_LIMITED_M_N) },
+ { OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
+ /* LG LP140WF6-SPM1 eDP panel */
+ { OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
};
#undef OUI
@@ -1283,6 +1291,7 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
const struct dpcd_quirk *quirk;
u32 quirks = 0;
int i;
+ u8 any_device[] = DEVICE_ID_ANY;
for (i = 0; i < ARRAY_SIZE(dpcd_quirk_list); i++) {
quirk = &dpcd_quirk_list[i];
@@ -1293,12 +1302,19 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
if (memcmp(quirk->oui, ident->oui, sizeof(ident->oui)) != 0)
continue;
+ if (memcmp(quirk->device_id, any_device, sizeof(any_device)) != 0 &&
+ memcmp(quirk->device_id, ident->device_id, sizeof(ident->device_id)) != 0)
+ continue;
+
quirks |= quirk->quirks;
}
return quirks;
}
+#undef DEVICE_ID_ANY
+#undef DEVICE_ID
+
/**
* drm_dp_read_desc - read sink/branch descriptor from DPCD
* @aux: DisplayPort AUX channel
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 7780567aa669..5ff1d79b86c4 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -439,6 +439,7 @@ static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx
if (idx > raw->curlen)
goto fail_len;
repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
+ idx++;
if (idx > raw->curlen)
goto fail_len;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index ea4941da9b27..36e8e9cbec52 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -506,6 +506,9 @@ int drm_dev_init(struct drm_device *dev,
dev->dev = parent;
dev->driver = driver;
+ /* no per-device feature limits by default */
+ dev->driver_features = ~0u;
+
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->filelist_internal);
INIT_LIST_HEAD(&dev->clientlist);
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 273e1c59c54a..b694fb57eaa4 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -222,7 +222,7 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
struct drm_crtc *crtc;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
encoder = drm_encoder_find(dev, file_priv, enc_resp->encoder_id);
if (!encoder)
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 9da36a6271d3..47e0e2f6642d 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -86,14 +86,21 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
{
struct drm_gem_cma_object *obj;
dma_addr_t paddr;
+ u8 h_div = 1, v_div = 1;
obj = drm_fb_cma_get_gem_obj(fb, plane);
if (!obj)
return 0;
paddr = obj->paddr + fb->offsets[plane];
- paddr += fb->format->cpp[plane] * (state->src_x >> 16);
- paddr += fb->pitches[plane] * (state->src_y >> 16);
+
+ if (plane > 0) {
+ h_div = fb->format->hsub;
+ v_div = fb->format->vsub;
+ }
+
+ paddr += (fb->format->cpp[plane] * (state->src_x >> 16)) / h_div;
+ paddr += (fb->pitches[plane] * (state->src_y >> 16)) / v_div;
return paddr;
}
@@ -219,21 +226,6 @@ void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
/**
- * drm_fbdev_cma_set_suspend - wrapper around drm_fb_helper_set_suspend
- * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
- * @state: desired state, zero to resume, non-zero to suspend
- *
- * Calls drm_fb_helper_set_suspend, which is a wrapper around
- * fb_set_suspend implemented by fbdev core.
- */
-void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, bool state)
-{
- if (fbdev_cma)
- drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state);
-}
-EXPORT_SYMBOL(drm_fbdev_cma_set_suspend);
-
-/**
* drm_fbdev_cma_set_suspend_unlocked - wrapper around
* drm_fb_helper_set_suspend_unlocked
* @fbdev_cma: The drm_fbdev_cma struct, may be NULL
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4b0dd20bccb8..8e95d0f7c71d 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -2673,6 +2673,8 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
info = fb_helper->fbdev;
info->var.pixclock = 0;
+ /* don't leak any physical addresses to userspace */
+ info->flags |= FBINFO_HIDE_SMEM_START;
/* Need to drop locks to avoid recursive deadlock in
* register_framebuffer. This is ok because the only thing left to do is
@@ -2821,7 +2823,9 @@ EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
 * The caller must provide a &drm_fb_helper_funcs->fb_probe callback
* function.
*
- * See also: drm_fb_helper_initial_config()
+ * Use drm_fb_helper_fbdev_teardown() to destroy the fbdev.
+ *
+ * See also: drm_fb_helper_initial_config(), drm_fbdev_generic_setup().
*
* Returns:
* Zero on success or negative error code on failure.
@@ -3037,7 +3041,7 @@ static struct fb_deferred_io drm_fbdev_defio = {
* @fb_helper: fbdev helper structure
* @sizes: describes fbdev size and scanout surface size
*
- * This function uses the client API to crate a framebuffer backed by a dumb buffer.
+ * This function uses the client API to create a framebuffer backed by a dumb buffer.
*
* The _sys_ versions are used for &fb_ops.fb_read, fb_write, fb_fillrect,
* fb_copyarea, fb_imageblit.
@@ -3165,8 +3169,10 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
if (dev->fb_helper)
return drm_fb_helper_hotplug_event(dev->fb_helper);
- if (!dev->mode_config.num_connector)
+ if (!dev->mode_config.num_connector) {
+ DRM_DEV_DEBUG(dev->dev, "No connectors found, will not create framebuffer!\n");
return 0;
+ }
ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_helper_generic_funcs,
fb_helper->preferred_bpp, 0);
@@ -3187,13 +3193,14 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
};
/**
- * drm_fb_helper_generic_fbdev_setup() - Setup generic fbdev emulation
+ * drm_fbdev_generic_setup() - Setup generic fbdev emulation
* @dev: DRM device
* @preferred_bpp: Preferred bits per pixel for the device.
* @dev->mode_config.preferred_depth is used if this is zero.
*
* This function sets up generic fbdev emulation for drivers that supports
- * dumb buffers with a virtual address and that can be mmap'ed.
+ * dumb buffers with a virtual address and that can be mmap'ed. If the driver
+ * does not support these functions, it could use drm_fb_helper_fbdev_setup().
*
* Restore, hotplug events and teardown are all taken care of. Drivers that do
* suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
@@ -3206,6 +3213,8 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
* This function is safe to call even when there are no connectors present.
* Setup will be retried on the next hotplug event.
*
+ * The fbdev is destroyed by drm_dev_unregister().
+ *
* Returns:
* Zero on success or negative error code on failure.
*/
@@ -3214,6 +3223,8 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
struct drm_fb_helper *fb_helper;
int ret;
+ WARN(dev->fb_helper, "fb_helper is already set!\n");
+
if (!drm_fbdev_emulation)
return 0;
@@ -3224,12 +3235,15 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
if (ret) {
kfree(fb_helper);
+ DRM_DEV_ERROR(dev->dev, "Failed to register client: %d\n", ret);
return ret;
}
fb_helper->preferred_bpp = preferred_bpp;
- drm_fbdev_client_hotplug(&fb_helper->client);
+ ret = drm_fbdev_client_hotplug(&fb_helper->client);
+ if (ret)
+ DRM_DEV_DEBUG(dev->dev, "client hotplug ret=%d\n", ret);
return 0;
}
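As the updated comments describe, the generic fbdev client only needs a driver that supports mmap-able dumb buffers with a kernel virtual address. A minimal usage sketch (hypothetical foo driver, not from the patch):

#include <linux/platform_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>

static int foo_drm_bind(struct platform_device *pdev, struct drm_device *drm)
{
	int ret;

	/* ...mode setting objects already initialized... */

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* fbdev client; torn down automatically by drm_dev_unregister() */
	drm_fbdev_generic_setup(drm, 32);

	return 0;
}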
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 35c1e2742c27..be1d6aaef651 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -45,32 +45,49 @@ static char printable_char(int c)
*/
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
{
- uint32_t fmt;
+ uint32_t fmt = DRM_FORMAT_INVALID;
switch (bpp) {
case 8:
- fmt = DRM_FORMAT_C8;
+ if (depth == 8)
+ fmt = DRM_FORMAT_C8;
break;
+
case 16:
- if (depth == 15)
+ switch (depth) {
+ case 15:
fmt = DRM_FORMAT_XRGB1555;
- else
+ break;
+ case 16:
fmt = DRM_FORMAT_RGB565;
+ break;
+ default:
+ break;
+ }
break;
+
case 24:
- fmt = DRM_FORMAT_RGB888;
+ if (depth == 24)
+ fmt = DRM_FORMAT_RGB888;
break;
+
case 32:
- if (depth == 24)
+ switch (depth) {
+ case 24:
fmt = DRM_FORMAT_XRGB8888;
- else if (depth == 30)
+ break;
+ case 30:
fmt = DRM_FORMAT_XRGB2101010;
- else
+ break;
+ case 32:
fmt = DRM_FORMAT_ARGB8888;
+ break;
+ default:
+ break;
+ }
break;
+
default:
- DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
- fmt = DRM_FORMAT_XRGB8888;
break;
}
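Worked examples of the stricter mapping above (the values follow directly from the switch; any combination not listed now yields DRM_FORMAT_INVALID instead of silently falling back to XRGB8888):

	drm_mode_legacy_fb_format(8, 8)   -> DRM_FORMAT_C8
	drm_mode_legacy_fb_format(16, 15) -> DRM_FORMAT_XRGB1555
	drm_mode_legacy_fb_format(16, 16) -> DRM_FORMAT_RGB565
	drm_mode_legacy_fb_format(32, 24) -> DRM_FORMAT_XRGB8888
	drm_mode_legacy_fb_format(32, 30) -> DRM_FORMAT_XRGB2101010
	drm_mode_legacy_fb_format(32, 32) -> DRM_FORMAT_ARGB8888
	drm_mode_legacy_fb_format(24, 32) -> DRM_FORMAT_INVALID  (bpp/depth mismatch)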
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 781af1d42d76..1ee3d6b44280 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -25,6 +25,7 @@
#include <drm/drm_auth.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_print.h>
#include "drm_internal.h"
@@ -112,18 +113,37 @@ int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or,
struct drm_mode_fb_cmd2 r = {};
int ret;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EOPNOTSUPP;
+
+ r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
+ if (r.pixel_format == DRM_FORMAT_INVALID) {
+ DRM_DEBUG("bad {bpp:%d, depth:%d}\n", or->bpp, or->depth);
+ return -EINVAL;
+ }
+
/* convert to new format and call new ioctl */
r.fb_id = or->fb_id;
r.width = or->width;
r.height = or->height;
r.pitches[0] = or->pitch;
- r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
r.handles[0] = or->handle;
- if (r.pixel_format == DRM_FORMAT_XRGB2101010 &&
- dev->driver->driver_features & DRIVER_PREFER_XBGR_30BPP)
+ if (dev->mode_config.quirk_addfb_prefer_xbgr_30bpp &&
+ r.pixel_format == DRM_FORMAT_XRGB2101010)
r.pixel_format = DRM_FORMAT_XBGR2101010;
+ if (dev->mode_config.quirk_addfb_prefer_host_byte_order) {
+ if (r.pixel_format == DRM_FORMAT_XRGB8888)
+ r.pixel_format = DRM_FORMAT_HOST_XRGB8888;
+ if (r.pixel_format == DRM_FORMAT_ARGB8888)
+ r.pixel_format = DRM_FORMAT_HOST_ARGB8888;
+ if (r.pixel_format == DRM_FORMAT_RGB565)
+ r.pixel_format = DRM_FORMAT_HOST_RGB565;
+ if (r.pixel_format == DRM_FORMAT_XRGB1555)
+ r.pixel_format = DRM_FORMAT_HOST_XRGB1555;
+ }
+
ret = drm_mode_addfb2(dev, &r, file_priv);
if (ret)
return ret;
@@ -164,7 +184,7 @@ static int framebuffer_check(struct drm_device *dev,
int i;
/* check if the format is supported at all */
- info = __drm_format_info(r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN);
+ info = __drm_format_info(r->pixel_format);
if (!info) {
struct drm_format_name_buf format_name;
@@ -335,7 +355,7 @@ int drm_mode_addfb2(struct drm_device *dev,
struct drm_framebuffer *fb;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
fb = drm_internal_framebuffer_create(dev, r, file_priv);
if (IS_ERR(fb))
@@ -352,6 +372,30 @@ int drm_mode_addfb2(struct drm_device *dev,
return 0;
}
+int drm_mode_addfb2_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+#ifdef __BIG_ENDIAN
+ if (!dev->mode_config.quirk_addfb_prefer_host_byte_order) {
+ /*
+ * Drivers must set the
+ * quirk_addfb_prefer_host_byte_order quirk to make
+ * the drm_mode_addfb() compat code work correctly on
+ * bigendian machines.
+ *
+ * If they don't, they interpret pixel_format values
+ * incorrectly for bug compatibility, which in turn
+ * means the ADDFB2 ioctl does not work correctly
+ * either. So block it to make userspace fall back
+ * to ADDFB.
+ */
+ DRM_DEBUG_KMS("addfb2 broken on bigendian\n");
+ return -EOPNOTSUPP;
+ }
+#endif
+ return drm_mode_addfb2(dev, data, file_priv);
+}
+
struct drm_mode_rmfb_work {
struct work_struct work;
struct list_head fbs;
@@ -391,7 +435,7 @@ int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
int found = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
fb = drm_framebuffer_lookup(dev, file_priv, fb_id);
if (!fb)
@@ -468,7 +512,7 @@ int drm_mode_getfb(struct drm_device *dev,
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
if (!fb)
@@ -541,7 +585,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
if (!fb)
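The drm_mode_addfb() rework above is driven by the two new mode_config quirk flags. A hypothetical big-endian driver whose scanout formats are in host byte order would opt in roughly like this (sketch, not from the patch):

#include <drm/drm_mode_config.h>

static void foo_mode_config_init(struct drm_device *drm)
{
	drm_mode_config_init(drm);

	/*
	 * Legacy ADDFB depths map to DRM_FORMAT_HOST_* on this hardware,
	 * which also keeps ADDFB2 usable on big-endian machines instead of
	 * being blocked by drm_mode_addfb2_ioctl() above.
	 */
	drm->mode_config.quirk_addfb_prefer_host_byte_order = true;
}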
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index bf90625df3c5..512078ebd97b 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -667,7 +667,7 @@ drm_gem_close_ioctl(struct drm_device *dev, void *data,
int ret;
if (!drm_core_check_feature(dev, DRIVER_GEM))
- return -ENODEV;
+ return -EOPNOTSUPP;
ret = drm_gem_handle_delete(file_priv, args->handle);
@@ -694,7 +694,7 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
int ret;
if (!drm_core_check_feature(dev, DRIVER_GEM))
- return -ENODEV;
+ return -EOPNOTSUPP;
obj = drm_gem_object_lookup(file_priv, args->handle);
if (obj == NULL)
@@ -745,7 +745,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
u32 handle;
if (!drm_core_check_feature(dev, DRIVER_GEM))
- return -ENODEV;
+ return -EOPNOTSUPP;
mutex_lock(&dev->object_name_lock);
obj = idr_find(&dev->object_name_idr, (int) args->name);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 80a5115c3846..1d2ced882b66 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -436,7 +436,7 @@ struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
cma_obj->paddr, obj->size);
@@ -447,7 +447,7 @@ struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
out:
kfree(sgt);
- return NULL;
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
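Caller-side consequence of the change above: the helper now reports failures as ERR_PTR() values rather than NULL, so checks need to move to IS_ERR()/PTR_ERR(). An illustrative fragment, not from the patch:

#include <linux/err.h>
#include <drm/drm_gem_cma_helper.h>

static int foo_map_for_export(struct drm_gem_object *obj)
{
	struct sg_table *sgt = drm_gem_cma_prime_get_sg_table(obj);

	if (IS_ERR(sgt))		/* was: if (!sgt) return -ENOMEM; */
		return PTR_ERR(sgt);

	/* ...hand sgt to the importer... */
	return 0;
}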
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 2810d4131411..7607f9cd6f77 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -16,6 +16,7 @@
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 40179c5fc6b8..0c4eb4a9ab31 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -21,9 +21,14 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <drm/drm_ioctl.h>
+
#define DRM_IF_MAJOR 1
#define DRM_IF_MINOR 4
+struct drm_prime_file_private;
+struct dma_buf;
+
/* drm_file.c */
extern struct mutex drm_global_mutex;
struct drm_file *drm_file_alloc(struct drm_minor *minor);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index ea10e9a26aad..60dfbfae6a02 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -248,7 +248,7 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
/* Other caps only work with KMS drivers */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
switch (req->capability) {
case DRM_CAP_DUMB_BUFFER:
@@ -319,7 +319,7 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
break;
case DRM_CLIENT_CAP_ATOMIC:
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (req->value > 1)
return -EINVAL;
file_priv->atomic = req->value;
@@ -645,7 +645,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_UNLOCKED),
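A rough caller-side sketch of what the error-code change buys: throughout this series a failing drm_core_check_feature() now returns -EOPNOTSUPP instead of -EINVAL (or the occasional -ENODEV/-ENOTSUPP), so userspace can tell "the driver lacks this feature" apart from "the request itself is malformed". Hypothetical userspace code using only the uapi headers, not part of the patch:

#include <errno.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int commit_or_fall_back(int fd, struct drm_mode_atomic *arg)
{
	if (ioctl(fd, DRM_IOCTL_MODE_ATOMIC, arg) == 0)
		return 0;

	if (errno == EOPNOTSUPP)
		return 1;	/* no atomic/KMS support: use the legacy paths */

	return -errno;		/* EINVAL etc.: the request itself is bad */
}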
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3b04c25100ae..45a07652fa00 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -104,7 +104,7 @@ int drm_irq_install(struct drm_device *dev, int irq)
unsigned long sh_flags = 0;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (irq == 0)
return -EINVAL;
@@ -175,7 +175,7 @@ int drm_irq_uninstall(struct drm_device *dev)
int i;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return -EINVAL;
+ return -EOPNOTSUPP;
irq_enabled = dev->irq_enabled;
dev->irq_enabled = false;
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index b54fb78a283c..5894b4e144d7 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -506,7 +506,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
/* Can't lease without MODESET */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
/* Do not allow sub-leases */
if (lessor->lessor)
@@ -615,7 +615,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
/* Can't lease without MODESET */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
DRM_DEBUG_LEASE("List lessees for %d\n", lessor->lessee_id);
@@ -671,7 +671,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
/* Can't lease without MODESET */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
DRM_DEBUG_LEASE("get lease for %d\n", lessee->lessee_id);
@@ -726,7 +726,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev,
/* Can't lease without MODESET */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
mutex_lock(&dev->mode_config.idr_mutex);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 96bb6badb818..67a1a2ca7174 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -166,7 +166,7 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
++file_priv->lock_count;
@@ -256,7 +256,7 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
struct drm_master *master = file_priv->master;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 21e353bd3948..ee80788f2c40 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -97,8 +97,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
struct drm_connector_list_iter conn_iter;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
+ return -EOPNOTSUPP;
mutex_lock(&file_priv->fbs_lock);
count = 0;
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index fcb0ab0abb75..be8b754eaf60 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -381,7 +381,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
drm_modeset_lock_all(dev);
@@ -504,7 +504,7 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
int ret = -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
arg_obj = drm_mode_object_find(dev, file_priv, arg->obj_id, arg->obj_type);
if (!arg_obj)
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index b902361dee6e..0db486d10d1c 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -152,7 +152,9 @@ EXPORT_SYMBOL(drm_panel_detach);
*
* Return: A pointer to the panel registered for the specified device tree
* node or an ERR_PTR() if no panel matching the device tree node can be found.
+ *
* Possible error codes returned by this function:
+ *
* - EPROBE_DEFER: the panel device has not been probed yet, and the caller
* should retry later
* - ENODEV: the device is not available (status != "okay" or "ok")
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 896e42a34895..48f615d38931 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -182,14 +182,14 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_irq_busid *p = data;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
/* UMS was only ever support on PCI devices. */
if (WARN_ON(!dev->pdev))
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
- return -EINVAL;
+ return -EOPNOTSUPP;
return drm_pci_irq_by_busid(dev, p);
}
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 6153cbda239f..1fa98bd12003 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -20,8 +20,17 @@
* OF THIS SOFTWARE.
*/
-#include <drm/drmP.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
#include <drm/drm_plane.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_file.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_vblank.h>
#include "drm_crtc_internal.h"
@@ -463,15 +472,13 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_plane_res *plane_resp = data;
- struct drm_mode_config *config;
struct drm_plane *plane;
uint32_t __user *plane_ptr;
int count = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
- config = &dev->mode_config;
plane_ptr = u64_to_user_ptr(plane_resp->plane_id_ptr);
/*
@@ -507,7 +514,7 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
uint32_t __user *format_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
plane = drm_plane_find(dev, file_priv, plane_resp->plane_id);
if (!plane)
@@ -774,7 +781,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
/*
* First, find the plane, crtc, and fb objects. If not available,
@@ -912,7 +919,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
return -EINVAL;
@@ -1016,7 +1023,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
int ret = -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS)
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 621f17643bb0..a393756b664e 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -28,6 +28,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder.h>
#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 186db2e4c57a..3f0205fc0a1a 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -844,7 +844,7 @@ int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_prime_handle *args = data;
if (!drm_core_check_feature(dev, DRIVER_PRIME))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (!dev->driver->prime_handle_to_fd)
return -ENOSYS;
@@ -863,7 +863,7 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_prime_handle *args = data;
if (!drm_core_check_feature(dev, DRIVER_PRIME))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (!dev->driver->prime_fd_to_handle)
return -ENOSYS;
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index cdb10f885a4f..79c77c3cad86 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -464,7 +464,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
uint64_t __user *values_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
property = drm_property_find(dev, file_priv, out_resp->prop_id);
if (!property)
@@ -757,7 +757,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
blob = drm_property_lookup_blob(dev, out_resp->blob_id);
if (!blob)
@@ -786,7 +786,7 @@ int drm_mode_createblob_ioctl(struct drm_device *dev,
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
blob = drm_property_create_blob(dev, out_resp->length, NULL);
if (IS_ERR(blob))
@@ -823,7 +823,7 @@ int drm_mode_destroyblob_ioctl(struct drm_device *dev,
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
blob = drm_property_lookup_blob(dev, out_resp->blob_id);
if (!blob)
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index 275bca44f38c..bb829a115fc6 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -89,10 +89,10 @@ int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
DRM_DEBUG("\n");
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (dev->sg)
return -EINVAL;
@@ -202,10 +202,10 @@ int drm_legacy_sg_free(struct drm_device *dev, void *data,
struct drm_sg_mem *entry;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (!drm_core_check_feature(dev, DRIVER_SG))
- return -EINVAL;
+ return -EOPNOTSUPP;
entry = dev->sg;
dev->sg = NULL;
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index adb3cb27d31e..497729202bfe 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -56,6 +56,33 @@
#include "drm_internal.h"
#include <drm/drm_syncobj.h>
+struct drm_syncobj_stub_fence {
+ struct dma_fence base;
+ spinlock_t lock;
+};
+
+static const char *drm_syncobj_stub_fence_get_name(struct dma_fence *fence)
+{
+ return "syncobjstub";
+}
+
+static bool drm_syncobj_stub_fence_enable_signaling(struct dma_fence *fence)
+{
+ return !dma_fence_is_signaled(fence);
+}
+
+static void drm_syncobj_stub_fence_release(struct dma_fence *f)
+{
+ kfree(f);
+}
+
+static const struct dma_fence_ops drm_syncobj_stub_fence_ops = {
+ .get_driver_name = drm_syncobj_stub_fence_get_name,
+ .get_timeline_name = drm_syncobj_stub_fence_get_name,
+ .enable_signaling = drm_syncobj_stub_fence_enable_signaling,
+ .release = drm_syncobj_stub_fence_release,
+};
+
/**
* drm_syncobj_find - lookup and reference a sync object.
* @file_private: drm file private pointer
@@ -120,14 +147,6 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
return ret;
}
-/**
- * drm_syncobj_add_callback - adds a callback to syncobj::cb_list
- * @syncobj: Sync object to which to add the callback
- * @cb: Callback to add
- * @func: Func to use when initializing the drm_syncobj_cb struct
- *
- * This adds a callback to be called next time the fence is replaced
- */
void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
struct drm_syncobj_cb *cb,
drm_syncobj_func_t func)
@@ -136,13 +155,7 @@ void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
drm_syncobj_add_callback_locked(syncobj, cb, func);
spin_unlock(&syncobj->lock);
}
-EXPORT_SYMBOL(drm_syncobj_add_callback);
-/**
- * drm_syncobj_add_callback - removes a callback to syncobj::cb_list
- * @syncobj: Sync object from which to remove the callback
- * @cb: Callback to remove
- */
void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
struct drm_syncobj_cb *cb)
{
@@ -150,16 +163,17 @@ void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
list_del_init(&cb->node);
spin_unlock(&syncobj->lock);
}
-EXPORT_SYMBOL(drm_syncobj_remove_callback);
/**
* drm_syncobj_replace_fence - replace fence in a sync object.
* @syncobj: Sync object to replace fence in
+ * @point: timeline point
* @fence: fence to install in sync file.
*
- * This replaces the fence on a sync object.
+ * This replaces the fence on a sync object, or a timeline point fence.
*/
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
+ u64 point,
struct dma_fence *fence)
{
struct dma_fence *old_fence;
@@ -187,42 +201,19 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
-struct drm_syncobj_null_fence {
- struct dma_fence base;
- spinlock_t lock;
-};
-
-static const char *drm_syncobj_null_fence_get_name(struct dma_fence *fence)
-{
- return "syncobjnull";
-}
-
-static bool drm_syncobj_null_fence_enable_signaling(struct dma_fence *fence)
-{
- dma_fence_enable_sw_signaling(fence);
- return !dma_fence_is_signaled(fence);
-}
-
-static const struct dma_fence_ops drm_syncobj_null_fence_ops = {
- .get_driver_name = drm_syncobj_null_fence_get_name,
- .get_timeline_name = drm_syncobj_null_fence_get_name,
- .enable_signaling = drm_syncobj_null_fence_enable_signaling,
- .release = NULL,
-};
-
static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
- struct drm_syncobj_null_fence *fence;
+ struct drm_syncobj_stub_fence *fence;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (fence == NULL)
return -ENOMEM;
spin_lock_init(&fence->lock);
- dma_fence_init(&fence->base, &drm_syncobj_null_fence_ops,
+ dma_fence_init(&fence->base, &drm_syncobj_stub_fence_ops,
&fence->lock, 0, 0);
dma_fence_signal(&fence->base);
- drm_syncobj_replace_fence(syncobj, &fence->base);
+ drm_syncobj_replace_fence(syncobj, 0, &fence->base);
dma_fence_put(&fence->base);
@@ -233,6 +224,7 @@ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
* drm_syncobj_find_fence - lookup and reference the fence in a sync object
* @file_private: drm file private pointer
* @handle: sync object handle to lookup.
+ * @point: timeline point
* @fence: out parameter for the fence
*
* This is just a convenience function that combines drm_syncobj_find() and
@@ -243,7 +235,7 @@ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
* dma_fence_put().
*/
int drm_syncobj_find_fence(struct drm_file *file_private,
- u32 handle,
+ u32 handle, u64 point,
struct dma_fence **fence)
{
struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
@@ -272,7 +264,7 @@ void drm_syncobj_free(struct kref *kref)
struct drm_syncobj *syncobj = container_of(kref,
struct drm_syncobj,
refcount);
- drm_syncobj_replace_fence(syncobj, NULL);
+ drm_syncobj_replace_fence(syncobj, 0, NULL);
kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);
@@ -312,7 +304,7 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
}
if (fence)
- drm_syncobj_replace_fence(syncobj, fence);
+ drm_syncobj_replace_fence(syncobj, 0, fence);
*out_syncobj = syncobj;
return 0;
@@ -497,7 +489,7 @@ static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
return -ENOENT;
}
- drm_syncobj_replace_fence(syncobj, fence);
+ drm_syncobj_replace_fence(syncobj, 0, fence);
dma_fence_put(fence);
drm_syncobj_put(syncobj);
return 0;
@@ -514,7 +506,7 @@ static int drm_syncobj_export_sync_file(struct drm_file *file_private,
if (fd < 0)
return fd;
- ret = drm_syncobj_find_fence(file_private, handle, &fence);
+ ret = drm_syncobj_find_fence(file_private, handle, 0, &fence);
if (ret)
goto err_put_fd;
@@ -581,7 +573,7 @@ drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
struct drm_syncobj_create *args = data;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
- return -ENODEV;
+ return -EOPNOTSUPP;
/* no valid flags yet */
if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
@@ -598,7 +590,7 @@ drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_syncobj_destroy *args = data;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
- return -ENODEV;
+ return -EOPNOTSUPP;
/* make sure padding is empty */
if (args->pad)
@@ -613,7 +605,7 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_syncobj_handle *args = data;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
- return -ENODEV;
+ return -EOPNOTSUPP;
if (args->pad)
return -EINVAL;
@@ -637,7 +629,7 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_syncobj_handle *args = data;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
- return -ENODEV;
+ return -EOPNOTSUPP;
if (args->pad)
return -EINVAL;
@@ -929,7 +921,7 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
- return -ENODEV;
+ return -EOPNOTSUPP;
if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
@@ -963,7 +955,7 @@ drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
int ret;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
- return -ENODEV;
+ return -EOPNOTSUPP;
if (args->pad != 0)
return -EINVAL;
@@ -979,7 +971,7 @@ drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
return ret;
for (i = 0; i < args->count_handles; i++)
- drm_syncobj_replace_fence(syncobjs[i], NULL);
+ drm_syncobj_replace_fence(syncobjs[i], 0, NULL);
drm_syncobj_array_free(syncobjs, args->count_handles);
@@ -996,7 +988,7 @@ drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
int ret;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
- return -ENODEV;
+ return -EOPNOTSUPP;
if (args->pad != 0)
return -EINVAL;
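In-kernel callers of the syncobj helpers pick up the new u64 timeline-point parameter shown above; passing 0 keeps the existing binary-syncobj behaviour. A rough sketch of an updated call site (hypothetical wrapper, not from the patch):

#include <linux/dma-fence.h>
#include <drm/drm_syncobj.h>

static int foo_attach_fence(struct drm_file *file_priv, u32 handle,
			    struct drm_syncobj *syncobj,
			    struct dma_fence *new_fence)
{
	struct dma_fence *old;
	int ret;

	/* point 0: plain (non-timeline) syncobj semantics */
	ret = drm_syncobj_find_fence(file_priv, handle, 0, &old);
	if (ret)
		return ret;
	dma_fence_put(old);

	drm_syncobj_replace_fence(syncobj, 0, new_fence);
	return 0;
}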
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 28cdcf76b6f9..98e091175921 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -873,8 +873,8 @@ static void send_vblank_event(struct drm_device *dev,
* handler by calling drm_crtc_send_vblank_event() and make sure that there's no
* possible race with the hardware committing the atomic update.
*
- * Caller must hold a vblank reference for the event @e, which will be dropped
- * when the next vblank arrives.
+ * Caller must hold a vblank reference for the event @e acquired by a
+ * drm_crtc_vblank_get(), which will be dropped when the next vblank arrives.
*/
void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e)
@@ -1541,7 +1541,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
if (vblwait->request.type &
~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
_DRM_VBLANK_HIGH_CRTC_MASK)) {
- DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
+ DRM_DEBUG("Unsupported type value 0x%x, supported mask 0x%x\n",
vblwait->request.type,
(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
_DRM_VBLANK_HIGH_CRTC_MASK));
@@ -1771,7 +1771,7 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (!dev->irq_enabled)
return -EINVAL;
@@ -1829,7 +1829,7 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
unsigned long spin_flags;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
+ return -EOPNOTSUPP;
if (!dev->irq_enabled)
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index a6b2fe36b025..c5d0d2358301 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -103,10 +103,7 @@ EXPORT_SYMBOL(drm_vma_offset_manager_init);
*/
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
- /* take the lock to protect against buggy drivers */
- write_lock(&mgr->vm_lock);
drm_mm_takedown(&mgr->vm_addr_space_mm);
- write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 93d2f4000d2f..941b238bdcc9 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -24,7 +24,6 @@
#include <linux/mm_types.h>
#include <drm/drmP.h>
-#include <drm/drm_global.h>
#include <drm/gma_drm.h>
#include "psb_reg.h"
#include "psb_intel_drv.h"
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 459f8f88a34c..9e36ffb5eb7c 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -30,6 +30,7 @@ config DRM_I915_DEBUG
select SW_SYNC # signaling validation framework (igt/syncobj*)
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
+ select DRM_I915_DEBUG_RUNTIME_PM
default n
help
Choose this option to turn on extra driver debugging that may affect
@@ -167,3 +168,14 @@ config DRM_I915_DEBUG_VBLANK_EVADE
the vblank.
If in doubt, say "N".
+
+config DRM_I915_DEBUG_RUNTIME_PM
+ bool "Enable extra state checking for runtime PM"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option to turn on extra state checking for the
+ runtime PM functionality. This may introduce overhead during
+ driver loading, suspend and resume operations.
+
+ If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index c62346fdc05d..19cf1bbe059d 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -56,6 +56,10 @@ static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
/**
* vgpu_pci_cfg_mem_write - write virtual cfg space memory
+ * @vgpu: target vgpu
+ * @off: offset
+ * @src: src ptr to write
+ * @bytes: number of bytes
*
* Use this function to write virtual cfg space memory.
* For standard cfg space, only RW bits can be changed,
@@ -91,6 +95,10 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
/**
* intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
+ * @vgpu: target vgpu
+ * @offset: offset
+ * @p_data: return data ptr
+ * @bytes: number of bytes to read
*
* Returns:
* Zero on success, negative error code if failed.
@@ -278,6 +286,10 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
/**
 * intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write
+ * @vgpu: target vgpu
+ * @offset: offset
+ * @p_data: write data ptr
+ * @bytes: number of bytes to write
*
* Returns:
* Zero on success, negative error code if failed.
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index a614db310ea2..77edbfcb0f75 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1840,6 +1840,8 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
return ret;
}
+static int mi_noop_index;
+
static struct cmd_info cmd_info[] = {
{"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
@@ -2525,7 +2527,12 @@ static int cmd_parser_exec(struct parser_exec_state *s)
cmd = cmd_val(s, 0);
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ /* fastpath for MI_NOOP */
+ if (cmd == MI_NOOP)
+ info = &cmd_info[mi_noop_index];
+ else
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+
if (info == NULL) {
gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
cmd, get_opcode(cmd, s->ring_id),
@@ -2928,6 +2935,8 @@ static int init_cmd_table(struct intel_gvt *gvt)
kfree(e);
return -EEXIST;
}
+ if (cmd_info[i].opcode == OP_MI_NOOP)
+ mi_noop_index = i;
INIT_HLIST_NODE(&e->hlist);
add_cmd_entry(gvt, e);
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 3019dbc39aef..df1e14145747 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -462,6 +462,7 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
/**
* intel_vgpu_init_display- initialize vGPU virtual display emulation
* @vgpu: a vGPU
+ * @resolution: resolution index for intel_vgpu_edid
*
 * This function is used to initialize vGPU virtual display emulation stuff
*
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 4b98539025c5..5d4bb35bb889 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -340,6 +340,9 @@ static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
/**
* intel_gvt_i2c_handle_gmbus_read - emulate gmbus register mmio read
* @vgpu: a vGPU
+ * @offset: reg offset
+ * @p_data: data return buffer
+ * @bytes: access data length
*
* This function is used to emulate gmbus register mmio read
*
@@ -365,6 +368,9 @@ int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
/**
* intel_gvt_i2c_handle_gmbus_write - emulate gmbus register mmio write
* @vgpu: a vGPU
+ * @offset: reg offset
+ * @p_data: data return buffer
+ * @bytes: access data length
*
* This function is used to emulate gmbus register mmio write
*
@@ -437,6 +443,9 @@ static inline int get_aux_ch_reg(unsigned int offset)
/**
* intel_gvt_i2c_handle_aux_ch_write - emulate AUX channel register write
* @vgpu: a vGPU
+ * @port_idx: port index
+ * @offset: reg offset
+ * @p_data: write ptr
*
* This function is used to emulate AUX channel register write
*
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 00aad8164dec..2402395a068d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1113,6 +1113,10 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
}
/**
+ * Check whether a 2MB page can be used
+ * @vgpu: target vgpu
+ * @entry: target pfn's gtt entry
+ *
 * Return 1 if 2MB huge gtt shadowing is possible, 0 if the conditions
 * are not met, negative if an error is found.
*/
@@ -1945,7 +1949,7 @@ void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
/**
* intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
- * @vgpu: a vGPU
+ * @mm: target vgpu mm
*
* This function is called when user wants to use a vGPU mm object. If this
* mm object hasn't been shadowed yet, the shadow will be populated at this
@@ -2521,8 +2525,7 @@ fail:
/**
* intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
* @vgpu: a vGPU
- * @page_table_level: PPGTT page table level
- * @root_entry: PPGTT page table root pointers
+ * @pdps: pdp root array
*
* This function is used to find a PPGTT mm object from mm object pool
*
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 46c8b720e336..6ef5a7fc70df 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -189,7 +189,6 @@ static const struct intel_gvt_ops intel_gvt_ops = {
/**
* intel_gvt_init_host - Load MPT modules and detect if we're running in host
- * @gvt: intel gvt device
*
 * This function is called at the driver loading stage. If it fails to find a
 * loadable MPT module or detects that we're currently running in a VM, then GVT-g
@@ -303,7 +302,7 @@ static int init_service_thread(struct intel_gvt *gvt)
/**
* intel_gvt_clean_device - clean a GVT device
- * @gvt: intel gvt device
+ * @dev_priv: i915 private
*
* This function is called at the driver unloading stage, to free the
* resources owned by a GVT device.
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 7a58ca555197..d26258786e3f 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1287,12 +1287,13 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
{
write_vreg(vgpu, offset, p_data, bytes);
- if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_CTL_REQ(HSW_DISP_PW_GLOBAL))
+ if (vgpu_vreg(vgpu, offset) &
+ HSW_PWR_WELL_CTL_REQ(HSW_PW_CTL_IDX_GLOBAL))
vgpu_vreg(vgpu, offset) |=
- HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL);
+ HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
else
vgpu_vreg(vgpu, offset) &=
- ~HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL);
+ ~HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL);
return 0;
}
@@ -2118,7 +2119,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
gmbus_mmio_write);
- MMIO_F(PCH_GPIOA, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(0xe4f00), 0x28, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
@@ -2443,17 +2444,10 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
MMIO_D(GEN6_PMINTRMSK, D_ALL);
- /*
- * Use an arbitrary power well controlled by the PWR_WELL_CTL
- * register.
- */
- MMIO_DH(HSW_PWR_WELL_CTL_BIOS(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
- power_well_ctl_mmio_write);
- MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
- power_well_ctl_mmio_write);
- MMIO_DH(HSW_PWR_WELL_CTL_KVMR, D_BDW, NULL, power_well_ctl_mmio_write);
- MMIO_DH(HSW_PWR_WELL_CTL_DEBUG(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
- power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL4, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
@@ -2804,13 +2798,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_F(_MMIO(_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
- /*
- * Use an arbitrary power well controlled by the PWR_WELL_CTL
- * register.
- */
- MMIO_D(HSW_PWR_WELL_CTL_BIOS(SKL_DISP_PW_MISC_IO), D_SKL_PLUS);
- MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
- skl_power_well_ctl_write);
+ MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
+ MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -3434,6 +3423,7 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
* @offset: register offset
* @pdata: data buffer
* @bytes: data length
+ * @is_read: read or write
*
* Returns:
* Zero on success, negative error code if failed.
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index a45f46d8537f..71751be329e3 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1712,7 +1712,7 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
return pfn;
}
-int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
+static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
unsigned long size, dma_addr_t *dma_addr)
{
struct kvmgt_guest_info *info;
@@ -1761,7 +1761,7 @@ static void __gvt_dma_release(struct kref *ref)
__gvt_cache_remove_entry(entry->vgpu, entry);
}
-void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
+static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
{
struct kvmgt_guest_info *info;
struct gvt_dma *entry;
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 994366035364..4db817c21ed8 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -39,6 +39,7 @@
/**
* intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
* @vgpu: a vGPU
+ * @gpa: guest physical address
*
* Returns:
* Zero on success, negative error code if failed
@@ -228,7 +229,7 @@ out:
/**
* intel_vgpu_reset_mmio - reset virtual MMIO space
* @vgpu: a vGPU
- *
+ * @dmlr: whether this is device model level reset
*/
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
{
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 42e1e6bdcc2c..7e702c6a32af 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -37,19 +37,6 @@
#include "gvt.h"
#include "trace.h"
-/**
- * Defined in Intel Open Source PRM.
- * Ref: https://01.org/linuxgraphics/documentation/hardware-specification-prms
- */
-#define TRVATTL3PTRDW(i) _MMIO(0x4de0 + (i)*4)
-#define TRNULLDETCT _MMIO(0x4de8)
-#define TRINVTILEDETCT _MMIO(0x4dec)
-#define TRVADR _MMIO(0x4df0)
-#define TRTTE _MMIO(0x4df4)
-#define RING_EXCC(base) _MMIO((base) + 0x28)
-#define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
-#define VF_GUARDBAND _MMIO(0x83a4)
-
#define GEN9_MOCS_SIZE 64
/* Raw offset is appended to each line for convenience. */
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 5c3b9ff9f96a..f7eaa442403f 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -53,5 +53,8 @@ bool is_inhibit_context(struct intel_context *ce);
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
struct i915_request *req);
+#define IS_RESTORE_INHIBIT(a) \
+ (_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \
+ ((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)))
#endif
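/*
 * A minimal, standalone sketch of how the IS_RESTORE_INHIBIT() macro added
 * above evaluates.  The _MASKED_BIT_ENABLE() and CTX_CTRL_* definitions
 * below are simplified stand-ins for illustration (not the i915 headers):
 * a masked register value carries the enable bit in both the mask half
 * (high 16 bits) and the value half (low 16 bits), and the macro tests
 * that both are set.
 */
#include <stdint.h>
#include <stdio.h>

#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1u << 0)	/* assumed bit position */
#define _MASKED_BIT_ENABLE(x)			(((x) << 16) | (x))

#define IS_RESTORE_INHIBIT(a) \
	(_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \
	 ((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)))

int main(void)
{
	uint32_t ctx_ctrl = _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

	printf("%d\n", IS_RESTORE_INHIBIT(ctx_ctrl));	/* 1: mask and value bits set */
	printf("%d\n", IS_RESTORE_INHIBIT(0));		/* 0: restore not inhibited */
	return 0;
}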
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index fa75a2eead90..82586c8e434f 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -216,7 +216,6 @@ static void virt_vbt_generation(struct vbt *v)
/**
* intel_vgpu_init_opregion - initialize the stuff used to emulate opregion
* @vgpu: a vGPU
- * @gpa: guest physical address of opregion
*
* Returns:
* Zero on success, negative error code if failed.
diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c
index 256d0db8bbb1..84856022528e 100644
--- a/drivers/gpu/drm/i915/gvt/page_track.c
+++ b/drivers/gpu/drm/i915/gvt/page_track.c
@@ -41,6 +41,8 @@ struct intel_vgpu_page_track *intel_vgpu_find_page_track(
* intel_vgpu_register_page_track - register a guest page to be tracked
* @vgpu: a vGPU
* @gfn: the gfn of guest page
+ * @handler: page track handler
+ * @priv: tracker private
*
* Returns:
* zero on success, negative error code if failed.
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index d4f7ce6dc1d7..428d252344f1 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -77,4 +77,22 @@
#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + \
I915_GTT_PAGE_SIZE)
+#define PCH_GPIO_BASE _MMIO(0xc5010)
+
+#define PCH_GMBUS0 _MMIO(0xc5100)
+#define PCH_GMBUS1 _MMIO(0xc5104)
+#define PCH_GMBUS2 _MMIO(0xc5108)
+#define PCH_GMBUS3 _MMIO(0xc510c)
+#define PCH_GMBUS4 _MMIO(0xc5110)
+#define PCH_GMBUS5 _MMIO(0xc5120)
+
+#define TRVATTL3PTRDW(i) _MMIO(0x4de0 + (i) * 4)
+#define TRNULLDETCT _MMIO(0x4de8)
+#define TRINVTILEDETCT _MMIO(0x4dec)
+#define TRVADR _MMIO(0x4df0)
+#define TRTTE _MMIO(0x4df4)
+#define RING_EXCC(base) _MMIO((base) + 0x28)
+#define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
+#define VF_GUARDBAND _MMIO(0x83a4)
+
#endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 43aa058e29fc..ea34003d6dd2 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -132,35 +132,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
unsigned long context_gpa, context_page_num;
int i;
- gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
- workload->ctx_desc.lrca);
-
- context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
-
- context_page_num = context_page_num >> PAGE_SHIFT;
-
- if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
- context_page_num = 19;
-
- i = 2;
-
- while (i < context_page_num) {
- context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
- (u32)((workload->ctx_desc.lrca + i) <<
- I915_GTT_PAGE_SHIFT));
- if (context_gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_vgpu_err("Invalid guest context descriptor\n");
- return -EFAULT;
- }
-
- page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
- dst = kmap(page);
- intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
- I915_GTT_PAGE_SIZE);
- kunmap(page);
- i++;
- }
-
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
shadow_ring_context = kmap(page);
@@ -195,6 +166,37 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
kunmap(page);
+
+ if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
+ return 0;
+
+ gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
+ workload->ctx_desc.lrca);
+
+ context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+
+ context_page_num = context_page_num >> PAGE_SHIFT;
+
+ if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+ context_page_num = 19;
+
+ i = 2;
+ while (i < context_page_num) {
+ context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ (u32)((workload->ctx_desc.lrca + i) <<
+ I915_GTT_PAGE_SHIFT));
+ if (context_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_vgpu_err("Invalid guest context descriptor\n");
+ return -EFAULT;
+ }
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+ dst = kmap(page);
+ intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
+ I915_GTT_PAGE_SIZE);
+ kunmap(page);
+ i++;
+ }
return 0;
}
@@ -1138,6 +1140,7 @@ out_shadow_ctx:
/**
* intel_vgpu_select_submission_ops - select virtual submission interface
* @vgpu: a vGPU
+ * @engine_mask: either ALL_ENGINES or target engine mask
* @interface: expected vGPU virtual submission interface
*
* This function is called when the guest configures the submission interface.
@@ -1190,7 +1193,7 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
/**
* intel_vgpu_destroy_workload - destroy a vGPU workload
- * @vgpu: a vGPU
+ * @workload: workload to destroy
*
* This function is called when destroying a vGPU workload.
*
@@ -1282,6 +1285,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
/**
* intel_vgpu_create_workload - create a vGPU workload
* @vgpu: a vGPU
+ * @ring_id: ring index
* @desc: a guest context descriptor
*
* This function is called when creating a vGPU workload.
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index f9ce35da4123..1f7051e97afb 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1953,7 +1953,10 @@ static int i915_context_status(struct seq_file *m, void *unused)
return ret;
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
- seq_printf(m, "HW context %u ", ctx->hw_id);
+ seq_puts(m, "HW context ");
+ if (!list_empty(&ctx->hw_id_link))
+ seq_printf(m, "%x [pin %u]", ctx->hw_id,
+ atomic_read(&ctx->hw_id_pin_count));
if (ctx->pid) {
struct task_struct *task;
@@ -2708,7 +2711,9 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->psr.lock);
- seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
+ seq_printf(m, "PSR mode: %s\n",
+ dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1");
+ seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
dev_priv->psr.busy_frontbuffer_bits);
@@ -2735,7 +2740,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
psr_source_status(dev_priv, m);
mutex_unlock(&dev_priv->psr.lock);
- if (READ_ONCE(dev_priv->psr.debug)) {
+ if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) {
seq_printf(m, "Last attempted entry at: %lld\n",
dev_priv->psr.last_entry_attempt);
seq_printf(m, "Last exit at: %lld\n",
@@ -2750,17 +2755,32 @@ static int
i915_edp_psr_debug_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
if (!CAN_PSR(dev_priv))
return -ENODEV;
- DRM_DEBUG_KMS("PSR debug %s\n", enableddisabled(val));
+ DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
intel_runtime_pm_get(dev_priv);
- intel_psr_irq_control(dev_priv, !!val);
+
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+
+retry:
+ ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
intel_runtime_pm_put(dev_priv);
- return 0;
+ return ret;
}
static int
@@ -2845,10 +2865,10 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
enum intel_display_power_domain power_domain;
power_well = &power_domains->power_wells[i];
- seq_printf(m, "%-25s %d\n", power_well->name,
+ seq_printf(m, "%-25s %d\n", power_well->desc->name,
power_well->count);
- for_each_power_domain(power_domain, power_well->domains)
+ for_each_power_domain(power_domain, power_well->desc->domains)
seq_printf(m, " %-23s %d\n",
intel_display_power_domain_str(power_domain),
power_domains->domain_use_count[power_domain]);
@@ -4114,13 +4134,17 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
#define DROP_FREED BIT(4)
#define DROP_SHRINK_ALL BIT(5)
#define DROP_IDLE BIT(6)
+#define DROP_RESET_ACTIVE BIT(7)
+#define DROP_RESET_SEQNO BIT(8)
#define DROP_ALL (DROP_UNBOUND | \
DROP_BOUND | \
DROP_RETIRE | \
DROP_ACTIVE | \
DROP_FREED | \
DROP_SHRINK_ALL |\
- DROP_IDLE)
+ DROP_IDLE | \
+ DROP_RESET_ACTIVE | \
+ DROP_RESET_SEQNO)
static int
i915_drop_caches_get(void *data, u64 *val)
{
@@ -4132,53 +4156,69 @@ i915_drop_caches_get(void *data, u64 *val)
static int
i915_drop_caches_set(void *data, u64 val)
{
- struct drm_i915_private *dev_priv = data;
- struct drm_device *dev = &dev_priv->drm;
+ struct drm_i915_private *i915 = data;
int ret = 0;
DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
val, val & DROP_ALL);
+ if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
+ i915_gem_set_wedged(i915);
+
/* No need to check and wait for gpu resets, only libdrm auto-restarts
* on ioctls on -EAGAIN. */
- if (val & (DROP_ACTIVE | DROP_RETIRE)) {
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
+ ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
return ret;
if (val & DROP_ACTIVE)
- ret = i915_gem_wait_for_idle(dev_priv,
+ ret = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
+ if (val & DROP_RESET_SEQNO) {
+ intel_runtime_pm_get(i915);
+ ret = i915_gem_set_global_seqno(&i915->drm, 1);
+ intel_runtime_pm_put(i915);
+ }
+
if (val & DROP_RETIRE)
- i915_retire_requests(dev_priv);
+ i915_retire_requests(i915);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&i915->drm.struct_mutex);
+ }
+
+ if (val & DROP_RESET_ACTIVE &&
+ i915_terminally_wedged(&i915->gpu_error)) {
+ i915_handle_error(i915, ALL_ENGINES, 0, NULL);
+ wait_on_bit(&i915->gpu_error.flags,
+ I915_RESET_HANDOFF,
+ TASK_UNINTERRUPTIBLE);
}
fs_reclaim_acquire(GFP_KERNEL);
if (val & DROP_BOUND)
- i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);
+ i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
if (val & DROP_UNBOUND)
- i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
+ i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
if (val & DROP_SHRINK_ALL)
- i915_gem_shrink_all(dev_priv);
+ i915_gem_shrink_all(i915);
fs_reclaim_release(GFP_KERNEL);
if (val & DROP_IDLE) {
do {
- if (READ_ONCE(dev_priv->gt.active_requests))
- flush_delayed_work(&dev_priv->gt.retire_work);
- drain_delayed_work(&dev_priv->gt.idle_work);
- } while (READ_ONCE(dev_priv->gt.awake));
+ if (READ_ONCE(i915->gt.active_requests))
+ flush_delayed_work(&i915->gt.retire_work);
+ drain_delayed_work(&i915->gt.idle_work);
+ } while (READ_ONCE(i915->gt.awake));
}
if (val & DROP_FREED)
- i915_gem_drain_freed_objects(dev_priv);
+ i915_gem_drain_freed_objects(i915);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f8cfd16be534..2ddf8538cb47 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -373,7 +373,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = 2;
break;
case I915_PARAM_HAS_RESOURCE_STREAMER:
- value = HAS_RESOURCE_STREAMER(dev_priv);
+ value = 0;
break;
case I915_PARAM_HAS_POOLED_EU:
value = HAS_POOLED_EU(dev_priv);
@@ -441,6 +441,9 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz;
break;
+ case I915_PARAM_MMAP_GTT_COHERENT:
+ value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
+ break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
@@ -709,7 +712,7 @@ cleanup_irq:
intel_teardown_gmbus(dev_priv);
cleanup_csr:
intel_csr_ucode_fini(dev_priv);
- intel_power_domains_fini(dev_priv);
+ intel_power_domains_fini_hw(dev_priv);
vga_switcheroo_unregister_client(pdev);
cleanup_vga_client:
vga_client_register(pdev, NULL, NULL, NULL);
@@ -867,7 +870,6 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
/**
* i915_driver_init_early - setup state not requiring device access
* @dev_priv: device private
- * @ent: the matching pci_device_id
*
* Initialize everything that is a "SW-only" state, that is state not
* requiring accessing the device or exposing the driver via kernel internal
@@ -875,25 +877,13 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
* system memory allocation, setting up device specific attributes and
* function hooks not requiring accessing the device.
*/
-static int i915_driver_init_early(struct drm_i915_private *dev_priv,
- const struct pci_device_id *ent)
+static int i915_driver_init_early(struct drm_i915_private *dev_priv)
{
- const struct intel_device_info *match_info =
- (struct intel_device_info *)ent->driver_data;
- struct intel_device_info *device_info;
int ret = 0;
if (i915_inject_load_failure())
return -ENODEV;
- /* Setup the write-once "constant" device info */
- device_info = mkwrite_device_info(dev_priv);
- memcpy(device_info, match_info, sizeof(*device_info));
- device_info->device_id = dev_priv->drm.pdev->device;
-
- BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
- sizeof(device_info->platform_mask) * BITS_PER_BYTE);
- BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
mutex_init(&dev_priv->backlight_lock);
@@ -921,7 +911,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
intel_uc_init_early(dev_priv);
intel_pm_setup(dev_priv);
intel_init_dpio(dev_priv);
- intel_power_domains_init(dev_priv);
+ ret = intel_power_domains_init(dev_priv);
+ if (ret < 0)
+ goto err_uc;
intel_irq_init(dev_priv);
intel_hangcheck_init(dev_priv);
intel_init_display_hooks(dev_priv);
@@ -933,6 +925,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
return 0;
+err_uc:
+ intel_uc_cleanup_early(dev_priv);
+ i915_gem_cleanup_early(dev_priv);
err_workqueues:
i915_workqueues_cleanup(dev_priv);
err_engines:
@@ -947,6 +942,7 @@ err_engines:
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
intel_irq_fini(dev_priv);
+ intel_power_domains_cleanup(dev_priv);
intel_uc_cleanup_early(dev_priv);
i915_gem_cleanup_early(dev_priv);
i915_workqueues_cleanup(dev_priv);
@@ -1272,6 +1268,9 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
*/
if (INTEL_INFO(dev_priv)->num_pipes)
drm_kms_helper_poll_init(dev);
+
+ intel_power_domains_enable(dev_priv);
+ intel_runtime_pm_enable(dev_priv);
}
/**
@@ -1280,6 +1279,9 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
*/
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
+ intel_runtime_pm_disable(dev_priv);
+ intel_power_domains_disable(dev_priv);
+
intel_fbdev_unregister(dev_priv);
intel_audio_deinit(dev_priv);
@@ -1316,6 +1318,52 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
DRM_INFO("DRM_I915_DEBUG enabled\n");
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
+ DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n");
+}
+
+static struct drm_i915_private *
+i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ const struct intel_device_info *match_info =
+ (struct intel_device_info *)ent->driver_data;
+ struct intel_device_info *device_info;
+ struct drm_i915_private *i915;
+
+ i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
+ if (!i915)
+ return NULL;
+
+ if (drm_dev_init(&i915->drm, &driver, &pdev->dev)) {
+ kfree(i915);
+ return NULL;
+ }
+
+ i915->drm.pdev = pdev;
+ i915->drm.dev_private = i915;
+ pci_set_drvdata(pdev, &i915->drm);
+
+ /* Setup the write-once "constant" device info */
+ device_info = mkwrite_device_info(i915);
+ memcpy(device_info, match_info, sizeof(*device_info));
+ device_info->device_id = pdev->device;
+
+ BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
+ sizeof(device_info->platform_mask) * BITS_PER_BYTE);
+ BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
+
+ return i915;
+}
+
+static void i915_driver_destroy(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ drm_dev_fini(&i915->drm);
+ kfree(i915);
+
+ /* And make sure we never chase our dangling pointer from pci_dev */
+ pci_set_drvdata(pdev, NULL);
}
/**
@@ -1336,42 +1384,23 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
struct drm_i915_private *dev_priv;
int ret;
- /* Enable nuclear pageflip on ILK+ */
- if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
- driver.driver_features &= ~DRIVER_ATOMIC;
-
- ret = -ENOMEM;
- dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
- if (dev_priv)
- ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
- if (ret) {
- DRM_DEV_ERROR(&pdev->dev, "allocation failed\n");
- goto out_free;
- }
+ dev_priv = i915_driver_create(pdev, ent);
+ if (!dev_priv)
+ return -ENOMEM;
- dev_priv->drm.pdev = pdev;
- dev_priv->drm.dev_private = dev_priv;
+ /* Disable nuclear pageflip by default on pre-ILK */
+ if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
+ dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
ret = pci_enable_device(pdev);
if (ret)
goto out_fini;
- pci_set_drvdata(pdev, &dev_priv->drm);
- /*
- * Disable the system suspend direct complete optimization, which can
- * leave the device suspended skipping the driver's suspend handlers
- * if the device was already runtime suspended. This is needed due to
- * the difference in our runtime and system suspend sequence and
- * becaue the HDA driver may require us to enable the audio power
- * domain during system suspend.
- */
- dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
-
- ret = i915_driver_init_early(dev_priv, ent);
+ ret = i915_driver_init_early(dev_priv);
if (ret < 0)
goto out_pci_disable;
- intel_runtime_pm_get(dev_priv);
+ disable_rpm_wakeref_asserts(dev_priv);
ret = i915_driver_init_mmio(dev_priv);
if (ret < 0)
@@ -1399,11 +1428,9 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
i915_driver_register(dev_priv);
- intel_runtime_pm_enable(dev_priv);
-
intel_init_ipc(dev_priv);
- intel_runtime_pm_put(dev_priv);
+ enable_rpm_wakeref_asserts(dev_priv);
i915_welcome_messages(dev_priv);
@@ -1414,16 +1441,13 @@ out_cleanup_hw:
out_cleanup_mmio:
i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
- intel_runtime_pm_put(dev_priv);
+ enable_rpm_wakeref_asserts(dev_priv);
i915_driver_cleanup_early(dev_priv);
out_pci_disable:
pci_disable_device(pdev);
out_fini:
i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
- drm_dev_fini(&dev_priv->drm);
-out_free:
- kfree(dev_priv);
- pci_set_drvdata(pdev, NULL);
+ i915_driver_destroy(dev_priv);
return ret;
}
@@ -1432,13 +1456,13 @@ void i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
+ disable_rpm_wakeref_asserts(dev_priv);
+
i915_driver_unregister(dev_priv);
if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-
drm_atomic_helper_shutdown(dev);
intel_gvt_cleanup(dev_priv);
@@ -1459,12 +1483,14 @@ void i915_driver_unload(struct drm_device *dev)
i915_gem_fini(dev_priv);
intel_fbc_cleanup_cfb(dev_priv);
- intel_power_domains_fini(dev_priv);
+ intel_power_domains_fini_hw(dev_priv);
i915_driver_cleanup_hw(dev_priv);
i915_driver_cleanup_mmio(dev_priv);
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+ enable_rpm_wakeref_asserts(dev_priv);
+
+ WARN_ON(atomic_read(&dev_priv->runtime_pm.wakeref_count));
}
static void i915_driver_release(struct drm_device *dev)
@@ -1472,9 +1498,7 @@ static void i915_driver_release(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
i915_driver_cleanup_early(dev_priv);
- drm_dev_fini(&dev_priv->drm);
-
- kfree(dev_priv);
+ i915_driver_destroy(dev_priv);
}
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
@@ -1573,7 +1597,7 @@ static int i915_drm_suspend(struct drm_device *dev)
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
- intel_display_set_init_power(dev_priv, true);
+ intel_power_domains_disable(dev_priv);
drm_kms_helper_poll_disable(dev);
@@ -1610,6 +1634,18 @@ static int i915_drm_suspend(struct drm_device *dev)
return 0;
}
+static enum i915_drm_suspend_mode
+get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
+{
+ if (hibernate)
+ return I915_DRM_SUSPEND_HIBERNATE;
+
+ if (suspend_to_idle(dev_priv))
+ return I915_DRM_SUSPEND_IDLE;
+
+ return I915_DRM_SUSPEND_MEM;
+}
+
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1620,21 +1656,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
i915_gem_suspend_late(dev_priv);
- intel_display_set_init_power(dev_priv, false);
intel_uncore_suspend(dev_priv);
- /*
- * In case of firmware assisted context save/restore don't manually
- * deinit the power domains. This also means the CSR/DMC firmware will
- * stay active, it will power down any HW resources as required and
- * also enable deeper system power states that would be blocked if the
- * firmware was inactive.
- */
- if (IS_GEN9_LP(dev_priv) || hibernation || !suspend_to_idle(dev_priv) ||
- dev_priv->csr.dmc_payload == NULL) {
- intel_power_domains_suspend(dev_priv);
- dev_priv->power_domains_suspended = true;
- }
+ intel_power_domains_suspend(dev_priv,
+ get_suspend_mode(dev_priv, hibernation));
ret = 0;
if (IS_GEN9_LP(dev_priv))
@@ -1646,10 +1671,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
if (ret) {
DRM_ERROR("Suspend complete failed: %d\n", ret);
- if (dev_priv->power_domains_suspended) {
- intel_power_domains_init_hw(dev_priv, true);
- dev_priv->power_domains_suspended = false;
- }
+ intel_power_domains_resume(dev_priv);
goto out;
}
@@ -1755,7 +1777,7 @@ static int i915_drm_resume(struct drm_device *dev)
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
- * bother with the tiny race here where we might loose hotplug
+ * bother with the tiny race here where we might lose hotplug
* notifications.
* */
intel_hpd_init(dev_priv);
@@ -1766,6 +1788,8 @@ static int i915_drm_resume(struct drm_device *dev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
+ intel_power_domains_enable(dev_priv);
+
enable_rpm_wakeref_asserts(dev_priv);
return 0;
@@ -1800,7 +1824,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
ret = pci_set_power_state(pdev, PCI_D0);
if (ret) {
DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
- goto out;
+ return ret;
}
/*
@@ -1816,10 +1840,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
* depend on the device enable refcount we can't anyway depend on them
* disabling/enabling the device.
*/
- if (pci_enable_device(pdev)) {
- ret = -EIO;
- goto out;
- }
+ if (pci_enable_device(pdev))
+ return -EIO;
pci_set_master(pdev);
@@ -1842,18 +1864,12 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_sanitize(dev_priv);
- if (dev_priv->power_domains_suspended)
- intel_power_domains_init_hw(dev_priv, true);
- else
- intel_display_set_init_power(dev_priv, true);
+ intel_power_domains_resume(dev_priv);
intel_engines_sanitize(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
-out:
- dev_priv->power_domains_suspended = false;
-
return ret;
}
@@ -1915,7 +1931,6 @@ void i915_reset(struct drm_i915_private *i915,
dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
error->reset_count++;
- disable_irq(i915->drm.irq);
ret = i915_gem_reset_prepare(i915);
if (ret) {
dev_err(i915->drm.dev, "GPU recovery failed\n");
@@ -1977,8 +1992,6 @@ void i915_reset(struct drm_i915_private *i915,
finish:
i915_gem_reset_finish(i915);
- enable_irq(i915->drm.irq);
-
wakeup:
clear_bit(I915_RESET_HANDOFF, &error->flags);
wake_up_bit(&error->flags, I915_RESET_HANDOFF);
@@ -2073,6 +2086,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
goto out;
out:
+ intel_engine_cancel_stop_cs(engine);
i915_gem_reset_finish_engine(engine);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4aca5344863d..7ea442033a57 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -52,6 +52,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>
+#include <drm/drm_util.h>
#include "i915_params.h"
#include "i915_reg.h"
@@ -86,8 +87,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20180719"
-#define DRIVER_TIMESTAMP 1532015279
+#define DRIVER_DATE "20180906"
+#define DRIVER_TIMESTAMP 1536242083
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -611,8 +612,18 @@ struct i915_drrs {
struct i915_psr {
struct mutex lock;
+
+#define I915_PSR_DEBUG_MODE_MASK 0x0f
+#define I915_PSR_DEBUG_DEFAULT 0x00
+#define I915_PSR_DEBUG_DISABLE 0x01
+#define I915_PSR_DEBUG_ENABLE 0x02
+#define I915_PSR_DEBUG_FORCE_PSR1 0x03
+#define I915_PSR_DEBUG_IRQ 0x10
+
+ u32 debug;
bool sink_support;
- struct intel_dp *enabled;
+ bool prepared, enabled;
+ struct intel_dp *dp;
bool active;
struct work_struct work;
unsigned busy_frontbuffer_bits;
@@ -622,7 +633,6 @@ struct i915_psr {
bool alpm;
bool psr2_enabled;
u8 sink_sync_latency;
- bool debug;
ktime_t last_entry_attempt;
ktime_t last_exit;
};
@@ -867,14 +877,17 @@ struct i915_power_well_ops {
struct i915_power_well *power_well);
};
+struct i915_power_well_regs {
+ i915_reg_t bios;
+ i915_reg_t driver;
+ i915_reg_t kvmr;
+ i915_reg_t debug;
+};
+
/* Power well structure for haswell */
-struct i915_power_well {
+struct i915_power_well_desc {
const char *name;
bool always_on;
- /* power well enable/disable usage count */
- int count;
- /* cached hw enabled state */
- bool hw_enabled;
u64 domains;
/* unique identifier for this power well */
enum i915_power_well_id id;
@@ -884,9 +897,22 @@ struct i915_power_well {
*/
union {
struct {
+ /*
+ * request/status flag index in the PUNIT power well
+ * control/status registers.
+ */
+ u8 idx;
+ } vlv;
+ struct {
enum dpio_phy phy;
} bxt;
struct {
+ const struct i915_power_well_regs *regs;
+ /*
+ * request/status flag index in the power well
+ * control/status registers.
+ */
+ u8 idx;
/* Mask of pipes whose IRQ logic is backed by the pw */
u8 irq_pipe_mask;
/* The pw is backing the VGA functionality */
@@ -897,13 +923,21 @@ struct i915_power_well {
const struct i915_power_well_ops *ops;
};
+struct i915_power_well {
+ const struct i915_power_well_desc *desc;
+ /* power well enable/disable usage count */
+ int count;
+ /* cached hw enabled state */
+ bool hw_enabled;
+};
+
struct i915_power_domains {
/*
* Power wells needed for initialization at driver init and suspend
* time are on. They are kept on until after the first modeset.
*/
- bool init_power_on;
bool initializing;
+ bool display_core_suspended;
int power_well_count;
struct mutex lock;
@@ -1610,7 +1644,8 @@ struct drm_i915_private {
struct mutex gmbus_mutex;
/**
- * Base address of the gmbus and gpio block.
+ * Base address of where the gmbus and gpio blocks are located (either
+ * on PCH or on SoC for platforms without PCH).
*/
uint32_t gpio_mmio_base;
@@ -1632,7 +1667,6 @@ struct drm_i915_private {
struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
[MAX_ENGINE_INSTANCE + 1];
- struct drm_dma_handle *status_page_dmah;
struct resource mch_res;
/* protects the irq masks */
@@ -1828,6 +1862,7 @@ struct drm_i915_private {
struct mutex av_mutex;
struct {
+ struct mutex mutex;
struct list_head list;
struct llist_head free_list;
struct work_struct free_work;
@@ -1840,6 +1875,7 @@ struct drm_i915_private {
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
+ struct list_head hw_id_list;
} contexts;
u32 fdi_rx_config;
@@ -2610,8 +2646,6 @@ intel_info(const struct drm_i915_private *dev_priv)
#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission()
#define USES_HUC(dev_priv) intel_uc_is_using_huc()
-#define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)
-
#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu)
#define INTEL_PCH_DEVICE_ID_MASK 0xff80
@@ -2775,6 +2809,8 @@ extern void intel_irq_fini(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);
+void i915_clear_error_registers(struct drm_i915_private *dev_priv);
+
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
return dev_priv->gvt;
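/*
 * A minimal sketch of the const-descriptor / runtime-state split introduced
 * above for power wells: the immutable description (name, domains, ops)
 * moves into a shared i915_power_well_desc table, and each device instance
 * keeps only the mutable usage count and cached hardware state.  The
 * structures and table below are simplified placeholders for illustration,
 * not the driver's actual layout.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

struct power_well_desc {
	const char *name;
	unsigned long long domains;
};

struct power_well {
	const struct power_well_desc *desc;	/* shared, read-only description */
	int count;				/* per-device usage count */
	bool hw_enabled;			/* per-device cached hw state */
};

static const struct power_well_desc well_descs[] = {
	{ .name = "always-on", .domains = ~0ull },
	{ .name = "display",   .domains = 0x3ull },
};

static struct power_well *alloc_power_wells(size_t *count)
{
	size_t n = sizeof(well_descs) / sizeof(well_descs[0]);
	struct power_well *wells = calloc(n, sizeof(*wells));

	if (!wells)
		return NULL;
	for (size_t i = 0; i < n; i++)
		wells[i].desc = &well_descs[i];	/* runtime state points at its descriptor */
	*count = n;
	return wells;
}

int main(void)
{
	size_t n = 0;
	struct power_well *wells = alloc_power_wells(&n);
	int ok = wells && n;

	free(wells);
	return ok ? 0 : 1;
}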
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fcc73a6ab503..89834ce19acd 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -802,6 +802,11 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
* that was!).
*/
+ wmb();
+
+ if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
+ return;
+
i915_gem_chipset_flush(dev_priv);
intel_runtime_pm_get(dev_priv);
@@ -1906,7 +1911,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
+static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
{
return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}
@@ -1965,7 +1970,7 @@ int i915_gem_mmap_gtt_version(void)
}
static inline struct i915_ggtt_view
-compute_partial_view(struct drm_i915_gem_object *obj,
+compute_partial_view(const struct drm_i915_gem_object *obj,
pgoff_t page_offset,
unsigned int chunk)
{
@@ -2013,7 +2018,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
+ bool write = area->vm_flags & VM_WRITE;
struct i915_vma *vma;
pgoff_t page_offset;
int ret;
@@ -2528,13 +2533,21 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
gfp_t noreclaim;
int ret;
- /* Assert that the object is not currently in any GPU domain. As it
+ /*
+ * Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+ /*
+ * If there's no chance of allocating enough pages for the whole
+ * object, bail early.
+ */
+ if (page_count > totalram_pages)
+ return -ENOMEM;
+
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
return -ENOMEM;
@@ -2545,7 +2558,8 @@ rebuild_st:
return -ENOMEM;
}
- /* Get the list of pages out of our struct file. They'll be pinned
+ /*
+ * Get the list of pages out of our struct file. They'll be pinned
* at this point until we release them.
*
* Fail silently without starting the shrinker
@@ -2577,7 +2591,8 @@ rebuild_st:
i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
cond_resched();
- /* We've tried hard to allocate the memory by reaping
+ /*
+ * We've tried hard to allocate the memory by reaping
* our own buffer, now let the real VM do its job and
* go down in flames if truly OOM.
*
@@ -2589,7 +2604,8 @@ rebuild_st:
/* reclaim and warn, but no oom */
gfp = mapping_gfp_mask(mapping);
- /* Our bo are always dirty and so we require
+ /*
+ * Our bo are always dirty and so we require
* kswapd to reclaim our pages (direct reclaim
* does not effectively begin pageout of our
* buffers on its own). However, direct reclaim
@@ -2633,7 +2649,8 @@ rebuild_st:
ret = i915_gem_gtt_prepare_pages(obj, st);
if (ret) {
- /* DMA remapping failed? One possible cause is that
+ /*
+ * DMA remapping failed? One possible cause is that
* it could not reserve enough large entries, asking
* for PAGE_SIZE chunks instead may be helpful.
*/
@@ -2667,7 +2684,8 @@ err_pages:
sg_free_table(st);
kfree(st);
- /* shmemfs first checks if there is enough memory to allocate the page
+ /*
+ * shmemfs first checks if there is enough memory to allocate the page
* and reports ENOSPC should there be insufficient, along with the usual
* ENOMEM for a genuine allocation failure.
*
@@ -3307,8 +3325,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
intel_engine_dump(engine, &p, "%s\n", engine->name);
}
- set_bit(I915_WEDGED, &i915->gpu_error.flags);
- smp_mb__after_atomic();
+ if (test_and_set_bit(I915_WEDGED, &i915->gpu_error.flags))
+ goto out;
/*
* First, stop submission to hw, but do not yet complete requests by
@@ -3324,7 +3342,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
i915->caps.scheduler = 0;
/* Even if the GPU reset fails, it should still stop the engines */
- intel_gpu_reset(i915, ALL_ENGINES);
+ if (INTEL_GEN(i915) >= 5)
+ intel_gpu_reset(i915, ALL_ENGINES);
/*
* Make sure no one is running the old callback before we proceed with
@@ -3367,6 +3386,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
i915_gem_reset_finish_engine(engine);
}
+out:
GEM_TRACE("end\n");
wake_up_all(&i915->gpu_error.reset_queue);
@@ -3816,6 +3836,12 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
if (timeout < 0)
return timeout;
}
+ if (GEM_SHOW_DEBUG() && !timeout) {
+ /* Presume that timeout was non-zero to begin with! */
+ dev_warn(&i915->drm.pdev->dev,
+ "Missed idle-completion interrupt!\n");
+ GEM_TRACE_DUMP();
+ }
err = wait_for_engines(i915);
if (err)
@@ -5592,6 +5618,8 @@ err_uc_misc:
i915_gem_cleanup_userptr(dev_priv);
if (ret == -EIO) {
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
/*
* Allow engine initialisation to fail by marking the GPU as
* wedged. But we only want to do this where the GPU is angry,
@@ -5602,7 +5630,14 @@ err_uc_misc:
"Failed to initialize GPU, declaring it wedged!\n");
i915_gem_set_wedged(dev_priv);
}
- ret = 0;
+
+ /* Minimal basic recovery for KMS */
+ ret = i915_ggtt_enable_hw(dev_priv);
+ i915_gem_restore_gtt_mappings(dev_priv);
+ i915_gem_restore_fences(dev_priv);
+ intel_init_clock_gating(dev_priv);
+
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
i915_gem_drain_freed_objects(dev_priv);
@@ -5612,6 +5647,7 @@ err_uc_misc:
void i915_gem_fini(struct drm_i915_private *dev_priv)
{
i915_gem_suspend_late(dev_priv);
+ intel_disable_gt_powersave(dev_priv);
/* Flush any outstanding unpin_work. */
i915_gem_drain_workqueue(dev_priv);
@@ -5623,6 +5659,8 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
i915_gem_contexts_fini(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_cleanup_gt_powersave(dev_priv);
+
intel_uc_fini_misc(dev_priv);
i915_gem_cleanup_userptr(dev_priv);
@@ -6182,4 +6220,5 @@ err_unlock:
#include "selftests/huge_pages.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
+#include "selftests/i915_gem.c"
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index e46592956872..599c4f6eb1ea 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -82,12 +82,6 @@ static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
tasklet_unlock_wait(t);
}
-static inline void __tasklet_enable_sync_once(struct tasklet_struct *t)
-{
- if (atomic_dec_return(&t->count) == 0)
- tasklet_kill(t);
-}
-
static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
{
return !atomic_read(&t->count);
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index f5c570d35b2a..8e74c23cbd91 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -45,11 +45,6 @@ static const char *i915_clflush_get_timeline_name(struct dma_fence *fence)
return "clflush";
}
-static bool i915_clflush_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static void i915_clflush_release(struct dma_fence *fence)
{
struct clflush *clflush = container_of(fence, typeof(*clflush), dma);
@@ -63,8 +58,6 @@ static void i915_clflush_release(struct dma_fence *fence)
static const struct dma_fence_ops i915_clflush_ops = {
.get_driver_name = i915_clflush_get_driver_name,
.get_timeline_name = i915_clflush_get_timeline_name,
- .enable_signaling = i915_clflush_enable_signaling,
- .wait = dma_fence_default_wait,
.release = i915_clflush_release,
};
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index b10770cfccd2..747b8170a15a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -115,6 +115,95 @@ static void lut_close(struct i915_gem_context *ctx)
rcu_read_unlock();
}
+static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
+{
+ unsigned int max;
+
+ lockdep_assert_held(&i915->contexts.mutex);
+
+ if (INTEL_GEN(i915) >= 11)
+ max = GEN11_MAX_CONTEXT_HW_ID;
+ else if (USES_GUC_SUBMISSION(i915))
+ /*
+ * When using GuC in proxy submission, GuC consumes the
+ * highest bit in the context id to indicate proxy submission.
+ */
+ max = MAX_GUC_CONTEXT_HW_ID;
+ else
+ max = MAX_CONTEXT_HW_ID;
+
+ return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
+}
+
+static int steal_hw_id(struct drm_i915_private *i915)
+{
+ struct i915_gem_context *ctx, *cn;
+ LIST_HEAD(pinned);
+ int id = -ENOSPC;
+
+ lockdep_assert_held(&i915->contexts.mutex);
+
+ list_for_each_entry_safe(ctx, cn,
+ &i915->contexts.hw_id_list, hw_id_link) {
+ if (atomic_read(&ctx->hw_id_pin_count)) {
+ list_move_tail(&ctx->hw_id_link, &pinned);
+ continue;
+ }
+
+ GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
+ list_del_init(&ctx->hw_id_link);
+ id = ctx->hw_id;
+ break;
+ }
+
+ /*
+ * Remember how far we got up on the last repossession scan, so the
+ * list is kept in a "least recently scanned" order.
+ */
+ list_splice_tail(&pinned, &i915->contexts.hw_id_list);
+ return id;
+}
+
+static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
+{
+ int ret;
+
+ lockdep_assert_held(&i915->contexts.mutex);
+
+ /*
+ * We prefer to steal/stall ourselves and our users over that of the
+ * entire system. That may be a little unfair to our users, and
+ * even hurt high priority clients. The choice is whether to oomkill
+ * something else, or steal a context id.
+ */
+ ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (unlikely(ret < 0)) {
+ ret = steal_hw_id(i915);
+ if (ret < 0) /* once again for the correct errno code */
+ ret = new_hw_id(i915, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+
+ *out = ret;
+ return 0;
+}
+
+static void release_hw_id(struct i915_gem_context *ctx)
+{
+ struct drm_i915_private *i915 = ctx->i915;
+
+ if (list_empty(&ctx->hw_id_link))
+ return;
+
+ mutex_lock(&i915->contexts.mutex);
+ if (!list_empty(&ctx->hw_id_link)) {
+ ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
+ list_del_init(&ctx->hw_id_link);
+ }
+ mutex_unlock(&i915->contexts.mutex);
+}
+
static void i915_gem_context_free(struct i915_gem_context *ctx)
{
unsigned int n;
@@ -122,6 +211,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ release_hw_id(ctx);
i915_ppgtt_put(ctx->ppgtt);
for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
@@ -136,7 +226,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
list_del(&ctx->link);
- ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id);
kfree_rcu(ctx, rcu);
}
@@ -191,6 +280,12 @@ static void context_close(struct i915_gem_context *ctx)
i915_gem_context_set_closed(ctx);
/*
+ * This context will never again be assigned to HW, so we can
+ * reuse its ID for the next context.
+ */
+ release_hw_id(ctx);
+
+ /*
* The LUT uses the VMA as a backpointer to unref the object,
* so we need to clear the LUT before we close all the VMA (inside
* the ppgtt).
@@ -203,43 +298,6 @@ static void context_close(struct i915_gem_context *ctx)
i915_gem_context_put(ctx);
}
-static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
-{
- int ret;
- unsigned int max;
-
- if (INTEL_GEN(dev_priv) >= 11) {
- max = GEN11_MAX_CONTEXT_HW_ID;
- } else {
- /*
- * When using GuC in proxy submission, GuC consumes the
- * highest bit in the context id to indicate proxy submission.
- */
- if (USES_GUC_SUBMISSION(dev_priv))
- max = MAX_GUC_CONTEXT_HW_ID;
- else
- max = MAX_CONTEXT_HW_ID;
- }
-
-
- ret = ida_simple_get(&dev_priv->contexts.hw_ida,
- 0, max, GFP_KERNEL);
- if (ret < 0) {
- /* Contexts are only released when no longer active.
- * Flush any pending retires to hopefully release some
- * stale contexts and try again.
- */
- i915_retire_requests(dev_priv);
- ret = ida_simple_get(&dev_priv->contexts.hw_ida,
- 0, max, GFP_KERNEL);
- if (ret < 0)
- return ret;
- }
-
- *out = ret;
- return 0;
-}
-
static u32 default_desc_template(const struct drm_i915_private *i915,
const struct i915_hw_ppgtt *ppgtt)
{
@@ -276,12 +334,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
if (ctx == NULL)
return ERR_PTR(-ENOMEM);
- ret = assign_hw_id(dev_priv, &ctx->hw_id);
- if (ret) {
- kfree(ctx);
- return ERR_PTR(ret);
- }
-
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->contexts.list);
ctx->i915 = dev_priv;
@@ -295,6 +347,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
+ INIT_LIST_HEAD(&ctx->hw_id_link);
/* Default context will never have a file_priv */
ret = DEFAULT_CONTEXT_HANDLE;
@@ -329,16 +382,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
ctx->desc_template =
default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
- /*
- * GuC requires the ring to be placed in Non-WOPCM memory. If GuC is not
- * present or not in use we still need a small bias as ring wraparound
- * at offset 0 sometimes hangs. No idea why.
- */
- if (USES_GUC(dev_priv))
- ctx->ggtt_offset_bias = dev_priv->guc.ggtt_pin_bias;
- else
- ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
-
return ctx;
err_pid:
@@ -431,15 +474,35 @@ out:
return ctx;
}
+static void
+destroy_kernel_context(struct i915_gem_context **ctxp)
+{
+ struct i915_gem_context *ctx;
+
+ /* Keep the context ref so that we can free it immediately ourselves */
+ ctx = i915_gem_context_get(fetch_and_zero(ctxp));
+ GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+
+ context_close(ctx);
+ i915_gem_context_free(ctx);
+}
+
struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
{
struct i915_gem_context *ctx;
+ int err;
ctx = i915_gem_create_context(i915, NULL);
if (IS_ERR(ctx))
return ctx;
+ err = i915_gem_context_pin_hw_id(ctx);
+ if (err) {
+ destroy_kernel_context(&ctx);
+ return ERR_PTR(err);
+ }
+
i915_gem_context_clear_bannable(ctx);
ctx->sched.priority = prio;
ctx->ring_size = PAGE_SIZE;
@@ -449,17 +512,19 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
return ctx;
}
-static void
-destroy_kernel_context(struct i915_gem_context **ctxp)
+static void init_contexts(struct drm_i915_private *i915)
{
- struct i915_gem_context *ctx;
+ mutex_init(&i915->contexts.mutex);
+ INIT_LIST_HEAD(&i915->contexts.list);
- /* Keep the context ref so that we can free it immediately ourselves */
- ctx = i915_gem_context_get(fetch_and_zero(ctxp));
- GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+ /* Using the simple ida interface, the max is limited by sizeof(int) */
+ BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
+ BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
+ ida_init(&i915->contexts.hw_ida);
+ INIT_LIST_HEAD(&i915->contexts.hw_id_list);
- context_close(ctx);
- i915_gem_context_free(ctx);
+ INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
+ init_llist_head(&i915->contexts.free_list);
}
static bool needs_preempt_context(struct drm_i915_private *i915)
@@ -480,14 +545,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
if (ret)
return ret;
- INIT_LIST_HEAD(&dev_priv->contexts.list);
- INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
- init_llist_head(&dev_priv->contexts.free_list);
-
- /* Using the simple ida interface, the max is limited by sizeof(int) */
- BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
- BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
- ida_init(&dev_priv->contexts.hw_ida);
+ init_contexts(dev_priv);
/* lowest priority; idle task */
ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
@@ -497,9 +555,13 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
}
/*
* For easy recognisability, we want the kernel context to be 0 and then
- * all user contexts will have non-zero hw_id.
+ * all user contexts will have non-zero hw_id. Kernel contexts are
+ * permanently pinned, so that we never suffer a stall and can
+ * use them from any allocation context (e.g. for evicting other
+ * contexts and from inside the shrinker).
*/
GEM_BUG_ON(ctx->hw_id);
+ GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
dev_priv->kernel_context = ctx;
/* highest priority; preempting task */
@@ -537,6 +599,7 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
destroy_kernel_context(&i915->kernel_context);
/* Must free all deferred contexts (via flush_workqueue) first */
+ GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
ida_destroy(&i915->contexts.hw_ida);
}
@@ -942,6 +1005,33 @@ out:
return ret;
}
+int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
+{
+ struct drm_i915_private *i915 = ctx->i915;
+ int err = 0;
+
+ mutex_lock(&i915->contexts.mutex);
+
+ GEM_BUG_ON(i915_gem_context_is_closed(ctx));
+
+ if (list_empty(&ctx->hw_id_link)) {
+ GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));
+
+ err = assign_hw_id(i915, &ctx->hw_id);
+ if (err)
+ goto out_unlock;
+
+ list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
+ }
+
+ GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
+ atomic_inc(&ctx->hw_id_pin_count);
+
+out_unlock:
+ mutex_unlock(&i915->contexts.mutex);
+ return err;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index b116e4942c10..e09673ca731d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -134,8 +134,16 @@ struct i915_gem_context {
* functions like fault reporting, PASID, scheduling. The
* &drm_i915_private.context_hw_ida is used to assign a unique
* id for the lifetime of the context.
+ *
+ * @hw_id_pin_count: - number of times this context has been pinned
+ * for use (should be, at most, once per engine).
+ *
+ * @hw_id_link: - all contexts with an assigned id are tracked
+ * for possible repossession.
*/
unsigned int hw_id;
+ atomic_t hw_id_pin_count;
+ struct list_head hw_id_link;
/**
* @user_handle: userspace identifier
@@ -147,9 +155,6 @@ struct i915_gem_context {
struct i915_sched_attr sched;
- /** ggtt_offset_bias: placement restriction for context objects */
- u32 ggtt_offset_bias;
-
/** engine: per-engine logical HW state */
struct intel_context {
struct i915_gem_context *gem_context;
@@ -257,6 +262,21 @@ static inline void i915_gem_context_set_force_single_submission(struct i915_gem_
__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}
+int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx);
+static inline int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
+{
+ if (atomic_inc_not_zero(&ctx->hw_id_pin_count))
+ return 0;
+
+ return __i915_gem_context_pin_hw_id(ctx);
+}
+
+static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx)
+{
+ GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == 0u);
+ atomic_dec(&ctx->hw_id_pin_count);
+}
+
static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
{
return c->user_handle == DEFAULT_CONTEXT_HANDLE;
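/*
 * A minimal userspace sketch of the hw_id pinning fast path added above:
 * an atomic_inc_not_zero()-style operation pins an already-assigned id
 * without taking the mutex, and only the very first pin falls back to the
 * locked slow path that assigns an id.  Pthreads and C11 atomics stand in
 * for the kernel primitives here; the id allocator is a toy placeholder
 * for illustration.
 */
#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t id_lock = PTHREAD_MUTEX_INITIALIZER;

struct ctx {
	unsigned int hw_id;	/* valid only while pin_count > 0 */
	atomic_uint pin_count;
};

static int assign_hw_id(struct ctx *c)
{
	static unsigned int next_id = 1;	/* toy allocator for the sketch */

	c->hw_id = next_id++;
	return 0;
}

static int pin_hw_id(struct ctx *c)
{
	unsigned int old = atomic_load(&c->pin_count);
	int err = 0;

	/* Fast path: bump the count only while it is already non-zero. */
	while (old) {
		if (atomic_compare_exchange_weak(&c->pin_count, &old, old + 1))
			return 0;
	}

	/* Slow path: the first pin assigns the id under the lock. */
	pthread_mutex_lock(&id_lock);
	if (atomic_load(&c->pin_count) == 0)
		err = assign_hw_id(c);
	if (!err)
		atomic_fetch_add(&c->pin_count, 1);
	pthread_mutex_unlock(&id_lock);
	return err;
}

static void unpin_hw_id(struct ctx *c)
{
	atomic_fetch_sub(&c->pin_count, 1);
}

int main(void)
{
	struct ctx c;

	c.hw_id = 0;
	atomic_init(&c.pin_count, 0);
	pin_hw_id(&c);	/* slow path: assigns c.hw_id under the lock */
	pin_hw_id(&c);	/* fast path: only bumps the pin count */
	unpin_hw_id(&c);
	unpin_hw_id(&c);
	return 0;
}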
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3f0c612d42e7..22b4cb775576 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -64,7 +64,9 @@ enum {
#define BATCH_OFFSET_BIAS (256*1024)
#define __I915_EXEC_ILLEGAL_FLAGS \
- (__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK)
+ (__I915_EXEC_UNKNOWN_FLAGS | \
+ I915_EXEC_CONSTANTS_MASK | \
+ I915_EXEC_RESOURCE_STREAMER)
/* Catch emission of unexpected errors for CI! */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
@@ -733,7 +735,12 @@ static int eb_select_context(struct i915_execbuffer *eb)
return -ENOENT;
eb->ctx = ctx;
- eb->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &eb->i915->ggtt.vm;
+ if (ctx->ppgtt) {
+ eb->vm = &ctx->ppgtt->vm;
+ eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
+ } else {
+ eb->vm = &eb->i915->ggtt.vm;
+ }
eb->context_flags = 0;
if (ctx->flags & CONTEXT_NO_ZEROMAP)
@@ -1120,6 +1127,13 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
u32 *cmd;
int err;
+ if (DBG_FORCE_RELOC == FORCE_GPU_RELOC) {
+ obj = vma->obj;
+ if (obj->cache_dirty & ~obj->cache_coherent)
+ i915_gem_clflush_object(obj, 0);
+ obj->write_domain = 0;
+ }
+
GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU);
obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
@@ -1484,8 +1498,10 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
* can read from this userspace address.
*/
offset = gen8_canonical_addr(offset & ~UPDATE);
- __put_user(offset,
- &urelocs[r-stack].presumed_offset);
+ if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) {
+ remain = -EFAULT;
+ goto out;
+ }
}
} while (r++, --count);
urelocs += ARRAY_SIZE(stack);
@@ -1570,7 +1586,6 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
relocs = kvmalloc_array(size, 1, GFP_KERNEL);
if (!relocs) {
- kvfree(relocs);
err = -ENOMEM;
goto err;
}
@@ -1584,6 +1599,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
if (__copy_from_user((char *)relocs + copied,
(char __user *)urelocs + copied,
len)) {
+end_user:
kvfree(relocs);
err = -EFAULT;
goto err;
@@ -1607,7 +1623,6 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
unsafe_put_user(-1,
&urelocs[copied].presumed_offset,
end_user);
-end_user:
user_access_end();
eb->exec[i].relocs_ptr = (uintptr_t)relocs;
@@ -2166,7 +2181,7 @@ signal_fence_array(struct i915_execbuffer *eb,
if (!(flags & I915_EXEC_FENCE_SIGNAL))
continue;
- drm_syncobj_replace_fence(syncobj, fence);
+ drm_syncobj_replace_fence(syncobj, 0, fence);
}
}
@@ -2199,8 +2214,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
- if (USES_FULL_PPGTT(eb.i915))
- eb.invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
reloc_cache_init(&eb.reloc_cache, eb.i915);
eb.buffer_count = args->buffer_count;
@@ -2221,20 +2234,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (!eb.engine)
return -EINVAL;
- if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
- if (!HAS_RESOURCE_STREAMER(eb.i915)) {
- DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
- return -EINVAL;
- }
- if (eb.engine->id != RCS) {
- DRM_DEBUG("RS is not available on %s\n",
- eb.engine->name);
- return -EINVAL;
- }
-
- eb.batch_flags |= I915_DISPATCH_RS;
- }
-
if (args->flags & I915_EXEC_FENCE_IN) {
in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
if (!in_fence)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index f00c7fbef79e..eb0e446d6482 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -173,19 +173,11 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
return 0;
}
- /* Early VLV doesn't have this */
- if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
- DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
- return 0;
- }
-
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
- if (has_full_48bit_ppgtt)
- return 3;
+ if (has_full_48bit_ppgtt)
+ return 3;
- if (has_full_ppgtt)
- return 2;
- }
+ if (has_full_ppgtt)
+ return 2;
return 1;
}
@@ -1259,9 +1251,6 @@ static void gen8_free_page_tables(struct i915_address_space *vm,
{
int i;
- if (!px_page(pd))
- return;
-
for (i = 0; i < I915_PDES; i++) {
if (pd->page_table[i] != vm->scratch_pt)
free_pt(vm, pd->page_table[i]);
@@ -2348,7 +2337,7 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
}
-static void gen6_check_and_clear_faults(struct drm_i915_private *dev_priv)
+static void gen6_check_faults(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -2366,15 +2355,11 @@ static void gen6_check_and_clear_faults(struct drm_i915_private *dev_priv)
fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
RING_FAULT_SRCID(fault),
RING_FAULT_FAULT_TYPE(fault));
- I915_WRITE(RING_FAULT_REG(engine),
- fault & ~RING_FAULT_VALID);
}
}
-
- POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}
-static void gen8_check_and_clear_faults(struct drm_i915_private *dev_priv)
+static void gen8_check_faults(struct drm_i915_private *dev_priv)
{
u32 fault = I915_READ(GEN8_RING_FAULT_REG);
@@ -2399,22 +2384,20 @@ static void gen8_check_and_clear_faults(struct drm_i915_private *dev_priv)
GEN8_RING_FAULT_ENGINE_ID(fault),
RING_FAULT_SRCID(fault),
RING_FAULT_FAULT_TYPE(fault));
- I915_WRITE(GEN8_RING_FAULT_REG,
- fault & ~RING_FAULT_VALID);
}
-
- POSTING_READ(GEN8_RING_FAULT_REG);
}
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
/* From GEN8 onwards we only have one 'All Engine Fault Register' */
if (INTEL_GEN(dev_priv) >= 8)
- gen8_check_and_clear_faults(dev_priv);
+ gen8_check_faults(dev_priv);
else if (INTEL_GEN(dev_priv) >= 6)
- gen6_check_and_clear_faults(dev_priv);
+ gen6_check_faults(dev_priv);
else
return;
+
+ i915_clear_error_registers(dev_priv);
}
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
@@ -2937,6 +2920,15 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
struct drm_mm_node *entry;
int ret;
+ /*
+ * GuC requires all resources that we're sharing with it to be placed in
+ * non-WOPCM memory. If GuC is not present or not in use we still need a
+ * small bias as ring wraparound at offset 0 sometimes hangs. No idea
+ * why.
+ */
+ ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
+ intel_guc_reserved_gtt_size(&dev_priv->guc));
+
ret = intel_vgt_balloon(dev_priv);
if (ret)
return ret;
@@ -3612,6 +3604,8 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
i915_address_space_init(&ggtt->vm, dev_priv);
+ ggtt->vm.is_ggtt = true;
+
/* Only VLV supports read-only GGTT mappings */
ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
@@ -3662,6 +3656,10 @@ void i915_ggtt_enable_guc(struct drm_i915_private *i915)
void i915_ggtt_disable_guc(struct drm_i915_private *i915)
{
+ /* XXX Temporary pardon for error unload */
+ if (i915->ggtt.invalidate == gen6_ggtt_invalidate)
+ return;
+
/* We should only be called after i915_ggtt_enable_guc() */
GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
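
The i915_gem_init_ggtt() hunk computes ggtt->pin_bias as the larger of one GTT page and the
GuC-reserved size, keeping pinned objects clear of both offset 0 and the region shared with
GuC. A plain-C sketch of that computation (constants and helper names below are made up):

#include <stdint.h>
#include <stdio.h>

#define FAKE_GTT_PAGE_SIZE      4096u   /* stand-in for I915_GTT_PAGE_SIZE */

static uint32_t fake_guc_reserved_size(int guc_enabled)
{
        /* stand-in for intel_guc_reserved_gtt_size(): 0 when GuC is unused */
        return guc_enabled ? (2u << 20) : 0;
}

static uint32_t compute_pin_bias(int guc_enabled)
{
        uint32_t reserved = fake_guc_reserved_size(guc_enabled);

        /* The max_t(u32, ...) in the hunk: always keep at least one page away
         * from offset 0, and never pin inside the GuC-reserved range. */
        return reserved > FAKE_GTT_PAGE_SIZE ? reserved : FAKE_GTT_PAGE_SIZE;
}

int main(void)
{
        printf("bias without GuC: %u\n", (unsigned int)compute_pin_bias(0));
        printf("bias with GuC:    %u\n", (unsigned int)compute_pin_bias(1));
        return 0;
}
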
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 2a116a91420b..7e2af5f4f39b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -167,29 +167,22 @@ struct intel_rotation_info {
} plane[2];
} __packed;
-static inline void assert_intel_rotation_info_is_packed(void)
-{
- BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
-}
-
struct intel_partial_info {
u64 offset;
unsigned int size;
} __packed;
-static inline void assert_intel_partial_info_is_packed(void)
-{
- BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
-}
-
enum i915_ggtt_view_type {
I915_GGTT_VIEW_NORMAL = 0,
I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
};
-static inline void assert_i915_ggtt_view_type_is_unique(void)
+static inline void assert_i915_gem_gtt_types(void)
{
+ BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
+ BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
+
/* As we encode the size of each branch inside the union into its type,
* we have to be careful that each branch has a unique size.
*/
@@ -229,7 +222,6 @@ struct i915_page_dma {
};
#define px_base(px) (&(px)->base)
-#define px_page(px) (px_base(px)->page)
#define px_dma(px) (px_base(px)->daddr)
struct i915_page_table {
@@ -332,6 +324,9 @@ struct i915_address_space {
struct pagestash free_pages;
+ /* Global GTT */
+ bool is_ggtt:1;
+
/* Some systems require uncached updates of the page directories */
bool pt_kmap_wc:1;
@@ -365,7 +360,7 @@ struct i915_address_space {
I915_SELFTEST_DECLARE(bool scrub_64K);
};
-#define i915_is_ggtt(V) (!(V)->file)
+#define i915_is_ggtt(vm) ((vm)->is_ggtt)
static inline bool
i915_vm_is_48bit(const struct i915_address_space *vm)
@@ -401,6 +396,8 @@ struct i915_ggtt {
int mtrr;
+ u32 pin_bias;
+
struct drm_mm_node error_capture;
};
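
The i915_gem_gtt.h hunk replaces the indirect !(V)->file test with an explicit is_ggtt bit
in the address-space struct, set once when the global GTT is initialised. A standalone
sketch of the pattern (types and names below are invented):

#include <stdbool.h>
#include <stdio.h>

struct fake_address_space {
        bool is_ggtt:1;     /* set exactly once when the global GTT is created */
        bool pt_kmap_wc:1;  /* unrelated flags pack into the same word */
};

#define fake_is_ggtt(vm) ((vm)->is_ggtt)

int main(void)
{
        struct fake_address_space ggtt = { .is_ggtt = true };
        struct fake_address_space ppgtt = { 0 };

        printf("ggtt:  %d\n", fake_is_ggtt(&ggtt));   /* 1 */
        printf("ppgtt: %d\n", fake_is_ggtt(&ppgtt));  /* 0 */
        return 0;
}
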
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 83e5e01fa9ea..a6dd7c46de0d 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -421,19 +421,19 @@ i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
}
static inline unsigned int
-i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
+i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
return obj->tiling_and_stride & TILING_MASK;
}
static inline bool
-i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
+i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}
static inline unsigned int
-i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
+i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
return obj->tiling_and_stride & STRIDE_MASK;
}
@@ -446,13 +446,13 @@ i915_gem_tile_height(unsigned int tiling)
}
static inline unsigned int
-i915_gem_object_get_tile_height(struct drm_i915_gem_object *obj)
+i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}
static inline unsigned int
-i915_gem_object_get_tile_row_size(struct drm_i915_gem_object *obj)
+i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
return (i915_gem_object_get_stride(obj) *
i915_gem_object_get_tile_height(obj));
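
The i915_gem_object.h changes only const-qualify the tiling and stride getters so they can
be called on const object pointers. In isolation the pattern is simply (types below are
invented):

#include <stdio.h>

struct fake_obj {
        unsigned int tiling_and_stride;
};

#define FAKE_TILING_MASK 0xfu

/* Taking a const pointer lets const-qualified callers use the getter. */
static inline unsigned int fake_get_tiling(const struct fake_obj *obj)
{
        return obj->tiling_and_stride & FAKE_TILING_MASK;
}

int main(void)
{
        const struct fake_obj obj = { .tiling_and_stride = 0x42 };

        printf("tiling = %u\n", fake_get_tiling(&obj));  /* 2 */
        return 0;
}
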
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 90628a47ae17..10f28a2ee2e6 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -478,7 +478,7 @@ void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
spin_lock_irq(&dev_priv->irq_lock);
- gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
+ gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
dev_priv->gt_pm.rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -516,7 +516,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
- gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
spin_unlock_irq(&dev_priv->irq_lock);
synchronize_irq(dev_priv->drm.irq);
@@ -1534,11 +1534,8 @@ static void gen8_gt_irq_ack(struct drm_i915_private *i915,
if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
- if (likely(gt_iir[2] & (i915->pm_rps_events |
- i915->pm_guc_events)))
- raw_reg_write(regs, GEN8_GT_IIR(2),
- gt_iir[2] & (i915->pm_rps_events |
- i915->pm_guc_events));
+ if (likely(gt_iir[2]))
+ raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
}
if (master_ctl & GEN8_GT_VECS_IRQ) {
@@ -3218,7 +3215,7 @@ static void i915_reset_device(struct drm_i915_private *dev_priv,
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}
-static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
+void i915_clear_error_registers(struct drm_i915_private *dev_priv)
{
u32 eir;
@@ -3241,6 +3238,22 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
I915_WRITE(EMR, I915_READ(EMR) | eir);
I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
}
+
+ if (INTEL_GEN(dev_priv) >= 8) {
+ I915_WRITE(GEN8_RING_FAULT_REG,
+ I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID);
+ POSTING_READ(GEN8_RING_FAULT_REG);
+ } else if (INTEL_GEN(dev_priv) >= 6) {
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, dev_priv, id) {
+ I915_WRITE(RING_FAULT_REG(engine),
+ I915_READ(RING_FAULT_REG(engine)) &
+ ~RING_FAULT_VALID);
+ }
+ POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
+ }
}
/**
@@ -3296,7 +3309,8 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
* Try engine reset when available. We fall back to full reset if
* single reset fails.
*/
- if (intel_has_reset_engine(dev_priv)) {
+ if (intel_has_reset_engine(dev_priv) &&
+ !i915_terminally_wedged(&dev_priv->gpu_error)) {
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
@@ -4781,7 +4795,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
/* WaGsvRC0ResidencyMethod:vlv */
dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
else
- dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
+ dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
rps->pm_intrmsk_mbz = 0;
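
In the intel_irq_init() hunk, dev_priv->pm_rps_events is spelled out from the individual
threshold/timeout bits because GEN6_PM_RPS_EVENTS itself (see the i915_reg.h hunk further
down) now also carries the EI_EXPIRED bits, while the gen8 ack path simply clears the whole
IIR. A small sketch of composing and testing such an event mask (bit positions below are
illustrative, not the real register layout):

#include <stdint.h>
#include <stdio.h>

#define PM_RP_UP_THRESHOLD      (1u << 5)
#define PM_RP_DOWN_THRESHOLD    (1u << 4)
#define PM_RP_UP_EI_EXPIRED     (1u << 2)
#define PM_RP_DOWN_EI_EXPIRED   (1u << 1)
#define PM_RP_DOWN_TIMEOUT      (1u << 0)

int main(void)
{
        /* Typical platforms: threshold + timeout events only.
         * A VLV-style EI method would instead select PM_RP_UP_EI_EXPIRED. */
        uint32_t rps_events = PM_RP_UP_THRESHOLD |
                              PM_RP_DOWN_THRESHOLD |
                              PM_RP_DOWN_TIMEOUT;

        uint32_t iir = PM_RP_UP_THRESHOLD | PM_RP_DOWN_EI_EXPIRED;

        /* Only the requested bits count as RPS work; the caller still acks
         * the full IIR value it read. */
        printf("rps work pending: 0x%x\n", (unsigned int)(iir & rps_events));
        return 0;
}
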
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 6a4d1388ad2d..d6f7b9fe1d26 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -74,6 +74,7 @@
.unfenced_needs_alignment = 1, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
+ .has_coherent_ggtt = false, \
GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
@@ -110,6 +111,7 @@ static const struct intel_device_info intel_i865g_info = {
.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
+ .has_coherent_ggtt = true, \
GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
@@ -117,6 +119,7 @@ static const struct intel_device_info intel_i865g_info = {
static const struct intel_device_info intel_i915g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I915G),
+ .has_coherent_ggtt = false,
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.hws_needs_physical = 1,
@@ -178,6 +181,7 @@ static const struct intel_device_info intel_pineview_info = {
.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
+ .has_coherent_ggtt = true, \
GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
@@ -220,6 +224,7 @@ static const struct intel_device_info intel_gm45_info = {
.has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING, \
.has_snoop = true, \
+ .has_coherent_ggtt = true, \
/* ilk does support rc6, but we do not implement [power] contexts */ \
.has_rc6 = 0, \
GEN_DEFAULT_PIPEOFFSETS, \
@@ -243,6 +248,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
.has_hotplug = 1, \
.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+ .has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
@@ -287,6 +293,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
.has_hotplug = 1, \
.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+ .has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
@@ -347,6 +354,7 @@ static const struct intel_device_info intel_valleyview_info = {
.has_aliasing_ppgtt = 1,
.has_full_ppgtt = 1,
.has_snoop = true,
+ .has_coherent_ggtt = false,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.display_mmio_offset = VLV_DISPLAY_BASE,
GEN_DEFAULT_PAGE_SIZES,
@@ -360,7 +368,6 @@ static const struct intel_device_info intel_valleyview_info = {
.has_ddi = 1, \
.has_fpga_dbg = 1, \
.has_psr = 1, \
- .has_resource_streamer = 1, \
.has_dp_mst = 1, \
.has_rc6p = 0 /* RC6p removed-by HSW */, \
.has_runtime_pm = 1
@@ -433,7 +440,6 @@ static const struct intel_device_info intel_cherryview_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_64bit_reloc = 1,
.has_runtime_pm = 1,
- .has_resource_streamer = 1,
.has_rc6 = 1,
.has_logical_ring_contexts = 1,
.has_gmch_display = 1,
@@ -441,6 +447,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_full_ppgtt = 1,
.has_reset_engine = 1,
.has_snoop = true,
+ .has_coherent_ggtt = false,
.display_mmio_offset = VLV_DISPLAY_BASE,
GEN_DEFAULT_PAGE_SIZES,
GEN_CHV_PIPEOFFSETS,
@@ -506,7 +513,6 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.has_runtime_pm = 1, \
.has_pooled_eu = 0, \
.has_csr = 1, \
- .has_resource_streamer = 1, \
.has_rc6 = 1, \
.has_dp_mst = 1, \
.has_logical_ring_contexts = 1, \
@@ -517,6 +523,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.has_full_48bit_ppgtt = 1, \
.has_reset_engine = 1, \
.has_snoop = true, \
+ .has_coherent_ggtt = false, \
.has_ipc = 1, \
GEN9_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_PIPEOFFSETS, \
@@ -580,6 +587,7 @@ static const struct intel_device_info intel_coffeelake_gt3_info = {
GEN9_FEATURES, \
GEN(10), \
.ddb_size = 1024, \
+ .has_coherent_ggtt = false, \
GLK_COLORS
static const struct intel_device_info intel_cannonlake_info = {
@@ -592,14 +600,12 @@ static const struct intel_device_info intel_cannonlake_info = {
GEN10_FEATURES, \
GEN(11), \
.ddb_size = 2048, \
- .has_csr = 0, \
.has_logical_ring_elsq = 1
static const struct intel_device_info intel_icelake_11_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ICELAKE),
.is_alpha_support = 1,
- .has_resource_streamer = 0,
.ring_mask = RENDER_RING | BLT_RING | VEBOX_RING | BSD_RING | BSD3_RING,
};
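
The i915_pci.c changes add a has_coherent_ggtt capability to the per-platform feature
blocks and drop the resource-streamer flag. The underlying idiom is a shared feature macro
expanded into designated initializers, with later fields overriding it; a compact sketch
(struct and field names below are invented):

#include <stdbool.h>
#include <stdio.h>

struct fake_device_info {
        bool has_snoop;
        bool has_coherent_ggtt;
};

/* Shared defaults for one generation. */
#define FAKE_GEN3_FEATURES \
        .has_snoop = true, \
        .has_coherent_ggtt = true

/* Per-platform definition: repeating an initializer is valid C and the
 * last one wins, so the platform-specific value overrides the macro's. */
static const struct fake_device_info fake_i915g_like_info = {
        FAKE_GEN3_FEATURES,
        .has_coherent_ggtt = false,
};

int main(void)
{
        printf("snoop=%d coherent_ggtt=%d\n",
               fake_i915g_like_info.has_snoop,
               fake_i915g_like_info.has_coherent_ggtt);
        return 0;
}
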
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 6bf10952c724..ccb20230df2c 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -210,6 +210,7 @@
#include "i915_oa_cflgt3.h"
#include "i915_oa_cnl.h"
#include "i915_oa_icl.h"
+#include "intel_lrc_reg.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
* is currently generally designed assuming the largest 16M size is used such
@@ -1338,14 +1339,12 @@ free_oa_buffer(struct drm_i915_private *i915)
{
mutex_lock(&i915->drm.struct_mutex);
- i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj);
- i915_vma_unpin(i915->perf.oa.oa_buffer.vma);
- i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj);
-
- i915->perf.oa.oa_buffer.vma = NULL;
- i915->perf.oa.oa_buffer.vaddr = NULL;
+ i915_vma_unpin_and_release(&i915->perf.oa.oa_buffer.vma,
+ I915_VMA_RELEASE_MAP);
mutex_unlock(&i915->drm.struct_mutex);
+
+ i915->perf.oa.oa_buffer.vaddr = NULL;
}
static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
@@ -1638,27 +1637,25 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
/* The MMIO offsets for Flex EU registers aren't contiguous */
- u32 flex_mmio[] = {
- i915_mmio_reg_offset(EU_PERF_CNTL0),
- i915_mmio_reg_offset(EU_PERF_CNTL1),
- i915_mmio_reg_offset(EU_PERF_CNTL2),
- i915_mmio_reg_offset(EU_PERF_CNTL3),
- i915_mmio_reg_offset(EU_PERF_CNTL4),
- i915_mmio_reg_offset(EU_PERF_CNTL5),
- i915_mmio_reg_offset(EU_PERF_CNTL6),
+ i915_reg_t flex_regs[] = {
+ EU_PERF_CNTL0,
+ EU_PERF_CNTL1,
+ EU_PERF_CNTL2,
+ EU_PERF_CNTL3,
+ EU_PERF_CNTL4,
+ EU_PERF_CNTL5,
+ EU_PERF_CNTL6,
};
int i;
- reg_state[ctx_oactxctrl] = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
- reg_state[ctx_oactxctrl+1] = (dev_priv->perf.oa.period_exponent <<
- GEN8_OA_TIMER_PERIOD_SHIFT) |
- (dev_priv->perf.oa.periodic ?
- GEN8_OA_TIMER_ENABLE : 0) |
- GEN8_OA_COUNTER_RESUME;
+ CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL,
+ (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ GEN8_OA_COUNTER_RESUME);
- for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
+ for (i = 0; i < ARRAY_SIZE(flex_regs); i++) {
u32 state_offset = ctx_flexeu0 + i * 2;
- u32 mmio = flex_mmio[i];
+ u32 mmio = i915_mmio_reg_offset(flex_regs[i]);
/*
* This arbitrary default will select the 'EU FPU0 Pipeline
@@ -1678,8 +1675,7 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
}
}
- reg_state[state_offset] = mmio;
- reg_state[state_offset+1] = value;
+ CTX_REG(reg_state, state_offset, flex_regs[i], value);
}
}
@@ -1821,7 +1817,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
/* Switch away from any user context. */
ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
if (ret)
- goto out;
+ return ret;
/*
* The OA register config is setup through the context image. This image
@@ -1840,7 +1836,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
wait_flags,
MAX_SCHEDULE_TIMEOUT);
if (ret)
- goto out;
+ return ret;
/* Update all contexts now that we've stalled the submission. */
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
@@ -1852,10 +1848,8 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
continue;
regs = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
- if (IS_ERR(regs)) {
- ret = PTR_ERR(regs);
- goto out;
- }
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
ce->state->obj->mm.dirty = true;
regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
@@ -1865,7 +1859,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
i915_gem_object_unpin_map(ce->state->obj);
}
- out:
return ret;
}
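
The i915_perf.c hunks replace open-coded reg_state[off] = mmio; reg_state[off + 1] = value;
pairs with the CTX_REG() helper pulled in from intel_lrc_reg.h. The context image is just a
sequence of (register offset, value) pairs; a sketch of that layout (the helper and the
offsets below are stand-ins, not the driver's definitions):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for CTX_REG(): store an (mmio offset, value) pair at slot 'pos'. */
static void fake_ctx_reg(uint32_t *reg_state, unsigned int pos,
                         uint32_t mmio_offset, uint32_t value)
{
        reg_state[pos] = mmio_offset;
        reg_state[pos + 1] = value;
}

int main(void)
{
        uint32_t reg_state[8] = { 0 };
        uint32_t flex_regs[] = { 0xe458, 0xe558, 0xe658 };  /* made-up offsets */
        unsigned int ctx_flexeu0 = 2;                       /* made-up base slot */
        unsigned int i;

        for (i = 0; i < 3; i++)
                fake_ctx_reg(reg_state, ctx_flexeu0 + i * 2, flex_regs[i], 0);

        for (i = 0; i < 8; i++)
                printf("reg_state[%u] = 0x%x\n", i, (unsigned int)reg_state[i]);
        return 0;
}
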
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 08ec7446282e..09bc8e730ee1 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -344,6 +344,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN8_RPCS_S_CNT_ENABLE (1 << 18)
#define GEN8_RPCS_S_CNT_SHIFT 15
#define GEN8_RPCS_S_CNT_MASK (0x7 << GEN8_RPCS_S_CNT_SHIFT)
+#define GEN11_RPCS_S_CNT_SHIFT 12
+#define GEN11_RPCS_S_CNT_MASK (0x3f << GEN11_RPCS_S_CNT_SHIFT)
#define GEN8_RPCS_SS_CNT_ENABLE (1 << 11)
#define GEN8_RPCS_SS_CNT_SHIFT 8
#define GEN8_RPCS_SS_CNT_MASK (0x7 << GEN8_RPCS_SS_CNT_SHIFT)
@@ -1029,126 +1031,43 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/*
* i915_power_well_id:
*
- * Platform specific IDs used to look up power wells and - except for custom
- * power wells - to define request/status register flag bit positions. As such
- * the set of IDs on a given platform must be unique and except for custom
- * power wells their value must stay fixed.
+ * IDs used to look up power wells. Power wells accessed directly bypassing
+ * the power domains framework must be assigned a unique ID. The rest of power
+ * wells must be assigned DISP_PW_ID_NONE.
*/
enum i915_power_well_id {
- /*
- * I830
- * - custom power well
- */
- I830_DISP_PW_PIPES = 0,
-
- /*
- * VLV/CHV
- * - PUNIT_REG_PWRGT_CTRL (bit: id*2),
- * PUNIT_REG_PWRGT_STATUS (bit: id*2) (PUNIT HAS v0.8)
- */
- PUNIT_POWER_WELL_RENDER = 0,
- PUNIT_POWER_WELL_MEDIA = 1,
- PUNIT_POWER_WELL_DISP2D = 3,
- PUNIT_POWER_WELL_DPIO_CMN_BC = 5,
- PUNIT_POWER_WELL_DPIO_TX_B_LANES_01 = 6,
- PUNIT_POWER_WELL_DPIO_TX_B_LANES_23 = 7,
- PUNIT_POWER_WELL_DPIO_TX_C_LANES_01 = 8,
- PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9,
- PUNIT_POWER_WELL_DPIO_RX0 = 10,
- PUNIT_POWER_WELL_DPIO_RX1 = 11,
- PUNIT_POWER_WELL_DPIO_CMN_D = 12,
- /* - custom power well */
- CHV_DISP_PW_PIPE_A, /* 13 */
-
- /*
- * HSW/BDW
- * - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1)
- */
- HSW_DISP_PW_GLOBAL = 15,
-
- /*
- * GEN9+
- * - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1)
- */
- SKL_DISP_PW_MISC_IO = 0,
- SKL_DISP_PW_DDI_A_E,
- GLK_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E,
- CNL_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E,
- SKL_DISP_PW_DDI_B,
- SKL_DISP_PW_DDI_C,
- SKL_DISP_PW_DDI_D,
- CNL_DISP_PW_DDI_F = 6,
-
- GLK_DISP_PW_AUX_A = 8,
- GLK_DISP_PW_AUX_B,
- GLK_DISP_PW_AUX_C,
- CNL_DISP_PW_AUX_A = GLK_DISP_PW_AUX_A,
- CNL_DISP_PW_AUX_B = GLK_DISP_PW_AUX_B,
- CNL_DISP_PW_AUX_C = GLK_DISP_PW_AUX_C,
- CNL_DISP_PW_AUX_D,
- CNL_DISP_PW_AUX_F,
-
- SKL_DISP_PW_1 = 14,
+ DISP_PW_ID_NONE,
+
+ VLV_DISP_PW_DISP2D,
+ BXT_DISP_PW_DPIO_CMN_A,
+ VLV_DISP_PW_DPIO_CMN_BC,
+ GLK_DISP_PW_DPIO_CMN_C,
+ CHV_DISP_PW_DPIO_CMN_D,
+ HSW_DISP_PW_GLOBAL,
+ SKL_DISP_PW_MISC_IO,
+ SKL_DISP_PW_1,
SKL_DISP_PW_2,
-
- /* - custom power wells */
- BXT_DPIO_CMN_A,
- BXT_DPIO_CMN_BC,
- GLK_DPIO_CMN_C, /* 18 */
-
- /*
- * GEN11+
- * - _HSW_PWR_WELL_CTL1-4
- * (status bit: (id&15)*2, req bit:(id&15)*2+1)
- */
- ICL_DISP_PW_1 = 0,
- ICL_DISP_PW_2,
- ICL_DISP_PW_3,
- ICL_DISP_PW_4,
-
- /*
- * - _HSW_PWR_WELL_CTL_AUX1/2/4
- * (status bit: (id&15)*2, req bit:(id&15)*2+1)
- */
- ICL_DISP_PW_AUX_A = 16,
- ICL_DISP_PW_AUX_B,
- ICL_DISP_PW_AUX_C,
- ICL_DISP_PW_AUX_D,
- ICL_DISP_PW_AUX_E,
- ICL_DISP_PW_AUX_F,
-
- ICL_DISP_PW_AUX_TBT1 = 24,
- ICL_DISP_PW_AUX_TBT2,
- ICL_DISP_PW_AUX_TBT3,
- ICL_DISP_PW_AUX_TBT4,
-
- /*
- * - _HSW_PWR_WELL_CTL_DDI1/2/4
- * (status bit: (id&15)*2, req bit:(id&15)*2+1)
- */
- ICL_DISP_PW_DDI_A = 32,
- ICL_DISP_PW_DDI_B,
- ICL_DISP_PW_DDI_C,
- ICL_DISP_PW_DDI_D,
- ICL_DISP_PW_DDI_E,
- ICL_DISP_PW_DDI_F, /* 37 */
-
- /*
- * Multiple platforms.
- * Must start following the highest ID of any platform.
- * - custom power wells
- */
- SKL_DISP_PW_DC_OFF = 38,
- I915_DISP_PW_ALWAYS_ON,
};
#define PUNIT_REG_PWRGT_CTRL 0x60
#define PUNIT_REG_PWRGT_STATUS 0x61
-#define PUNIT_PWRGT_MASK(power_well) (3 << ((power_well) * 2))
-#define PUNIT_PWRGT_PWR_ON(power_well) (0 << ((power_well) * 2))
-#define PUNIT_PWRGT_CLK_GATE(power_well) (1 << ((power_well) * 2))
-#define PUNIT_PWRGT_RESET(power_well) (2 << ((power_well) * 2))
-#define PUNIT_PWRGT_PWR_GATE(power_well) (3 << ((power_well) * 2))
+#define PUNIT_PWRGT_MASK(pw_idx) (3 << ((pw_idx) * 2))
+#define PUNIT_PWRGT_PWR_ON(pw_idx) (0 << ((pw_idx) * 2))
+#define PUNIT_PWRGT_CLK_GATE(pw_idx) (1 << ((pw_idx) * 2))
+#define PUNIT_PWRGT_RESET(pw_idx) (2 << ((pw_idx) * 2))
+#define PUNIT_PWRGT_PWR_GATE(pw_idx) (3 << ((pw_idx) * 2))
+
+#define PUNIT_PWGT_IDX_RENDER 0
+#define PUNIT_PWGT_IDX_MEDIA 1
+#define PUNIT_PWGT_IDX_DISP2D 3
+#define PUNIT_PWGT_IDX_DPIO_CMN_BC 5
+#define PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01 6
+#define PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23 7
+#define PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01 8
+#define PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23 9
+#define PUNIT_PWGT_IDX_DPIO_RX0 10
+#define PUNIT_PWGT_IDX_DPIO_RX1 11
+#define PUNIT_PWGT_IDX_DPIO_CMN_D 12
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
@@ -1932,121 +1851,200 @@ enum i915_power_well_id {
#define N_SCALAR(x) ((x) << 24)
#define N_SCALAR_MASK (0x7F << 24)
-#define _ICL_MG_PHY_PORT_LN(port, ln, ln0p1, ln0p2, ln1p1) \
+#define MG_PHY_PORT_LN(port, ln, ln0p1, ln0p2, ln1p1) \
_MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
-#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C
-#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT1 0x16852C
-#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT2 0x16912C
-#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT2 0x16952C
-#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT3 0x16A12C
-#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C
-#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C
-#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C
-#define ICL_PORT_MG_TX1_LINK_PARAMS(port, ln) \
- _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
- _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
- _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT1)
-
-#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT1 0x1680AC
-#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT1 0x1684AC
-#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT2 0x1690AC
-#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT2 0x1694AC
-#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT3 0x16A0AC
-#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC
-#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC
-#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC
-#define ICL_PORT_MG_TX2_LINK_PARAMS(port, ln) \
- _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
- _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT2, \
- _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT1)
-#define CRI_USE_FS32 (1 << 5)
-
-#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT1 0x16814C
-#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT1 0x16854C
-#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT2 0x16914C
-#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT2 0x16954C
-#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT3 0x16A14C
-#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C
-#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C
-#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C
-#define ICL_PORT_MG_TX1_PISO_READLOAD(port, ln) \
- _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
- _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT2, \
- _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT1)
-
-#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT1 0x1680CC
-#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT1 0x1684CC
-#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT2 0x1690CC
-#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT2 0x1694CC
-#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT3 0x16A0CC
-#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC
-#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC
-#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC
-#define ICL_PORT_MG_TX2_PISO_READLOAD(port, ln) \
- _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
- _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT2, \
- _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT1)
-#define CRI_CALCINIT (1 << 1)
-
-#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT1 0x168148
-#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT1 0x168548
-#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT2 0x169148
-#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT2 0x169548
-#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT3 0x16A148
-#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548
-#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148
-#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548
-#define ICL_PORT_MG_TX1_SWINGCTRL(port, ln) \
- _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT1, \
- _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT2, \
- _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT1)
-
-#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT1 0x1680C8
-#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT1 0x1684C8
-#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT2 0x1690C8
-#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT2 0x1694C8
-#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT3 0x16A0C8
-#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8
-#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8
-#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8
-#define ICL_PORT_MG_TX2_SWINGCTRL(port, ln) \
- _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT1, \
- _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT2, \
- _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT1)
-#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0)
-#define CRI_TXDEEMPH_OVERRIDE_17_12_MASK (0x3F << 0)
-
-#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT1 0x168144
-#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT1 0x168544
-#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT2 0x169144
-#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT2 0x169544
-#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT3 0x16A144
-#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT3 0x16A544
-#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT4 0x16B144
-#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT4 0x16B544
-#define ICL_PORT_MG_TX1_DRVCTRL(port, ln) \
- _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_DRVCTRL_TX1LN0_PORT1, \
- _ICL_MG_TX_DRVCTRL_TX1LN0_PORT2, \
- _ICL_MG_TX_DRVCTRL_TX1LN1_PORT1)
-
-#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT1 0x1680C4
-#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT1 0x1684C4
-#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT2 0x1690C4
-#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT2 0x1694C4
-#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT3 0x16A0C4
-#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4
-#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4
-#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4
-#define ICL_PORT_MG_TX2_DRVCTRL(port, ln) \
- _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_DRVCTRL_TX2LN0_PORT1, \
- _ICL_MG_TX_DRVCTRL_TX2LN0_PORT2, \
- _ICL_MG_TX_DRVCTRL_TX2LN1_PORT1)
-#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24)
-#define CRI_TXDEEMPH_OVERRIDE_11_6_MASK (0x3F << 24)
-#define CRI_TXDEEMPH_OVERRIDE_EN (1 << 22)
-#define CRI_TXDEEMPH_OVERRIDE_5_0(x) ((x) << 16)
-#define CRI_TXDEEMPH_OVERRIDE_5_0_MASK (0x3F << 16)
+#define MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C
+#define MG_TX_LINK_PARAMS_TX1LN1_PORT1 0x16852C
+#define MG_TX_LINK_PARAMS_TX1LN0_PORT2 0x16912C
+#define MG_TX_LINK_PARAMS_TX1LN1_PORT2 0x16952C
+#define MG_TX_LINK_PARAMS_TX1LN0_PORT3 0x16A12C
+#define MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C
+#define MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C
+#define MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C
+#define MG_TX1_LINK_PARAMS(port, ln) \
+ MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
+ MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
+ MG_TX_LINK_PARAMS_TX1LN1_PORT1)
+
+#define MG_TX_LINK_PARAMS_TX2LN0_PORT1 0x1680AC
+#define MG_TX_LINK_PARAMS_TX2LN1_PORT1 0x1684AC
+#define MG_TX_LINK_PARAMS_TX2LN0_PORT2 0x1690AC
+#define MG_TX_LINK_PARAMS_TX2LN1_PORT2 0x1694AC
+#define MG_TX_LINK_PARAMS_TX2LN0_PORT3 0x16A0AC
+#define MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC
+#define MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC
+#define MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC
+#define MG_TX2_LINK_PARAMS(port, ln) \
+ MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
+ MG_TX_LINK_PARAMS_TX2LN0_PORT2, \
+ MG_TX_LINK_PARAMS_TX2LN1_PORT1)
+#define CRI_USE_FS32 (1 << 5)
+
+#define MG_TX_PISO_READLOAD_TX1LN0_PORT1 0x16814C
+#define MG_TX_PISO_READLOAD_TX1LN1_PORT1 0x16854C
+#define MG_TX_PISO_READLOAD_TX1LN0_PORT2 0x16914C
+#define MG_TX_PISO_READLOAD_TX1LN1_PORT2 0x16954C
+#define MG_TX_PISO_READLOAD_TX1LN0_PORT3 0x16A14C
+#define MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C
+#define MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C
+#define MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C
+#define MG_TX1_PISO_READLOAD(port, ln) \
+ MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
+ MG_TX_PISO_READLOAD_TX1LN0_PORT2, \
+ MG_TX_PISO_READLOAD_TX1LN1_PORT1)
+
+#define MG_TX_PISO_READLOAD_TX2LN0_PORT1 0x1680CC
+#define MG_TX_PISO_READLOAD_TX2LN1_PORT1 0x1684CC
+#define MG_TX_PISO_READLOAD_TX2LN0_PORT2 0x1690CC
+#define MG_TX_PISO_READLOAD_TX2LN1_PORT2 0x1694CC
+#define MG_TX_PISO_READLOAD_TX2LN0_PORT3 0x16A0CC
+#define MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC
+#define MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC
+#define MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC
+#define MG_TX2_PISO_READLOAD(port, ln) \
+ MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
+ MG_TX_PISO_READLOAD_TX2LN0_PORT2, \
+ MG_TX_PISO_READLOAD_TX2LN1_PORT1)
+#define CRI_CALCINIT (1 << 1)
+
+#define MG_TX_SWINGCTRL_TX1LN0_PORT1 0x168148
+#define MG_TX_SWINGCTRL_TX1LN1_PORT1 0x168548
+#define MG_TX_SWINGCTRL_TX1LN0_PORT2 0x169148
+#define MG_TX_SWINGCTRL_TX1LN1_PORT2 0x169548
+#define MG_TX_SWINGCTRL_TX1LN0_PORT3 0x16A148
+#define MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548
+#define MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148
+#define MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548
+#define MG_TX1_SWINGCTRL(port, ln) \
+ MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
+ MG_TX_SWINGCTRL_TX1LN0_PORT2, \
+ MG_TX_SWINGCTRL_TX1LN1_PORT1)
+
+#define MG_TX_SWINGCTRL_TX2LN0_PORT1 0x1680C8
+#define MG_TX_SWINGCTRL_TX2LN1_PORT1 0x1684C8
+#define MG_TX_SWINGCTRL_TX2LN0_PORT2 0x1690C8
+#define MG_TX_SWINGCTRL_TX2LN1_PORT2 0x1694C8
+#define MG_TX_SWINGCTRL_TX2LN0_PORT3 0x16A0C8
+#define MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8
+#define MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8
+#define MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8
+#define MG_TX2_SWINGCTRL(port, ln) \
+ MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
+ MG_TX_SWINGCTRL_TX2LN0_PORT2, \
+ MG_TX_SWINGCTRL_TX2LN1_PORT1)
+#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0)
+#define CRI_TXDEEMPH_OVERRIDE_17_12_MASK (0x3F << 0)
+
+#define MG_TX_DRVCTRL_TX1LN0_TXPORT1 0x168144
+#define MG_TX_DRVCTRL_TX1LN1_TXPORT1 0x168544
+#define MG_TX_DRVCTRL_TX1LN0_TXPORT2 0x169144
+#define MG_TX_DRVCTRL_TX1LN1_TXPORT2 0x169544
+#define MG_TX_DRVCTRL_TX1LN0_TXPORT3 0x16A144
+#define MG_TX_DRVCTRL_TX1LN1_TXPORT3 0x16A544
+#define MG_TX_DRVCTRL_TX1LN0_TXPORT4 0x16B144
+#define MG_TX_DRVCTRL_TX1LN1_TXPORT4 0x16B544
+#define MG_TX1_DRVCTRL(port, ln) \
+ MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
+ MG_TX_DRVCTRL_TX1LN0_TXPORT2, \
+ MG_TX_DRVCTRL_TX1LN1_TXPORT1)
+
+#define MG_TX_DRVCTRL_TX2LN0_PORT1 0x1680C4
+#define MG_TX_DRVCTRL_TX2LN1_PORT1 0x1684C4
+#define MG_TX_DRVCTRL_TX2LN0_PORT2 0x1690C4
+#define MG_TX_DRVCTRL_TX2LN1_PORT2 0x1694C4
+#define MG_TX_DRVCTRL_TX2LN0_PORT3 0x16A0C4
+#define MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4
+#define MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4
+#define MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4
+#define MG_TX2_DRVCTRL(port, ln) \
+ MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX2LN0_PORT1, \
+ MG_TX_DRVCTRL_TX2LN0_PORT2, \
+ MG_TX_DRVCTRL_TX2LN1_PORT1)
+#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24)
+#define CRI_TXDEEMPH_OVERRIDE_11_6_MASK (0x3F << 24)
+#define CRI_TXDEEMPH_OVERRIDE_EN (1 << 22)
+#define CRI_TXDEEMPH_OVERRIDE_5_0(x) ((x) << 16)
+#define CRI_TXDEEMPH_OVERRIDE_5_0_MASK (0x3F << 16)
+#define CRI_LOADGEN_SEL(x) ((x) << 12)
+#define CRI_LOADGEN_SEL_MASK (0x3 << 12)
+
+#define MG_CLKHUB_LN0_PORT1 0x16839C
+#define MG_CLKHUB_LN1_PORT1 0x16879C
+#define MG_CLKHUB_LN0_PORT2 0x16939C
+#define MG_CLKHUB_LN1_PORT2 0x16979C
+#define MG_CLKHUB_LN0_PORT3 0x16A39C
+#define MG_CLKHUB_LN1_PORT3 0x16A79C
+#define MG_CLKHUB_LN0_PORT4 0x16B39C
+#define MG_CLKHUB_LN1_PORT4 0x16B79C
+#define MG_CLKHUB(port, ln) \
+ MG_PHY_PORT_LN(port, ln, MG_CLKHUB_LN0_PORT1, \
+ MG_CLKHUB_LN0_PORT2, \
+ MG_CLKHUB_LN1_PORT1)
+#define CFG_LOW_RATE_LKREN_EN (1 << 11)
+
+#define MG_TX_DCC_TX1LN0_PORT1 0x168110
+#define MG_TX_DCC_TX1LN1_PORT1 0x168510
+#define MG_TX_DCC_TX1LN0_PORT2 0x169110
+#define MG_TX_DCC_TX1LN1_PORT2 0x169510
+#define MG_TX_DCC_TX1LN0_PORT3 0x16A110
+#define MG_TX_DCC_TX1LN1_PORT3 0x16A510
+#define MG_TX_DCC_TX1LN0_PORT4 0x16B110
+#define MG_TX_DCC_TX1LN1_PORT4 0x16B510
+#define MG_TX1_DCC(port, ln) \
+ MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX1LN0_PORT1, \
+ MG_TX_DCC_TX1LN0_PORT2, \
+ MG_TX_DCC_TX1LN1_PORT1)
+#define MG_TX_DCC_TX2LN0_PORT1 0x168090
+#define MG_TX_DCC_TX2LN1_PORT1 0x168490
+#define MG_TX_DCC_TX2LN0_PORT2 0x169090
+#define MG_TX_DCC_TX2LN1_PORT2 0x169490
+#define MG_TX_DCC_TX2LN0_PORT3 0x16A090
+#define MG_TX_DCC_TX2LN1_PORT3 0x16A490
+#define MG_TX_DCC_TX2LN0_PORT4 0x16B090
+#define MG_TX_DCC_TX2LN1_PORT4 0x16B490
+#define MG_TX2_DCC(port, ln) \
+ MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX2LN0_PORT1, \
+ MG_TX_DCC_TX2LN0_PORT2, \
+ MG_TX_DCC_TX2LN1_PORT1)
+#define CFG_AMI_CK_DIV_OVERRIDE_VAL(x) ((x) << 25)
+#define CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK (0x3 << 25)
+#define CFG_AMI_CK_DIV_OVERRIDE_EN (1 << 24)
+
+#define MG_DP_MODE_LN0_ACU_PORT1 0x1683A0
+#define MG_DP_MODE_LN1_ACU_PORT1 0x1687A0
+#define MG_DP_MODE_LN0_ACU_PORT2 0x1693A0
+#define MG_DP_MODE_LN1_ACU_PORT2 0x1697A0
+#define MG_DP_MODE_LN0_ACU_PORT3 0x16A3A0
+#define MG_DP_MODE_LN1_ACU_PORT3 0x16A7A0
+#define MG_DP_MODE_LN0_ACU_PORT4 0x16B3A0
+#define MG_DP_MODE_LN1_ACU_PORT4 0x16B7A0
+#define MG_DP_MODE(port, ln) \
+ MG_PHY_PORT_LN(port, ln, MG_DP_MODE_LN0_ACU_PORT1, \
+ MG_DP_MODE_LN0_ACU_PORT2, \
+ MG_DP_MODE_LN1_ACU_PORT1)
+#define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7)
+#define MG_DP_MODE_CFG_DP_X1_MODE (1 << 6)
+#define MG_DP_MODE_CFG_TR2PWR_GATING (1 << 5)
+#define MG_DP_MODE_CFG_TRPWR_GATING (1 << 4)
+#define MG_DP_MODE_CFG_CLNPWR_GATING (1 << 3)
+#define MG_DP_MODE_CFG_DIGPWR_GATING (1 << 2)
+#define MG_DP_MODE_CFG_GAONPWR_GATING (1 << 1)
+
+#define MG_MISC_SUS0_PORT1 0x168814
+#define MG_MISC_SUS0_PORT2 0x169814
+#define MG_MISC_SUS0_PORT3 0x16A814
+#define MG_MISC_SUS0_PORT4 0x16B814
+#define MG_MISC_SUS0(tc_port) \
+ _MMIO(_PORT(tc_port, MG_MISC_SUS0_PORT1, MG_MISC_SUS0_PORT2))
+#define MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK (3 << 14)
+#define MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(x) ((x) << 14)
+#define MG_MISC_SUS0_CFG_TR2PWR_GATING (1 << 12)
+#define MG_MISC_SUS0_CFG_CL2PWR_GATING (1 << 11)
+#define MG_MISC_SUS0_CFG_GAONPWR_GATING (1 << 10)
+#define MG_MISC_SUS0_CFG_TRPWR_GATING (1 << 7)
+#define MG_MISC_SUS0_CFG_CL1PWR_GATING (1 << 6)
+#define MG_MISC_SUS0_CFG_DGPWR_GATING (1 << 5)
/* The spec defines this only for BXT PHY0, but lets assume that this
* would exist for PHY1 too if it had a second channel.
@@ -3086,18 +3084,9 @@ enum i915_power_well_id {
/*
* GPIO regs
*/
-#define GPIOA _MMIO(0x5010)
-#define GPIOB _MMIO(0x5014)
-#define GPIOC _MMIO(0x5018)
-#define GPIOD _MMIO(0x501c)
-#define GPIOE _MMIO(0x5020)
-#define GPIOF _MMIO(0x5024)
-#define GPIOG _MMIO(0x5028)
-#define GPIOH _MMIO(0x502c)
-#define GPIOJ _MMIO(0x5034)
-#define GPIOK _MMIO(0x5038)
-#define GPIOL _MMIO(0x503C)
-#define GPIOM _MMIO(0x5040)
+#define GPIO(gpio) _MMIO(dev_priv->gpio_mmio_base + 0x5010 + \
+ 4 * (gpio))
+
# define GPIO_CLOCK_DIR_MASK (1 << 0)
# define GPIO_CLOCK_DIR_IN (0 << 1)
# define GPIO_CLOCK_DIR_OUT (1 << 1)
@@ -5476,6 +5465,7 @@ enum {
#define DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL (1 << 14)
#define DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL (1 << 13)
#define DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL (1 << 12)
+#define DP_AUX_CH_CTL_TBT_IO (1 << 11)
#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK (0x1f << 5)
#define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) (((c) - 1) << 5)
#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1)
@@ -6527,7 +6517,7 @@ enum {
#define PLANE_CTL_YUV422_UYVY (1 << 16)
#define PLANE_CTL_YUV422_YVYU (2 << 16)
#define PLANE_CTL_YUV422_VYUY (3 << 16)
-#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15)
+#define PLANE_CTL_RENDER_DECOMPRESSION_ENABLE (1 << 15)
#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) /* Pre-GLK */
#define PLANE_CTL_TILED_MASK (0x7 << 10)
@@ -7207,6 +7197,7 @@ enum {
#define GEN11_TC3_HOTPLUG (1 << 18)
#define GEN11_TC2_HOTPLUG (1 << 17)
#define GEN11_TC1_HOTPLUG (1 << 16)
+#define GEN11_TC_HOTPLUG(tc_port) (1 << ((tc_port) + 16))
#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC4_HOTPLUG | \
GEN11_TC3_HOTPLUG | \
GEN11_TC2_HOTPLUG | \
@@ -7215,6 +7206,7 @@ enum {
#define GEN11_TBT3_HOTPLUG (1 << 2)
#define GEN11_TBT2_HOTPLUG (1 << 1)
#define GEN11_TBT1_HOTPLUG (1 << 0)
+#define GEN11_TBT_HOTPLUG(tc_port) (1 << (tc_port))
#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT4_HOTPLUG | \
GEN11_TBT3_HOTPLUG | \
GEN11_TBT2_HOTPLUG | \
@@ -7490,6 +7482,8 @@ enum {
/* PCH */
+#define PCH_DISPLAY_BASE 0xc0000u
+
/* south display engine interrupt: IBX */
#define SDE_AUDIO_POWER_D (1 << 27)
#define SDE_AUDIO_POWER_C (1 << 26)
@@ -7587,6 +7581,8 @@ enum {
#define SDE_GMBUS_ICP (1 << 23)
#define SDE_DDIB_HOTPLUG_ICP (1 << 17)
#define SDE_DDIA_HOTPLUG_ICP (1 << 16)
+#define SDE_TC_HOTPLUG_ICP(tc_port) (1 << ((tc_port) + 24))
+#define SDE_DDI_HOTPLUG_ICP(port) (1 << ((port) + 16))
#define SDE_DDI_MASK_ICP (SDE_DDIB_HOTPLUG_ICP | \
SDE_DDIA_HOTPLUG_ICP)
#define SDE_TC_MASK_ICP (SDE_TC4_HOTPLUG_ICP | \
@@ -7782,20 +7778,6 @@ enum {
#define ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4)
#define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4)
-#define PCH_GPIOA _MMIO(0xc5010)
-#define PCH_GPIOB _MMIO(0xc5014)
-#define PCH_GPIOC _MMIO(0xc5018)
-#define PCH_GPIOD _MMIO(0xc501c)
-#define PCH_GPIOE _MMIO(0xc5020)
-#define PCH_GPIOF _MMIO(0xc5024)
-
-#define PCH_GMBUS0 _MMIO(0xc5100)
-#define PCH_GMBUS1 _MMIO(0xc5104)
-#define PCH_GMBUS2 _MMIO(0xc5108)
-#define PCH_GMBUS3 _MMIO(0xc510c)
-#define PCH_GMBUS4 _MMIO(0xc5110)
-#define PCH_GMBUS5 _MMIO(0xc5120)
-
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
#define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
@@ -8498,8 +8480,10 @@ enum {
#define GEN6_PM_RP_DOWN_THRESHOLD (1 << 4)
#define GEN6_PM_RP_UP_EI_EXPIRED (1 << 2)
#define GEN6_PM_RP_DOWN_EI_EXPIRED (1 << 1)
-#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \
- GEN6_PM_RP_DOWN_THRESHOLD | \
+#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_EI_EXPIRED | \
+ GEN6_PM_RP_UP_THRESHOLD | \
+ GEN6_PM_RP_DOWN_EI_EXPIRED | \
+ GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
#define GEN7_GT_SCRATCH(i) _MMIO(0x4F100 + (i) * 4)
@@ -8827,46 +8811,78 @@ enum {
#define HSW_AUD_CHICKENBIT _MMIO(0x65f10)
#define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15)
-/* HSW Power Wells */
-#define _HSW_PWR_WELL_CTL1 0x45400
-#define _HSW_PWR_WELL_CTL2 0x45404
-#define _HSW_PWR_WELL_CTL3 0x45408
-#define _HSW_PWR_WELL_CTL4 0x4540C
-
-#define _ICL_PWR_WELL_CTL_AUX1 0x45440
-#define _ICL_PWR_WELL_CTL_AUX2 0x45444
-#define _ICL_PWR_WELL_CTL_AUX4 0x4544C
-
-#define _ICL_PWR_WELL_CTL_DDI1 0x45450
-#define _ICL_PWR_WELL_CTL_DDI2 0x45454
-#define _ICL_PWR_WELL_CTL_DDI4 0x4545C
-
/*
- * Each power well control register contains up to 16 (request, status) HW
- * flag tuples. The register index and HW flag shift is determined by the
- * power well ID (see i915_power_well_id). There are 4 possible sources of
- * power well requests each source having its own set of control registers:
- * BIOS, DRIVER, KVMR, DEBUG.
+ * HSW - ICL power wells
+ *
+ * Platforms have up to 3 power well control register sets, each set
+ * controlling up to 16 power wells via a request/status HW flag tuple:
+ * - main (HSW_PWR_WELL_CTL[1-4])
+ * - AUX (ICL_PWR_WELL_CTL_AUX[1-4])
+ * - DDI (ICL_PWR_WELL_CTL_DDI[1-4])
+ * Each control register set consists of up to 4 registers used by different
+ * sources that can request a power well to be enabled:
+ * - BIOS (HSW_PWR_WELL_CTL1/ICL_PWR_WELL_CTL_AUX1/ICL_PWR_WELL_CTL_DDI1)
+ * - DRIVER (HSW_PWR_WELL_CTL2/ICL_PWR_WELL_CTL_AUX2/ICL_PWR_WELL_CTL_DDI2)
+ * - KVMR (HSW_PWR_WELL_CTL3) (only in the main register set)
+ * - DEBUG (HSW_PWR_WELL_CTL4/ICL_PWR_WELL_CTL_AUX4/ICL_PWR_WELL_CTL_DDI4)
*/
-#define _HSW_PW_REG_IDX(pw) ((pw) >> 4)
-#define _HSW_PW_SHIFT(pw) (((pw) & 0xf) * 2)
-#define HSW_PWR_WELL_CTL_BIOS(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \
- _HSW_PWR_WELL_CTL1, \
- _ICL_PWR_WELL_CTL_AUX1, \
- _ICL_PWR_WELL_CTL_DDI1))
-#define HSW_PWR_WELL_CTL_DRIVER(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \
- _HSW_PWR_WELL_CTL2, \
- _ICL_PWR_WELL_CTL_AUX2, \
- _ICL_PWR_WELL_CTL_DDI2))
-/* KVMR doesn't have a reg for AUX or DDI power well control */
-#define HSW_PWR_WELL_CTL_KVMR _MMIO(_HSW_PWR_WELL_CTL3)
-#define HSW_PWR_WELL_CTL_DEBUG(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \
- _HSW_PWR_WELL_CTL4, \
- _ICL_PWR_WELL_CTL_AUX4, \
- _ICL_PWR_WELL_CTL_DDI4))
-
-#define HSW_PWR_WELL_CTL_REQ(pw) (1 << (_HSW_PW_SHIFT(pw) + 1))
-#define HSW_PWR_WELL_CTL_STATE(pw) (1 << _HSW_PW_SHIFT(pw))
+#define HSW_PWR_WELL_CTL1 _MMIO(0x45400)
+#define HSW_PWR_WELL_CTL2 _MMIO(0x45404)
+#define HSW_PWR_WELL_CTL3 _MMIO(0x45408)
+#define HSW_PWR_WELL_CTL4 _MMIO(0x4540C)
+#define HSW_PWR_WELL_CTL_REQ(pw_idx) (0x2 << ((pw_idx) * 2))
+#define HSW_PWR_WELL_CTL_STATE(pw_idx) (0x1 << ((pw_idx) * 2))
+
+/* HSW/BDW power well */
+#define HSW_PW_CTL_IDX_GLOBAL 15
+
+/* SKL/BXT/GLK/CNL power wells */
+#define SKL_PW_CTL_IDX_PW_2 15
+#define SKL_PW_CTL_IDX_PW_1 14
+#define CNL_PW_CTL_IDX_AUX_F 12
+#define CNL_PW_CTL_IDX_AUX_D 11
+#define GLK_PW_CTL_IDX_AUX_C 10
+#define GLK_PW_CTL_IDX_AUX_B 9
+#define GLK_PW_CTL_IDX_AUX_A 8
+#define CNL_PW_CTL_IDX_DDI_F 6
+#define SKL_PW_CTL_IDX_DDI_D 4
+#define SKL_PW_CTL_IDX_DDI_C 3
+#define SKL_PW_CTL_IDX_DDI_B 2
+#define SKL_PW_CTL_IDX_DDI_A_E 1
+#define GLK_PW_CTL_IDX_DDI_A 1
+#define SKL_PW_CTL_IDX_MISC_IO 0
+
+/* ICL - power wells */
+#define ICL_PW_CTL_IDX_PW_4 3
+#define ICL_PW_CTL_IDX_PW_3 2
+#define ICL_PW_CTL_IDX_PW_2 1
+#define ICL_PW_CTL_IDX_PW_1 0
+
+#define ICL_PWR_WELL_CTL_AUX1 _MMIO(0x45440)
+#define ICL_PWR_WELL_CTL_AUX2 _MMIO(0x45444)
+#define ICL_PWR_WELL_CTL_AUX4 _MMIO(0x4544C)
+#define ICL_PW_CTL_IDX_AUX_TBT4 11
+#define ICL_PW_CTL_IDX_AUX_TBT3 10
+#define ICL_PW_CTL_IDX_AUX_TBT2 9
+#define ICL_PW_CTL_IDX_AUX_TBT1 8
+#define ICL_PW_CTL_IDX_AUX_F 5
+#define ICL_PW_CTL_IDX_AUX_E 4
+#define ICL_PW_CTL_IDX_AUX_D 3
+#define ICL_PW_CTL_IDX_AUX_C 2
+#define ICL_PW_CTL_IDX_AUX_B 1
+#define ICL_PW_CTL_IDX_AUX_A 0
+
+#define ICL_PWR_WELL_CTL_DDI1 _MMIO(0x45450)
+#define ICL_PWR_WELL_CTL_DDI2 _MMIO(0x45454)
+#define ICL_PWR_WELL_CTL_DDI4 _MMIO(0x4545C)
+#define ICL_PW_CTL_IDX_DDI_F 5
+#define ICL_PW_CTL_IDX_DDI_E 4
+#define ICL_PW_CTL_IDX_DDI_D 3
+#define ICL_PW_CTL_IDX_DDI_C 2
+#define ICL_PW_CTL_IDX_DDI_B 1
+#define ICL_PW_CTL_IDX_DDI_A 0
+
+/* HSW - power well misc debug registers */
#define HSW_PWR_WELL_CTL5 _MMIO(0x45410)
#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1 << 31)
#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1 << 20)
@@ -8878,22 +8894,32 @@ enum skl_power_gate {
SKL_PG0,
SKL_PG1,
SKL_PG2,
+ ICL_PG3,
+ ICL_PG4,
};
#define SKL_FUSE_STATUS _MMIO(0x42000)
#define SKL_FUSE_DOWNLOAD_STATUS (1 << 31)
-/* PG0 (HW control->no power well ID), PG1..PG2 (SKL_DISP_PW1..SKL_DISP_PW2) */
-#define SKL_PW_TO_PG(pw) ((pw) - SKL_DISP_PW_1 + SKL_PG1)
-/* PG0 (HW control->no power well ID), PG1..PG4 (ICL_DISP_PW1..ICL_DISP_PW4) */
-#define ICL_PW_TO_PG(pw) ((pw) - ICL_DISP_PW_1 + SKL_PG1)
+/*
+ * PG0 is HW controlled, so doesn't have a corresponding power well control knob
+ * SKL_DISP_PW1_IDX..SKL_DISP_PW2_IDX -> PG1..PG2
+ */
+#define SKL_PW_CTL_IDX_TO_PG(pw_idx) \
+ ((pw_idx) - SKL_PW_CTL_IDX_PW_1 + SKL_PG1)
+/*
+ * PG0 is HW controlled, so doesn't have a corresponding power well control knob
+ * ICL_DISP_PW1_IDX..ICL_DISP_PW4_IDX -> PG1..PG4
+ */
+#define ICL_PW_CTL_IDX_TO_PG(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_PW_1 + SKL_PG1)
#define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg)))
-#define _CNL_AUX_REG_IDX(pw) ((pw) - 9)
+#define _CNL_AUX_REG_IDX(pw_idx) ((pw_idx) - GLK_PW_CTL_IDX_AUX_B)
#define _CNL_AUX_ANAOVRD1_B 0x162250
#define _CNL_AUX_ANAOVRD1_C 0x162210
#define _CNL_AUX_ANAOVRD1_D 0x1622D0
#define _CNL_AUX_ANAOVRD1_F 0x162A90
-#define CNL_AUX_ANAOVRD1(pw) _MMIO(_PICK(_CNL_AUX_REG_IDX(pw), \
+#define CNL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_CNL_AUX_REG_IDX(pw_idx), \
_CNL_AUX_ANAOVRD1_B, \
_CNL_AUX_ANAOVRD1_C, \
_CNL_AUX_ANAOVRD1_D, \
@@ -9367,9 +9393,13 @@ enum skl_power_gate {
#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK (0x1 << 16)
#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(x) ((x) << 14)
#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK (0x3 << 14)
-#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO(x) ((x) << 12)
#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK (0x3 << 12)
+#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2 (0 << 12)
+#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3 (1 << 12)
+#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5 (2 << 12)
+#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7 (3 << 12)
#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x) ((x) << 8)
+#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT 8
#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK (0xf << 8)
#define MG_CLKTOP2_HSCLKCTL(port) _MMIO_PORT((port) - PORT_C, \
_MG_CLKTOP2_HSCLKCTL_PORT1, \
@@ -9380,7 +9410,10 @@ enum skl_power_gate {
#define _MG_PLL_DIV0_PORT3 0x16AA00
#define _MG_PLL_DIV0_PORT4 0x16BA00
#define MG_PLL_DIV0_FRACNEN_H (1 << 30)
+#define MG_PLL_DIV0_FBDIV_FRAC_MASK (0x3fffff << 8)
+#define MG_PLL_DIV0_FBDIV_FRAC_SHIFT 8
#define MG_PLL_DIV0_FBDIV_FRAC(x) ((x) << 8)
+#define MG_PLL_DIV0_FBDIV_INT_MASK (0xff << 0)
#define MG_PLL_DIV0_FBDIV_INT(x) ((x) << 0)
#define MG_PLL_DIV0(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_DIV0_PORT1, \
_MG_PLL_DIV0_PORT2)
@@ -9395,6 +9428,7 @@ enum skl_power_gate {
#define MG_PLL_DIV1_DITHER_DIV_4 (2 << 12)
#define MG_PLL_DIV1_DITHER_DIV_8 (3 << 12)
#define MG_PLL_DIV1_NDIVRATIO(x) ((x) << 4)
+#define MG_PLL_DIV1_FBPREDIV_MASK (0xf << 0)
#define MG_PLL_DIV1_FBPREDIV(x) ((x) << 0)
#define MG_PLL_DIV1(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_DIV1_PORT1, \
_MG_PLL_DIV1_PORT2)
@@ -10347,8 +10381,8 @@ enum skl_power_gate {
#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
/* Icelake Display Stream Compression Registers */
-#define DSCA_PICTURE_PARAMETER_SET_0 0x6B200
-#define DSCC_PICTURE_PARAMETER_SET_0 0x6BA00
+#define DSCA_PICTURE_PARAMETER_SET_0 _MMIO(0x6B200)
+#define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270
#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370
#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470
@@ -10368,8 +10402,8 @@ enum skl_power_gate {
#define DSC_VER_MIN_SHIFT 4
#define DSC_VER_MAJ (0x1 << 0)
-#define DSCA_PICTURE_PARAMETER_SET_1 0x6B204
-#define DSCC_PICTURE_PARAMETER_SET_1 0x6BA04
+#define DSCA_PICTURE_PARAMETER_SET_1 _MMIO(0x6B204)
+#define DSCC_PICTURE_PARAMETER_SET_1 _MMIO(0x6BA04)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274
#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374
#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474
@@ -10382,8 +10416,8 @@ enum skl_power_gate {
_ICL_DSC1_PICTURE_PARAMETER_SET_1_PC)
#define DSC_BPP(bpp) ((bpp) << 0)
-#define DSCA_PICTURE_PARAMETER_SET_2 0x6B208
-#define DSCC_PICTURE_PARAMETER_SET_2 0x6BA08
+#define DSCA_PICTURE_PARAMETER_SET_2 _MMIO(0x6B208)
+#define DSCC_PICTURE_PARAMETER_SET_2 _MMIO(0x6BA08)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278
#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378
#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478
@@ -10397,8 +10431,8 @@ enum skl_power_gate {
#define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16)
#define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0)
-#define DSCA_PICTURE_PARAMETER_SET_3 0x6B20C
-#define DSCC_PICTURE_PARAMETER_SET_3 0x6BA0C
+#define DSCA_PICTURE_PARAMETER_SET_3 _MMIO(0x6B20C)
+#define DSCC_PICTURE_PARAMETER_SET_3 _MMIO(0x6BA0C)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C
#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C
#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C
@@ -10412,8 +10446,8 @@ enum skl_power_gate {
#define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16)
#define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0)
-#define DSCA_PICTURE_PARAMETER_SET_4 0x6B210
-#define DSCC_PICTURE_PARAMETER_SET_4 0x6BA10
+#define DSCA_PICTURE_PARAMETER_SET_4 _MMIO(0x6B210)
+#define DSCC_PICTURE_PARAMETER_SET_4 _MMIO(0x6BA10)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280
#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380
#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480
@@ -10422,13 +10456,13 @@ enum skl_power_gate {
_ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
_ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16)
#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0)
-#define DSCA_PICTURE_PARAMETER_SET_5 0x6B214
-#define DSCC_PICTURE_PARAMETER_SET_5 0x6BA14
+#define DSCA_PICTURE_PARAMETER_SET_5 _MMIO(0x6B214)
+#define DSCC_PICTURE_PARAMETER_SET_5 _MMIO(0x6BA14)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284
#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384
#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484
@@ -10437,13 +10471,13 @@ enum skl_power_gate {
_ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
_ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
- _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC, \
+ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
-#define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16)
+#define DSC_SCALE_DEC_INT(scale_dec) ((scale_dec) << 16)
#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0)
-#define DSCA_PICTURE_PARAMETER_SET_6 0x6B218
-#define DSCC_PICTURE_PARAMETER_SET_6 0x6BA18
+#define DSCA_PICTURE_PARAMETER_SET_6 _MMIO(0x6B218)
+#define DSCC_PICTURE_PARAMETER_SET_6 _MMIO(0x6BA18)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288
#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388
#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488
@@ -10454,13 +10488,13 @@ enum skl_power_gate {
#define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
_ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_6_PC)
-#define DSC_FLATNESS_MAX_QP(max_qp) (qp << 24)
-#define DSC_FLATNESS_MIN_QP(min_qp) (qp << 16)
+#define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24)
+#define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16)
#define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8)
#define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0)
-#define DSCA_PICTURE_PARAMETER_SET_7 0x6B21C
-#define DSCC_PICTURE_PARAMETER_SET_7 0x6BA1C
+#define DSCA_PICTURE_PARAMETER_SET_7 _MMIO(0x6B21C)
+#define DSCC_PICTURE_PARAMETER_SET_7 _MMIO(0x6BA1C)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C
#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C
#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C
@@ -10474,8 +10508,8 @@ enum skl_power_gate {
#define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16)
#define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0)
-#define DSCA_PICTURE_PARAMETER_SET_8 0x6B220
-#define DSCC_PICTURE_PARAMETER_SET_8 0x6BA20
+#define DSCA_PICTURE_PARAMETER_SET_8 _MMIO(0x6B220)
+#define DSCC_PICTURE_PARAMETER_SET_8 _MMIO(0x6BA20)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290
#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390
#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490
@@ -10489,8 +10523,8 @@ enum skl_power_gate {
#define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16)
#define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0)
-#define DSCA_PICTURE_PARAMETER_SET_9 0x6B224
-#define DSCC_PICTURE_PARAMETER_SET_9 0x6BA24
+#define DSCA_PICTURE_PARAMETER_SET_9 _MMIO(0x6B224)
+#define DSCC_PICTURE_PARAMETER_SET_9 _MMIO(0x6BA24)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294
#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394
#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494
@@ -10504,8 +10538,8 @@ enum skl_power_gate {
#define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16)
#define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0)
-#define DSCA_PICTURE_PARAMETER_SET_10 0x6B228
-#define DSCC_PICTURE_PARAMETER_SET_10 0x6BA28
+#define DSCA_PICTURE_PARAMETER_SET_10 _MMIO(0x6B228)
+#define DSCC_PICTURE_PARAMETER_SET_10 _MMIO(0x6BA28)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298
#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398
#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498
@@ -10521,8 +10555,8 @@ enum skl_power_gate {
#define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8)
#define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0)
-#define DSCA_PICTURE_PARAMETER_SET_11 0x6B22C
-#define DSCC_PICTURE_PARAMETER_SET_11 0x6BA2C
+#define DSCA_PICTURE_PARAMETER_SET_11 _MMIO(0x6B22C)
+#define DSCC_PICTURE_PARAMETER_SET_11 _MMIO(0x6BA2C)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C
#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C
#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C
@@ -10534,8 +10568,8 @@ enum skl_power_gate {
_ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_11_PC)
-#define DSCA_PICTURE_PARAMETER_SET_12 0x6B260
-#define DSCC_PICTURE_PARAMETER_SET_12 0x6BA60
+#define DSCA_PICTURE_PARAMETER_SET_12 _MMIO(0x6B260)
+#define DSCC_PICTURE_PARAMETER_SET_12 _MMIO(0x6BA60)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0
#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0
#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0
@@ -10547,8 +10581,8 @@ enum skl_power_gate {
_ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_12_PC)
-#define DSCA_PICTURE_PARAMETER_SET_13 0x6B264
-#define DSCC_PICTURE_PARAMETER_SET_13 0x6BA64
+#define DSCA_PICTURE_PARAMETER_SET_13 _MMIO(0x6B264)
+#define DSCC_PICTURE_PARAMETER_SET_13 _MMIO(0x6BA64)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4
#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4
#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4
@@ -10560,8 +10594,8 @@ enum skl_power_gate {
_ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_13_PC)
-#define DSCA_PICTURE_PARAMETER_SET_14 0x6B268
-#define DSCC_PICTURE_PARAMETER_SET_14 0x6BA68
+#define DSCA_PICTURE_PARAMETER_SET_14 _MMIO(0x6B268)
+#define DSCC_PICTURE_PARAMETER_SET_14 _MMIO(0x6BA68)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8
#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8
#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8
@@ -10573,8 +10607,8 @@ enum skl_power_gate {
_ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_14_PC)
-#define DSCA_PICTURE_PARAMETER_SET_15 0x6B26C
-#define DSCC_PICTURE_PARAMETER_SET_15 0x6BA6C
+#define DSCA_PICTURE_PARAMETER_SET_15 _MMIO(0x6B26C)
+#define DSCC_PICTURE_PARAMETER_SET_15 _MMIO(0x6BA6C)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC
#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC
#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC
@@ -10586,8 +10620,8 @@ enum skl_power_gate {
_ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_15_PC)
-#define DSCA_PICTURE_PARAMETER_SET_16 0x6B270
-#define DSCC_PICTURE_PARAMETER_SET_16 0x6BA70
+#define DSCA_PICTURE_PARAMETER_SET_16 _MMIO(0x6B270)
+#define DSCC_PICTURE_PARAMETER_SET_16 _MMIO(0x6BA70)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0
#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0
#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0
@@ -10599,7 +10633,7 @@ enum skl_power_gate {
_ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16)
-#define DSC_SLICE_CHUNK_SIZE(slice_chunk_aize) (slice_chunk_size << 0)
+#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0)
/* Icelake Rate Control Buffer Threshold Registers */
#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230)
@@ -10652,4 +10686,17 @@ enum skl_power_gate {
_ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
_ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
+#define PORT_TX_DFLEXDPSP _MMIO(0x1638A0)
+#define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6))
+#define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5))
+#define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8)
+#define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8))
+#define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8))
+
+#define PORT_TX_DFLEXDPPMS _MMIO(0x163890)
+#define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port))
+
+#define PORT_TX_DFLEXDPCSSS _MMIO(0x163894)
+#define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port))
+
#endif /* _I915_REG_H_ */
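The new PORT_TX_DFLEXDPSP field macros pack one byte of Type-C FIA status per TC port, with the DP lane assignment in the low nibble and the TC/TBT live-state bits above it. A minimal standalone sketch (not driver code) of how the shift/mask arithmetic resolves, using a hypothetical register readback:

#include <stdio.h>

/* Same field layout as the new DFLEXDPSP macros: one byte per TC port,
 * DP lane assignment in bits [3:0], TC/TBT live-state bits above it. */
#define DP_LANE_ASSIGNMENT_SHIFT(tc_port)   ((tc_port) * 8)
#define DP_LANE_ASSIGNMENT_MASK(tc_port)    (0xf << ((tc_port) * 8))
#define TC_LIVE_STATE_TC(tc_port)           (1 << ((tc_port) * 8 + 5))

int main(void)
{
    unsigned int dflexdpsp = 0x00230000;    /* hypothetical readback */
    int tc_port = 2;
    unsigned int lanes;

    lanes = (dflexdpsp & DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
            DP_LANE_ASSIGNMENT_SHIFT(tc_port);

    printf("TC port %d: lane assignment 0x%x, TC live: %s\n",
           tc_port, lanes,
           (dflexdpsp & TC_LIVE_STATE_TC(tc_port)) ? "yes" : "no");
    return 0;
}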
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 5c2c93cbab12..09ed48833b54 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -527,7 +527,7 @@ void __i915_request_submit(struct i915_request *request)
seqno = timeline_get_seqno(&engine->timeline);
GEM_BUG_ON(!seqno);
- GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
+ GEM_BUG_ON(intel_engine_signaled(engine, seqno));
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
@@ -579,8 +579,7 @@ void __i915_request_unsubmit(struct i915_request *request)
*/
GEM_BUG_ON(!request->global_seqno);
GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
- GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine),
- request->global_seqno));
+ GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
engine->timeline.seqno--;
/* We may be recursing from the signal callback of another i915 fence */
@@ -1205,7 +1204,7 @@ static bool __i915_spin_request(const struct i915_request *rq,
* it is a fair assumption that it will not complete within our
* relatively short timeout.
*/
- if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
+ if (!intel_engine_has_started(engine, seqno))
return false;
/*
@@ -1222,7 +1221,7 @@ static bool __i915_spin_request(const struct i915_request *rq,
irq = READ_ONCE(engine->breadcrumbs.irq_count);
timeout_us += local_clock_us(&cpu);
do {
- if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
+ if (intel_engine_has_completed(engine, seqno))
return seqno == i915_request_global_seqno(rq);
/*
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index e1c9365dfefb..9898301ab7ef 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -272,7 +272,10 @@ long i915_request_wait(struct i915_request *rq,
#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
-static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
+static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
+ u32 seqno);
+static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
+ u32 seqno);
/**
* Returns true if seq1 is later than seq2.
@@ -282,11 +285,31 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
return (s32)(seq1 - seq2) >= 0;
}
+/**
+ * i915_request_started - check if the request has begun being executed
+ * @rq: the request
+ *
+ * Returns true if the request has been submitted to hardware, and the hardware
+ * has advanced past the end of the previous request, and so should either be
+ * currently processing the request (though it may be preempted and so
+ * not necessarily the next request to complete) or have completed it.
+ */
+static inline bool i915_request_started(const struct i915_request *rq)
+{
+ u32 seqno;
+
+ seqno = i915_request_global_seqno(rq);
+ if (!seqno) /* not yet submitted to HW */
+ return false;
+
+ return intel_engine_has_started(rq->engine, seqno);
+}
+
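The new intel_engine_has_started()/intel_engine_has_completed() helpers wrap the open-coded i915_seqno_passed() checks: the comparison stays wraparound-safe because it is done on the signed 32-bit difference, and "started" compares against seqno - 1 (the previous request's breadcrumb), as the converted call sites in this patch show. A small userspace sketch of those semantics, with hypothetical stand-in helpers rather than the driver's engine-backed versions:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Wraparound-safe "seq1 is at or after seq2", as in i915_seqno_passed(). */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
    return (int32_t)(seq1 - seq2) >= 0;
}

/* Hypothetical stand-ins mirroring how the call sites are converted here:
 * "started" means the breadcrumb has passed the previous request (seqno - 1),
 * "completed" means it has passed the request itself. */
static bool has_started(uint32_t engine_seqno, uint32_t seqno)
{
    return seqno_passed(engine_seqno, seqno - 1);
}

static bool has_completed(uint32_t engine_seqno, uint32_t seqno)
{
    return seqno_passed(engine_seqno, seqno);
}

int main(void)
{
    uint32_t engine = 0xfffffffe;   /* breadcrumb just before the wrap */
    uint32_t request = 0xffffffff;  /* request that straddles the wrap */

    printf("started=%d completed=%d\n",
           has_started(engine, request), has_completed(engine, request));

    engine = 0x00000001;            /* after wraparound */
    printf("started=%d completed=%d\n",
           has_started(engine, request), has_completed(engine, request));
    return 0;
}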
static inline bool
__i915_request_completed(const struct i915_request *rq, u32 seqno)
{
GEM_BUG_ON(!seqno);
- return i915_seqno_passed(intel_engine_get_seqno(rq->engine), seqno) &&
+ return intel_engine_has_completed(rq->engine, seqno) &&
seqno == i915_request_global_seqno(rq);
}
@@ -301,18 +324,6 @@ static inline bool i915_request_completed(const struct i915_request *rq)
return __i915_request_completed(rq, seqno);
}
-static inline bool i915_request_started(const struct i915_request *rq)
-{
- u32 seqno;
-
- seqno = i915_request_global_seqno(rq);
- if (!seqno)
- return false;
-
- return i915_seqno_passed(intel_engine_get_seqno(rq->engine),
- seqno - 1);
-}
-
static inline bool i915_sched_node_signaled(const struct i915_sched_node *node)
{
const struct i915_request *rq =
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 11d834f94220..31efc971a3a8 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -199,7 +199,6 @@ vma_create(struct drm_i915_gem_object *obj,
vma->flags |= I915_VMA_GGTT;
list_add(&vma->obj_link, &obj->vma_list);
} else {
- i915_ppgtt_get(i915_vm_to_ppgtt(vm));
list_add_tail(&vma->obj_link, &obj->vma_list);
}
@@ -406,7 +405,7 @@ void i915_vma_unpin_iomap(struct i915_vma *vma)
i915_vma_unpin(vma);
}
-void i915_vma_unpin_and_release(struct i915_vma **p_vma)
+void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
struct i915_vma *vma;
struct drm_i915_gem_object *obj;
@@ -421,6 +420,9 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma)
i915_vma_unpin(vma);
i915_vma_close(vma);
+ if (flags & I915_VMA_RELEASE_MAP)
+ i915_gem_object_unpin_map(obj);
+
__i915_gem_object_release_unless_active(obj);
}
@@ -807,9 +809,6 @@ static void __i915_vma_destroy(struct i915_vma *vma)
if (vma->obj)
rb_erase(&vma->obj_node, &vma->obj->vma_tree);
- if (!i915_vma_is_ggtt(vma))
- i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
-
rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) {
GEM_BUG_ON(i915_gem_active_isset(&iter->base));
kfree(iter);
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index f06d66377107..4f7c1c7599f4 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -138,7 +138,8 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view);
-void i915_vma_unpin_and_release(struct i915_vma **p_vma);
+void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags);
+#define I915_VMA_RELEASE_MAP BIT(0)
static inline bool i915_vma_is_active(struct i915_vma *vma)
{
@@ -207,6 +208,11 @@ static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
return lower_32_bits(vma->node.start);
}
+static inline u32 i915_ggtt_pin_bias(struct i915_vma *vma)
+{
+ return i915_vm_to_ggtt(vma->vm)->pin_bias;
+}
+
static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
{
i915_gem_object_get(vma->obj);
@@ -245,6 +251,8 @@ i915_vma_compare(struct i915_vma *vma,
if (cmp)
return cmp;
+ assert_i915_gem_gtt_types();
+
/* ggtt_view.type also encodes its size so that we both distinguish
* different views using it as a "type" and also use a compact (no
* accessing of uninitialised padding bytes) memcmp without storing
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index dcba645cabb8..fa7df5fe154b 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -159,7 +159,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
}
intel_state->base.visible = false;
- ret = intel_plane->check_plane(intel_plane, crtc_state, intel_state);
+ ret = intel_plane->check_plane(crtc_state, intel_state);
if (ret)
return ret;
@@ -170,7 +170,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
if (state->fb && INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (state->fb->modifier == I915_FORMAT_MOD_Y_TILED ||
- state->fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
+ state->fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
+ state->fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ state->fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index b725835b47ef..769f3f586661 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -962,9 +962,6 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
{
int ret;
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
- return;
-
ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
if (ret < 0) {
DRM_ERROR("failed to add audio component (%d)\n", ret);
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 1db6ba7d926e..84bf8d827136 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -256,8 +256,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
spin_unlock(&b->irq_lock);
rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
- GEM_BUG_ON(!i915_seqno_passed(intel_engine_get_seqno(engine),
- wait->seqno));
+ GEM_BUG_ON(!intel_engine_signaled(engine, wait->seqno));
RB_CLEAR_NODE(&wait->node);
wake_up_process(wait->tsk);
}
@@ -508,8 +507,7 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
return armed;
/* Make the caller recheck if its request has already started. */
- return i915_seqno_passed(intel_engine_get_seqno(engine),
- wait->seqno - 1);
+ return intel_engine_has_started(engine, wait->seqno);
}
static inline bool chain_wakeup(struct rb_node *rb, int priority)
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index cf9b600cca79..14cf4c367e36 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -55,7 +55,9 @@ MODULE_FIRMWARE(I915_CSR_BXT);
#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
-#define CSR_MAX_FW_SIZE 0x2FFF
+#define BXT_CSR_MAX_FW_SIZE 0x3000
+#define GLK_CSR_MAX_FW_SIZE 0x4000
+#define ICL_CSR_MAX_FW_SIZE 0x6000
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
struct intel_css_header {
@@ -279,6 +281,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
struct intel_csr *csr = &dev_priv->csr;
const struct stepping_info *si = intel_get_stepping_info(dev_priv);
uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
+ uint32_t max_fw_size = 0;
uint32_t i;
uint32_t *dmc_payload;
uint32_t required_version;
@@ -359,6 +362,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
si->stepping);
return NULL;
}
+ /* Convert dmc_offset into number of bytes. By default it is in dwords. */
+ dmc_offset *= 4;
readcount += dmc_offset;
/* Extract dmc_header information. */
@@ -391,8 +396,16 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
nbytes = dmc_header->fw_size * 4;
- if (nbytes > CSR_MAX_FW_SIZE) {
- DRM_ERROR("DMC firmware too big (%u bytes)\n", nbytes);
+ if (INTEL_GEN(dev_priv) >= 11)
+ max_fw_size = ICL_CSR_MAX_FW_SIZE;
+ else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ max_fw_size = GLK_CSR_MAX_FW_SIZE;
+ else if (IS_GEN9(dev_priv))
+ max_fw_size = BXT_CSR_MAX_FW_SIZE;
+ else
+ MISSING_CASE(INTEL_REVID(dev_priv));
+ if (nbytes > max_fw_size) {
+ DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes);
return NULL;
}
csr->dmc_fw_size = dmc_header->fw_size;
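Both the dmc offset and the firmware payload size come out of the headers in dwords and are converted to bytes before the payload is checked against the per-platform cap introduced here. A standalone sketch of just that bookkeeping, with a hypothetical header and the ICL cap value as it appears in this patch:

#include <stdio.h>

int main(void)
{
    /* Hypothetical header fields, both expressed in dwords. */
    unsigned int dmc_offset_dw = 0x100;
    unsigned int fw_size_dw = 0x1400;

    /* Per-platform payload cap as defined in this patch (ICL). */
    unsigned int max_fw_size = 0x6000;

    unsigned int dmc_offset = dmc_offset_dw * 4;    /* dwords -> bytes */
    unsigned int nbytes = fw_size_dw * 4;           /* dwords -> bytes */

    printf("payload at byte offset %u, %u bytes -> %s\n",
           dmc_offset, nbytes,
           nbytes > max_fw_size ? "rejected as too big" : "accepted");
    return 0;
}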
@@ -468,12 +481,6 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
csr->fw_path = I915_CSR_SKL;
else if (IS_BROXTON(dev_priv))
csr->fw_path = I915_CSR_BXT;
- else {
- DRM_ERROR("Unexpected: no known CSR firmware for platform\n");
- return;
- }
-
- DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
/*
* Obtain a runtime pm reference, until CSR is loaded,
@@ -481,6 +488,14 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ if (csr->fw_path == NULL) {
+ DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n");
+ WARN_ON(!IS_ALPHA_SUPPORT(INTEL_INFO(dev_priv)));
+
+ return;
+ }
+
+ DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
schedule_work(&dev_priv->csr.work);
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 8761513f3532..cd01a09c5e0f 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1414,7 +1414,7 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
break;
}
- ref_clock = dev_priv->cdclk.hw.ref;
+ ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock;
@@ -1427,6 +1427,81 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
return dco_freq / (p0 * p1 * p2 * 5);
}
+static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ u32 val = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
+
+ switch (val) {
+ case DDI_CLK_SEL_NONE:
+ return 0;
+ case DDI_CLK_SEL_TBT_162:
+ return 162000;
+ case DDI_CLK_SEL_TBT_270:
+ return 270000;
+ case DDI_CLK_SEL_TBT_540:
+ return 540000;
+ case DDI_CLK_SEL_TBT_810:
+ return 810000;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ u32 mg_pll_div0, mg_clktop_hsclkctl;
+ u32 m1, m2_int, m2_frac, div1, div2, refclk;
+ u64 tmp;
+
+ refclk = dev_priv->cdclk.hw.ref;
+
+ mg_pll_div0 = I915_READ(MG_PLL_DIV0(port));
+ mg_clktop_hsclkctl = I915_READ(MG_CLKTOP2_HSCLKCTL(port));
+
+ m1 = I915_READ(MG_PLL_DIV1(port)) & MG_PLL_DIV1_FBPREDIV_MASK;
+ m2_int = mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
+ m2_frac = (mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ?
+ (mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >>
+ MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0;
+
+ switch (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
+ div1 = 2;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
+ div1 = 3;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
+ div1 = 5;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
+ div1 = 7;
+ break;
+ default:
+ MISSING_CASE(mg_clktop_hsclkctl);
+ return 0;
+ }
+
+ div2 = (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
+ /* A div2 value of 0 is the same as 1, i.e. no divider */
+ if (div2 == 0)
+ div2 = 1;
+
+ /*
+ * Adjust the original formula to delay the division by 2^22 in order to
+ * minimize possible rounding errors.
+ */
+ tmp = (u64)m1 * m2_int * refclk +
+ (((u64)m1 * m2_frac * refclk) >> 22);
+ tmp = div_u64(tmp, 5 * div1 * div2);
+
+ return tmp;
+}
+
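icl_calc_mg_pll_link() keeps the fractional part of the feedback divider in 22-bit fixed point and, per the comment above, folds the division by 2^22 in as late as possible to limit rounding error. A worked standalone sketch of the same arithmetic, with hypothetical divider values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Hypothetical MG PLL settings; refclk in kHz as in the driver. */
    uint64_t refclk = 38400;    /* 38.4 MHz reference */
    uint64_t m1 = 2, m2_int = 70;
    uint64_t m2_frac = 0x200000;    /* 0.5 in 22-bit fixed point */
    uint64_t div1 = 2, div2 = 1;

    /* Same ordering as icl_calc_mg_pll_link(): keep the m2 fraction in
     * fixed point and shift out the 2^22 scale as late as possible. */
    uint64_t tmp = m1 * m2_int * refclk + ((m1 * m2_frac * refclk) >> 22);
    uint64_t link_clock = tmp / (5 * div1 * div2);

    printf("link clock = %llu kHz\n", (unsigned long long)link_clock);
    return 0;
}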
static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
{
int dotclock;
@@ -1467,8 +1542,10 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
link_clock = icl_calc_dp_combo_pll_link(dev_priv,
pll_id);
} else {
- /* FIXME - Add for MG PLL */
- WARN(1, "MG PLL clock_get code not implemented yet\n");
+ if (pll_id == DPLL_ID_ICL_TBTPLL)
+ link_clock = icl_calc_tbt_pll_link(dev_priv, port);
+ else
+ link_clock = icl_calc_mg_pll_link(dev_priv, port);
}
pipe_config->port_clock = link_clock;
@@ -2468,7 +2545,128 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
}
-static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level,
+static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
+ int link_clock,
+ u32 level)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ const struct icl_mg_phy_ddi_buf_trans *ddi_translations;
+ u32 n_entries, val;
+ int ln;
+
+ n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
+ ddi_translations = icl_mg_phy_ddi_translations;
+ /* The table does not have values for level 3 and level 9. */
+ if (level >= n_entries || level == 3 || level == 9) {
+ DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.",
+ level, n_entries - 2);
+ level = n_entries - 2;
+ }
+
+ /* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
+ for (ln = 0; ln < 2; ln++) {
+ val = I915_READ(MG_TX1_LINK_PARAMS(port, ln));
+ val &= ~CRI_USE_FS32;
+ I915_WRITE(MG_TX1_LINK_PARAMS(port, ln), val);
+
+ val = I915_READ(MG_TX2_LINK_PARAMS(port, ln));
+ val &= ~CRI_USE_FS32;
+ I915_WRITE(MG_TX2_LINK_PARAMS(port, ln), val);
+ }
+
+ /* Program MG_TX_SWINGCTRL with values from vswing table */
+ for (ln = 0; ln < 2; ln++) {
+ val = I915_READ(MG_TX1_SWINGCTRL(port, ln));
+ val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
+ val |= CRI_TXDEEMPH_OVERRIDE_17_12(
+ ddi_translations[level].cri_txdeemph_override_17_12);
+ I915_WRITE(MG_TX1_SWINGCTRL(port, ln), val);
+
+ val = I915_READ(MG_TX2_SWINGCTRL(port, ln));
+ val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
+ val |= CRI_TXDEEMPH_OVERRIDE_17_12(
+ ddi_translations[level].cri_txdeemph_override_17_12);
+ I915_WRITE(MG_TX2_SWINGCTRL(port, ln), val);
+ }
+
+ /* Program MG_TX_DRVCTRL with values from vswing table */
+ for (ln = 0; ln < 2; ln++) {
+ val = I915_READ(MG_TX1_DRVCTRL(port, ln));
+ val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
+ CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
+ val |= CRI_TXDEEMPH_OVERRIDE_5_0(
+ ddi_translations[level].cri_txdeemph_override_5_0) |
+ CRI_TXDEEMPH_OVERRIDE_11_6(
+ ddi_translations[level].cri_txdeemph_override_11_6) |
+ CRI_TXDEEMPH_OVERRIDE_EN;
+ I915_WRITE(MG_TX1_DRVCTRL(port, ln), val);
+
+ val = I915_READ(MG_TX2_DRVCTRL(port, ln));
+ val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
+ CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
+ val |= CRI_TXDEEMPH_OVERRIDE_5_0(
+ ddi_translations[level].cri_txdeemph_override_5_0) |
+ CRI_TXDEEMPH_OVERRIDE_11_6(
+ ddi_translations[level].cri_txdeemph_override_11_6) |
+ CRI_TXDEEMPH_OVERRIDE_EN;
+ I915_WRITE(MG_TX2_DRVCTRL(port, ln), val);
+
+ /* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */
+ }
+
+ /*
+ * Program MG_CLKHUB<LN, port being used> with the value from the frequency
+ * table. In legacy mode on the MG PHY both TX1 and TX2 are enabled, so use
+ * the table values for which both TX1 and TX2 are enabled.
+ */
+ for (ln = 0; ln < 2; ln++) {
+ val = I915_READ(MG_CLKHUB(port, ln));
+ if (link_clock < 300000)
+ val |= CFG_LOW_RATE_LKREN_EN;
+ else
+ val &= ~CFG_LOW_RATE_LKREN_EN;
+ I915_WRITE(MG_CLKHUB(port, ln), val);
+ }
+
+ /* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
+ for (ln = 0; ln < 2; ln++) {
+ val = I915_READ(MG_TX1_DCC(port, ln));
+ val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
+ if (link_clock <= 500000) {
+ val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
+ } else {
+ val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
+ CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
+ }
+ I915_WRITE(MG_TX1_DCC(port, ln), val);
+
+ val = I915_READ(MG_TX2_DCC(port, ln));
+ val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
+ if (link_clock <= 500000) {
+ val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
+ } else {
+ val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
+ CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
+ }
+ I915_WRITE(MG_TX2_DCC(port, ln), val);
+ }
+
+ /* Program MG_TX_PISO_READLOAD with values from vswing table */
+ for (ln = 0; ln < 2; ln++) {
+ val = I915_READ(MG_TX1_PISO_READLOAD(port, ln));
+ val |= CRI_CALCINIT;
+ I915_WRITE(MG_TX1_PISO_READLOAD(port, ln), val);
+
+ val = I915_READ(MG_TX2_PISO_READLOAD(port, ln));
+ val |= CRI_CALCINIT;
+ I915_WRITE(MG_TX2_PISO_READLOAD(port, ln), val);
+ }
+}
+
+static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
+ int link_clock,
+ u32 level,
enum intel_output_type type)
{
enum port port = encoder->port;
@@ -2476,8 +2674,7 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level,
if (port == PORT_A || port == PORT_B)
icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
else
- /* Not Implemented Yet */
- WARN_ON(1);
+ icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
}
static uint32_t translate_signal_level(int signal_levels)
@@ -2512,7 +2709,8 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp)
int level = intel_ddi_dp_level(intel_dp);
if (IS_ICELAKE(dev_priv))
- icl_ddi_vswing_sequence(encoder, level, encoder->type);
+ icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
+ level, encoder->type);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_vswing_sequence(encoder, level, encoder->type);
else
@@ -2692,8 +2890,12 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+ icl_program_mg_dp_mode(intel_dp);
+ icl_disable_phy_clock_gating(dig_port);
+
if (IS_ICELAKE(dev_priv))
- icl_ddi_vswing_sequence(encoder, level, encoder->type);
+ icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
+ level, encoder->type);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_vswing_sequence(encoder, level, encoder->type);
else if (IS_GEN9_LP(dev_priv))
@@ -2708,7 +2910,10 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
intel_dp_stop_link_train(intel_dp);
- intel_ddi_enable_pipe_clock(crtc_state);
+ icl_enable_phy_clock_gating(dig_port);
+
+ if (!is_mst)
+ intel_ddi_enable_pipe_clock(crtc_state);
}
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -2728,7 +2933,8 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
if (IS_ICELAKE(dev_priv))
- icl_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI);
+ icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
+ level, INTEL_OUTPUT_HDMI);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI);
else if (IS_GEN9_LP(dev_priv))
@@ -2810,14 +3016,14 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
bool is_mst = intel_crtc_has_type(old_crtc_state,
INTEL_OUTPUT_DP_MST);
- intel_ddi_disable_pipe_clock(old_crtc_state);
-
- /*
- * Power down sink before disabling the port, otherwise we end
- * up getting interrupts from the sink on detecting link loss.
- */
- if (!is_mst)
+ if (!is_mst) {
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+ /*
+ * Power down sink before disabling the port, otherwise we end
+ * up getting interrupts from the sink on detecting link loss.
+ */
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ }
intel_disable_ddi_buf(encoder);
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 633f9fbf72ea..6eecd64734d5 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -103,9 +103,9 @@ enum intel_platform {
func(has_psr); \
func(has_rc6); \
func(has_rc6p); \
- func(has_resource_streamer); \
func(has_runtime_pm); \
func(has_snoop); \
+ func(has_coherent_ggtt); \
func(unfenced_needs_alignment); \
func(cursor_needs_physical); \
func(hws_needs_physical); \
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ed3fa1c8a983..5711cb701760 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -46,6 +46,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
+#include <drm/drm_atomic_uapi.h>
#include <linux/dma_remapping.h>
#include <linux/reservation.h>
@@ -2474,6 +2475,12 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
}
}
+bool is_ccs_modifier(u64 modifier)
+{
+ return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+}
+
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
struct drm_framebuffer *fb)
@@ -2504,8 +2511,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
return ret;
}
- if ((fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) && i == 1) {
+ if (is_ccs_modifier(fb->modifier) && i == 1) {
int hsub = fb->format->hsub;
int vsub = fb->format->vsub;
int tile_width, tile_height;
@@ -2988,6 +2994,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
int w = drm_rect_width(&plane_state->base.src) >> 16;
int h = drm_rect_height(&plane_state->base.src) >> 16;
int dst_x = plane_state->base.dst.x1;
+ int dst_w = drm_rect_width(&plane_state->base.dst);
int pipe_src_w = crtc_state->pipe_src_w;
int max_width = skl_max_plane_width(fb, 0, rotation);
int max_height = 4096;
@@ -3009,10 +3016,10 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
* screen may cause FIFO underflow and display corruption.
*/
if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
- (dst_x + w < 4 || dst_x > pipe_src_w - 4)) {
+ (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) {
DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
- dst_x + w < 4 ? "end" : "start",
- dst_x + w < 4 ? dst_x + w : dst_x,
+ dst_x + dst_w < 4 ? "end" : "start",
+ dst_x + dst_w < 4 ? dst_x + dst_w : dst_x,
4, pipe_src_w - 4);
return -ERANGE;
}
@@ -3054,8 +3061,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
* CCS AUX surface doesn't have its own x/y offsets, we must make sure
* they match with the main surface x/y offsets.
*/
- if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
+ if (is_ccs_modifier(fb->modifier)) {
while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
if (offset == 0)
break;
@@ -3189,8 +3195,7 @@ int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
- } else if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
+ } else if (is_ccs_modifier(fb->modifier)) {
ret = skl_check_ccs_aux_surface(plane_state);
if (ret)
return ret;
@@ -3551,11 +3556,11 @@ static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
case I915_FORMAT_MOD_Y_TILED:
return PLANE_CTL_TILED_Y;
case I915_FORMAT_MOD_Y_TILED_CCS:
- return PLANE_CTL_TILED_Y | PLANE_CTL_DECOMPRESSION_ENABLE;
+ return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
case I915_FORMAT_MOD_Yf_TILED:
return PLANE_CTL_TILED_YF;
case I915_FORMAT_MOD_Yf_TILED_CCS:
- return PLANE_CTL_TILED_YF | PLANE_CTL_DECOMPRESSION_ENABLE;
+ return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
default:
MISSING_CASE(fb_modifier);
}
@@ -5078,10 +5083,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
mutex_lock(&dev_priv->pcu_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
mutex_unlock(&dev_priv->pcu_lock);
- /* wait for pcode to finish disabling IPS, which may take up to 42ms */
+ /*
+ * Wait for PCODE to finish disabling IPS. The BSpec-specified
+ * 42ms timeout leads to occasional timeouts, so use 100ms
+ * instead.
+ */
if (intel_wait_for_register(dev_priv,
IPS_CTL, IPS_ENABLE, 0,
- 42))
+ 100))
DRM_ERROR("Timed out waiting for IPS disable\n");
} else {
I915_WRITE(IPS_CTL, 0);
@@ -6676,22 +6685,20 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
static void compute_m_n(unsigned int m, unsigned int n,
uint32_t *ret_m, uint32_t *ret_n,
- bool reduce_m_n)
+ bool constant_n)
{
/*
- * Reduce M/N as much as possible without loss in precision. Several DP
- * dongles in particular seem to be fussy about too large *link* M/N
- * values. The passed in values are more likely to have the least
- * significant bits zero than M after rounding below, so do this first.
+ * Several DP dongles in particular seem to be fussy about
+ * too large link M/N values. Use a fixed N of 0x8000, which
+ * such devices should accept. 0x8000 is the fixed N value
+ * specified for asynchronous clock mode, and the devices
+ * expect it in synchronous clock mode as well.
*/
- if (reduce_m_n) {
- while ((m & 1) == 0 && (n & 1) == 0) {
- m >>= 1;
- n >>= 1;
- }
- }
+ if (constant_n)
+ *ret_n = 0x8000;
+ else
+ *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
- *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
*ret_m = div_u64((uint64_t) m * *ret_n, n);
intel_reduce_m_n_ratio(ret_m, ret_n);
}
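With the constant-N quirk, N is pinned to 0x8000 and M follows from the data/link ratio (the driver then reduces the ratio with intel_reduce_m_n_ratio()). A standalone sketch of the data M/N computation for a hypothetical 1080p60 mode over four lanes of HBR:

#include <stdio.h>

int main(void)
{
    /* Hypothetical mode: 1080p60, 24 bpp, 4 lanes of HBR (270 MHz). */
    unsigned int bpp = 24, nlanes = 4;
    unsigned int pixel_clock = 148500;  /* kHz */
    unsigned int link_clock = 270000;   /* kHz */

    /* data M/N as in compute_m_n() with constant_n: pin N to 0x8000 and
     * derive M from (bpp * pixel_clock) / (link_clock * nlanes * 8);
     * the driver additionally reduces the ratio afterwards. */
    unsigned long long m = (unsigned long long)bpp * pixel_clock;
    unsigned long long n = (unsigned long long)link_clock * nlanes * 8;
    unsigned int ret_n = 0x8000;
    unsigned int ret_m = (unsigned int)(m * ret_n / n);

    printf("data M/N = %u/0x%x (ratio %.6f, exact %.6f)\n",
           ret_m, ret_n, (double)ret_m / ret_n, (double)m / n);
    return 0;
}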
@@ -6700,18 +6707,18 @@ void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
- bool reduce_m_n)
+ bool constant_n)
{
m_n->tu = 64;
compute_m_n(bits_per_pixel * pixel_clock,
link_clock * nlanes * 8,
&m_n->gmch_m, &m_n->gmch_n,
- reduce_m_n);
+ constant_n);
compute_m_n(pixel_clock, link_clock,
&m_n->link_m, &m_n->link_n,
- reduce_m_n);
+ constant_n);
}
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
@@ -8798,13 +8805,13 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->modifier = I915_FORMAT_MOD_X_TILED;
break;
case PLANE_CTL_TILED_Y:
- if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
+ if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
else
fb->modifier = I915_FORMAT_MOD_Y_TILED;
break;
case PLANE_CTL_TILED_YF:
- if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
+ if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
else
fb->modifier = I915_FORMAT_MOD_Yf_TILED;
@@ -8973,7 +8980,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
- I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL)),
+ I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
"Display power well on\n");
I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
@@ -9690,8 +9697,7 @@ static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
}
-static int i845_check_cursor(struct intel_plane *plane,
- struct intel_crtc_state *crtc_state,
+static int i845_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
@@ -9881,10 +9887,10 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
return true;
}
-static int i9xx_check_cursor(struct intel_plane *plane,
- struct intel_crtc_state *crtc_state,
+static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
@@ -12738,7 +12744,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* down.
*/
INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
- schedule_work(&state->commit_work);
+ queue_work(system_highpri_wq, &state->commit_work);
}
static void intel_atomic_commit_work(struct work_struct *work)
@@ -12895,6 +12901,8 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.atomic_duplicate_state = intel_crtc_duplicate_state,
.atomic_destroy_state = intel_crtc_destroy_state,
.set_crc_source = intel_crtc_set_crc_source,
+ .verify_crc_source = intel_crtc_verify_crc_source,
+ .get_crc_sources = intel_crtc_get_crc_sources,
};
struct wait_rps_boost {
@@ -12966,8 +12974,11 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
INTEL_INFO(dev_priv)->cursor_needs_physical) {
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
const int align = intel_cursor_alignment(dev_priv);
+ int err;
- return i915_gem_object_attach_phys(obj, align);
+ err = i915_gem_object_attach_phys(obj, align);
+ if (err)
+ return err;
}
vma = intel_pin_and_fence_fb_obj(fb,
@@ -13186,10 +13197,10 @@ skl_max_scale(struct intel_crtc *intel_crtc,
}
static int
-intel_check_primary_plane(struct intel_plane *plane,
- struct intel_crtc_state *crtc_state,
+intel_check_primary_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
+ struct intel_plane *plane = to_intel_plane(state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct drm_crtc *crtc = state->base.crtc;
int min_scale = DRM_PLANE_HELPER_NO_SCALING;
@@ -13397,8 +13408,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
- if (modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Y_TILED_CCS)
+ if (is_ccs_modifier(modifier))
return true;
/* fall through */
case DRM_FORMAT_RGB565:
@@ -13617,24 +13627,22 @@ static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
- if (plane_id == PLANE_PRIMARY) {
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
- return false;
- else if ((INTEL_GEN(dev_priv) == 9 && pipe == PIPE_C) &&
- !IS_GEMINILAKE(dev_priv))
- return false;
- } else if (plane_id >= PLANE_SPRITE0) {
- if (plane_id == PLANE_CURSOR)
- return false;
- if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) == 10) {
- if (plane_id != PLANE_SPRITE0)
- return false;
- } else {
- if (plane_id != PLANE_SPRITE0 || pipe == PIPE_C ||
- IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
- return false;
- }
- }
+ /*
+ * FIXME: ICL requires two hardware planes for scanning out NV12
+ * framebuffers. Do not advertise support until this is implemented.
+ */
+ if (INTEL_GEN(dev_priv) >= 11)
+ return false;
+
+ if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+ return false;
+
+ if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
+ return false;
+
+ if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
+ return false;
+
return true;
}
@@ -14128,6 +14136,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_pps_init(dev_priv);
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ return;
+
/*
* intel_edp_init_connector() depends on this completing first, to
* prevent the registration of both eDP and LVDS and the incorrect
@@ -14544,7 +14555,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
break;
case DRM_FORMAT_NV12:
if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
- IS_BROXTON(dev_priv)) {
+ IS_BROXTON(dev_priv) || INTEL_GEN(dev_priv) >= 11) {
DRM_DEBUG_KMS("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format,
&format_name));
@@ -14591,8 +14602,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
* potential runtime errors at plane configuration time.
*/
if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 &&
- (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS))
+ is_ccs_modifier(fb->modifier))
stride_alignment *= 4;
if (fb->pitches[i] & (stride_alignment - 1)) {
@@ -15128,12 +15138,61 @@ static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
}
+static int intel_initial_commit(struct drm_device *dev)
+{
+ struct drm_atomic_state *state = NULL;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int ret = 0;
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+ state->acquire_ctx = &ctx;
+
+ drm_for_each_crtc(crtc, dev) {
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto out;
+ }
+
+ if (crtc_state->active) {
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ goto out;
+ }
+ }
+
+ ret = drm_atomic_commit(state);
+
+out:
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ drm_atomic_state_put(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+}
+
int intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
enum pipe pipe;
struct intel_crtc *crtc;
+ int ret;
dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
@@ -15157,9 +15216,6 @@ int intel_modeset_init(struct drm_device *dev)
intel_init_pm(dev_priv);
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
- return 0;
-
/*
* There may be no VBT; and if the BIOS enabled SSC we can
* just keep using it to avoid unnecessary flicker. Whereas if the
@@ -15208,8 +15264,6 @@ int intel_modeset_init(struct drm_device *dev)
INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
for_each_pipe(dev_priv, pipe) {
- int ret;
-
ret = intel_crtc_init(dev_priv, pipe);
if (ret) {
drm_mode_config_cleanup(dev);
@@ -15265,6 +15319,16 @@ int intel_modeset_init(struct drm_device *dev)
if (!HAS_GMCH_DISPLAY(dev_priv))
sanitize_watermarks(dev);
+ /*
+ * Force all active planes to recompute their states, so that on
+ * mode_setcrtc after probe all the intel_plane_state variables
+ * are already calculated and there are no assert_plane warnings
+ * during bootup.
+ */
+ ret = intel_initial_commit(dev);
+ if (ret)
+ DRM_DEBUG_KMS("Initial commit in probe failed.\n");
+
return 0;
}
@@ -15789,6 +15853,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
struct intel_encoder *encoder;
int i;
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+
intel_early_display_was(dev_priv);
intel_modeset_readout_hw_state(dev);
@@ -15843,9 +15909,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
if (WARN_ON(put_domains))
modeset_put_power_domains(dev_priv, put_domains);
}
- intel_display_set_init_power(dev_priv, false);
- intel_power_domains_verify_state(dev_priv);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
intel_fbc_init_pipe_state(dev_priv);
}
@@ -15934,8 +15999,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
flush_work(&dev_priv->atomic_helper.free_work);
WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
- intel_disable_gt_powersave(dev_priv);
-
/*
* Interrupts and polling as the first thing to avoid creating havoc.
* Too much stuff here (turning of connectors, ...) would
@@ -15963,8 +16026,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_cleanup_overlay(dev_priv);
- intel_cleanup_gt_powersave(dev_priv);
-
intel_teardown_gmbus(dev_priv);
destroy_workqueue(dev_priv->modeset_wq);
@@ -16072,8 +16133,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
return NULL;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- error->power_well_driver =
- I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL));
+ error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
for_each_pipe(dev_priv, i) {
error->pipe[i].power_domain_on =
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 138a1bc1818c..9fac67e31205 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -25,6 +25,24 @@
#ifndef _INTEL_DISPLAY_H_
#define _INTEL_DISPLAY_H_
+#include <drm/drm_util.h>
+
+enum i915_gpio {
+ GPIOA,
+ GPIOB,
+ GPIOC,
+ GPIOD,
+ GPIOE,
+ GPIOF,
+ GPIOG,
+ GPIOH,
+ __GPIOI_UNUSED,
+ GPIOJ,
+ GPIOK,
+ GPIOL,
+ GPIOM,
+};
+
enum pipe {
INVALID_PIPE = -1,
@@ -161,6 +179,13 @@ enum tc_port {
I915_MAX_TC_PORTS
};
+enum tc_port_type {
+ TC_PORT_UNKNOWN = 0,
+ TC_PORT_TYPEC,
+ TC_PORT_TBT,
+ TC_PORT_LEGACY,
+};
+
enum dpio_channel {
DPIO_CH0,
DPIO_CH1
@@ -346,11 +371,11 @@ struct intel_link_m_n {
#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
for_each_power_well(__dev_priv, __power_well) \
- for_each_if((__power_well)->domains & (__domain_mask))
+ for_each_if((__power_well)->desc->domains & (__domain_mask))
#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
for_each_power_well_rev(__dev_priv, __power_well) \
- for_each_if((__power_well)->domains & (__domain_mask))
+ for_each_if((__power_well)->desc->domains & (__domain_mask))
#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
for ((__i) = 0; \
@@ -380,6 +405,7 @@ struct intel_link_m_n {
void intel_link_compute_m_n(int bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
- bool reduce_m_n);
+ bool constant_n);
+bool is_ccs_modifier(u64 modifier);
#endif
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index cd0f649b57a5..6b4c19123f2a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -107,13 +107,6 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
-static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-
- return intel_dig_port->base.base.dev;
-}
-
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
@@ -176,14 +169,45 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}
+static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+ u32 lane_info;
+
+ if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC)
+ return 4;
+
+ lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
+ DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
+ DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+
+ switch (lane_info) {
+ default:
+ MISSING_CASE(lane_info);
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ return 1;
+ case 3:
+ case 12:
+ return 2;
+ case 15:
+ return 4;
+ }
+}
+
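The case values in the switch above are consistent with lane_info being a 4-bit mask of which Type-C PHY lanes the FIA has assigned to DP, so the supported lane count is effectively the population count of that mask (1, 2, 4, 8 give one lane; 3 and 12 give two; 15 gives four). A minimal sketch under that assumption, using the GCC/Clang popcount builtin:

#include <stdio.h>

int main(void)
{
    /* lane_info as read from the DFLEXDPSP lane-assignment field: treated
     * here as a 4-bit mask of assigned PHY lanes, so the usable DP lane
     * count is its population count. */
    static const unsigned int samples[] = { 0x1, 0x2, 0x3, 0x8, 0xc, 0xf };

    for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("lane_info 0x%x -> %d lane(s)\n",
               samples[i], __builtin_popcount(samples[i]));
    return 0;
}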
/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
int source_max = intel_dig_port->max_lanes;
int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
+ int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp);
- return min(source_max, sink_max);
+ return min3(source_max, sink_max, fia_max);
}
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
@@ -198,6 +222,138 @@ intel_dp_link_required(int pixel_clock, int bpp)
return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
+void icl_program_mg_dp_mode(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum port port = intel_dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ u32 ln0, ln1, lane_info;
+
+ if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
+ return;
+
+ ln0 = I915_READ(MG_DP_MODE(port, 0));
+ ln1 = I915_READ(MG_DP_MODE(port, 1));
+
+ switch (intel_dig_port->tc_type) {
+ case TC_PORT_TYPEC:
+ ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+ ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+
+ lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
+ DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
+ DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+
+ switch (lane_info) {
+ case 0x1:
+ case 0x4:
+ break;
+ case 0x2:
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ break;
+ case 0x3:
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
+ MG_DP_MODE_CFG_DP_X2_MODE;
+ break;
+ case 0x8:
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ break;
+ case 0xC:
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
+ MG_DP_MODE_CFG_DP_X2_MODE;
+ break;
+ case 0xF:
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
+ MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
+ MG_DP_MODE_CFG_DP_X2_MODE;
+ break;
+ default:
+ MISSING_CASE(lane_info);
+ }
+ break;
+
+ case TC_PORT_LEGACY:
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
+ break;
+
+ default:
+ MISSING_CASE(intel_dig_port->tc_type);
+ return;
+ }
+
+ I915_WRITE(MG_DP_MODE(port, 0), ln0);
+ I915_WRITE(MG_DP_MODE(port, 1), ln1);
+}
+
+void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
+ u32 val;
+ int i;
+
+ if (tc_port == PORT_TC_NONE)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
+ val = I915_READ(mg_regs[i]);
+ val |= MG_DP_MODE_CFG_TR2PWR_GATING |
+ MG_DP_MODE_CFG_TRPWR_GATING |
+ MG_DP_MODE_CFG_CLNPWR_GATING |
+ MG_DP_MODE_CFG_DIGPWR_GATING |
+ MG_DP_MODE_CFG_GAONPWR_GATING;
+ I915_WRITE(mg_regs[i], val);
+ }
+
+ val = I915_READ(MG_MISC_SUS0(tc_port));
+ val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
+ MG_MISC_SUS0_CFG_TR2PWR_GATING |
+ MG_MISC_SUS0_CFG_CL2PWR_GATING |
+ MG_MISC_SUS0_CFG_GAONPWR_GATING |
+ MG_MISC_SUS0_CFG_TRPWR_GATING |
+ MG_MISC_SUS0_CFG_CL1PWR_GATING |
+ MG_MISC_SUS0_CFG_DGPWR_GATING;
+ I915_WRITE(MG_MISC_SUS0(tc_port), val);
+}
+
+void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
+ u32 val;
+ int i;
+
+ if (tc_port == PORT_TC_NONE)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
+ val = I915_READ(mg_regs[i]);
+ val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
+ MG_DP_MODE_CFG_TRPWR_GATING |
+ MG_DP_MODE_CFG_CLNPWR_GATING |
+ MG_DP_MODE_CFG_DIGPWR_GATING |
+ MG_DP_MODE_CFG_GAONPWR_GATING);
+ I915_WRITE(mg_regs[i], val);
+ }
+
+ val = I915_READ(MG_MISC_SUS0(tc_port));
+ val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
+ MG_MISC_SUS0_CFG_TR2PWR_GATING |
+ MG_MISC_SUS0_CFG_CL2PWR_GATING |
+ MG_MISC_SUS0_CFG_GAONPWR_GATING |
+ MG_MISC_SUS0_CFG_TRPWR_GATING |
+ MG_MISC_SUS0_CFG_CL1PWR_GATING |
+ MG_MISC_SUS0_CFG_DGPWR_GATING);
+ I915_WRITE(MG_MISC_SUS0(tc_port), val);
+}
+
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
@@ -498,7 +654,7 @@ intel_dp_pps_init(struct intel_dp *intel_dp);
static void pps_lock(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
/*
* See intel_power_sequencer_reset() why we need
@@ -511,7 +667,7 @@ static void pps_lock(struct intel_dp *intel_dp)
static void pps_unlock(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
mutex_unlock(&dev_priv->pps_mutex);
@@ -521,7 +677,7 @@ static void pps_unlock(struct intel_dp *intel_dp)
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = intel_dp->pps_pipe;
bool pll_enabled, release_cl_override = false;
@@ -626,7 +782,7 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe;
@@ -673,7 +829,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
int backlight_controller = dev_priv->vbt.backlight.controller;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -742,7 +898,7 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
@@ -819,7 +975,7 @@ struct pps_registers {
static void intel_pps_get_registers(struct intel_dp *intel_dp,
struct pps_registers *regs)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
int pps_idx = 0;
memset(regs, 0, sizeof(*regs));
@@ -865,7 +1021,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
{
struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
edp_notifier);
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
return 0;
@@ -895,7 +1051,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -908,7 +1064,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -922,7 +1078,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (!intel_dp_is_edp(intel_dp))
return;
@@ -938,7 +1094,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
uint32_t status;
bool done;
@@ -955,7 +1111,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (index)
return 0;
@@ -969,7 +1125,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (index)
return 0;
@@ -987,7 +1143,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (intel_dp->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
/* Workaround for non-ULT HSW */
@@ -1045,15 +1201,23 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
int send_bytes,
uint32_t unused)
{
- return DP_AUX_CH_CTL_SEND_BUSY |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_INTERRUPT |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_TIME_OUT_MAX |
- DP_AUX_CH_CTL_RECEIVE_ERROR |
- (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
- DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ uint32_t ret;
+
+ ret = DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_INTERRUPT |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_TIME_OUT_MAX |
+ DP_AUX_CH_CTL_RECEIVE_ERROR |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
+ DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+
+ if (intel_dig_port->tc_type == TC_PORT_TBT)
+ ret |= DP_AUX_CH_CTL_TBT_IO;
+
+ return ret;
}
static int
@@ -1381,7 +1545,7 @@ intel_aux_power_domain(struct intel_dp *intel_dp)
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum aux_ch aux_ch = intel_dp->aux_ch;
switch (aux_ch) {
@@ -1397,7 +1561,7 @@ static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum aux_ch aux_ch = intel_dp->aux_ch;
switch (aux_ch) {
@@ -1413,7 +1577,7 @@ static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum aux_ch aux_ch = intel_dp->aux_ch;
switch (aux_ch) {
@@ -1431,7 +1595,7 @@ static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum aux_ch aux_ch = intel_dp->aux_ch;
switch (aux_ch) {
@@ -1449,7 +1613,7 @@ static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum aux_ch aux_ch = intel_dp->aux_ch;
switch (aux_ch) {
@@ -1468,7 +1632,7 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum aux_ch aux_ch = intel_dp->aux_ch;
switch (aux_ch) {
@@ -1494,7 +1658,7 @@ intel_dp_aux_fini(struct intel_dp *intel_dp)
static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
intel_dp->aux_ch = intel_aux_ch(intel_dp);
@@ -1662,7 +1826,7 @@ struct link_config_limits {
static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_connector *intel_connector = intel_dp->attached_connector;
int bpp, bpc;
@@ -1834,8 +1998,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
- bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
- DP_DPCD_QUIRK_LIMITED_M_N);
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
+ DP_DPCD_QUIRK_CONSTANT_N);
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
pipe_config->has_pch_encoder = true;
@@ -1900,7 +2064,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
&pipe_config->dp_m_n,
- reduce_m_n);
+ constant_n);
if (intel_connector->panel.downclock_mode != NULL &&
dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -1910,7 +2074,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_connector->panel.downclock_mode->clock,
pipe_config->port_clock,
&pipe_config->dp_m2_n2,
- reduce_m_n);
+ constant_n);
}
if (!HAS_DDI(dev_priv))
@@ -2030,7 +2194,7 @@ static void wait_panel_status(struct intel_dp *intel_dp,
u32 mask,
u32 value)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
i915_reg_t pp_stat_reg, pp_ctrl_reg;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -2106,7 +2270,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp)
static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 control;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -2127,7 +2291,7 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
*/
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
@@ -2198,7 +2362,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port =
dp_to_dig_port(intel_dp);
u32 pp;
@@ -2264,7 +2428,7 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
*/
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -2284,7 +2448,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
static void edp_panel_on(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2342,7 +2506,7 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
static void edp_panel_off(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2390,7 +2554,7 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2433,7 +2597,7 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2864,7 +3028,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t *DP,
uint8_t dp_train_pat)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
uint8_t train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
@@ -2946,7 +3110,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
static void intel_dp_enable_port(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
/* enable with pattern 1 (as per spec) */
@@ -3203,7 +3367,7 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
enum port port = encoder->port;
@@ -3222,7 +3386,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
enum port port = encoder->port;
@@ -3534,7 +3698,7 @@ ivb_cpu_edp_signal_levels(uint8_t train_set)
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
uint32_t signal_levels, mask = 0;
@@ -3591,7 +3755,7 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
uint32_t val;
@@ -4090,12 +4254,14 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
int ret = 0;
int retry;
bool handled;
+
+ WARN_ON_ONCE(intel_dp->active_mst_links < 0);
bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
if (bret == true) {
/* check link status - esi[10] = 0x200c */
- if (intel_dp->active_mst_links &&
+ if (intel_dp->active_mst_links > 0 &&
!drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
intel_dp_start_link_train(intel_dp);
@@ -4160,18 +4326,6 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}
-/*
- * If display is now connected check links status,
- * there has been known issues of link loss triggering
- * long pulse.
- *
- * Some sinks (eg. ASUS PB287Q) seem to perform some
- * weird HPD ping pong during modesets. So we can apparently
- * end up with HPD going low during a modeset, and then
- * going back up soon after. And once that happens we must
- * retrain the link to get a picture. That's in case no
- * userspace component reacted to intermittent HPD dip.
- */
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
@@ -4294,7 +4448,7 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder,
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 sink_irq_vector = 0;
u8 old_sink_count = intel_dp->sink_count;
bool ret;
@@ -4586,10 +4740,205 @@ static bool bxt_digital_port_connected(struct intel_encoder *encoder)
return I915_READ(GEN8_DE_PORT_ISR) & bit;
}
+static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *intel_dig_port)
+{
+ enum port port = intel_dig_port->base.port;
+
+ return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
+}
+
+static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *intel_dig_port,
+ bool is_legacy, bool is_typec, bool is_tbt)
+{
+ enum port port = intel_dig_port->base.port;
+ enum tc_port_type old_type = intel_dig_port->tc_type;
+ const char *type_str;
+
+ WARN_ON(is_legacy + is_typec + is_tbt != 1);
+
+ if (is_legacy) {
+ intel_dig_port->tc_type = TC_PORT_LEGACY;
+ type_str = "legacy";
+ } else if (is_typec) {
+ intel_dig_port->tc_type = TC_PORT_TYPEC;
+ type_str = "typec";
+ } else if (is_tbt) {
+ intel_dig_port->tc_type = TC_PORT_TBT;
+ type_str = "tbt";
+ } else {
+ return;
+ }
+
+ /* Types are not supposed to be changed at runtime. */
+ WARN_ON(old_type != TC_PORT_UNKNOWN &&
+ old_type != intel_dig_port->tc_type);
+
+ if (old_type != intel_dig_port->tc_type)
+ DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
+ type_str);
+}
+
+/*
+ * This function implements the first part of the Connect Flow described by our
+ * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
+ * lanes, EDID, etc) is done as needed in the typical places.
+ *
+ * Unlike the other ports, type-C ports are not available to use as soon as we
+ * get a hotplug. The type-C PHYs can be shared between multiple controllers:
+ * display, USB, etc. As a result, handshaking through FIA is required around
+ * connect and disconnect to cleanly transfer ownership with the controller and
+ * set the type-C power state.
+ *
+ * We could opt to only do the connect flow when we actually try to use the AUX
+ * channels or do a modeset, then immediately run the disconnect flow after
+ * usage, but there are some implications of this for a dynamic environment:
+ * things may go away or change behind our backs. So for now our driver is
+ * always trying to acquire ownership of the controller as soon as it gets an
+ * interrupt (or polls state and sees a port is connected) and only gives it
+ * back when it sees a disconnect. Implementation of a more fine-grained model
+ * will require a lot of coordination with user space and thorough testing for
+ * the extra possible cases.
+ */
+static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dig_port)
+{
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+ u32 val;
+
+ if (dig_port->tc_type != TC_PORT_LEGACY &&
+ dig_port->tc_type != TC_PORT_TYPEC)
+ return true;
+
+ val = I915_READ(PORT_TX_DFLEXDPPMS);
+ if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
+ DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
+ return false;
+ }
+
+ /*
+ * This function may be called many times in a row without an HPD event
+ * in between, so try to avoid the write when we can.
+ */
+ val = I915_READ(PORT_TX_DFLEXDPCSSS);
+ if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) {
+ val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
+ I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
+ }
+
+ /*
+ * Now we have to re-check the live state, in case the port recently
+ * became disconnected. Not necessary for legacy mode.
+ */
+ if (dig_port->tc_type == TC_PORT_TYPEC &&
+ !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
+ DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
+ val = I915_READ(PORT_TX_DFLEXDPCSSS);
+ val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
+ I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * See the comment at the connect function. This implements the Disconnect
+ * Flow.
+ */
+static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dig_port)
+{
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+ u32 val;
+
+ if (dig_port->tc_type != TC_PORT_LEGACY &&
+ dig_port->tc_type != TC_PORT_TYPEC)
+ return;
+
+ /*
+ * This function may be called many times in a row without an HPD event
+ * in between, so try to avoid the write when we can.
+ */
+ val = I915_READ(PORT_TX_DFLEXDPCSSS);
+ if (val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port)) {
+ val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
+ I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
+ }
+}
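The connect/disconnect pair above boils down to a short ownership handshake; a condensed sketch of the sequence, using the register and bit names from the hunks above (error handling omitted):

/*
 * Connect (TC_PORT_LEGACY and TC_PORT_TYPEC only):
 *   1. PORT_TX_DFLEXDPPMS: wait for DP_PHY_MODE_STATUS_COMPLETED(tc_port).
 *   2. PORT_TX_DFLEXDPCSSS: set DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) to take
 *      ownership of the PHY (skipped if already set).
 *   3. TC_PORT_TYPEC only: re-check TC_LIVE_STATE_TC(tc_port) in
 *      PORT_TX_DFLEXDPSP and clear the NOT_SAFE bit again if the port just
 *      went away.
 *
 * Disconnect:
 *   Clear DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) in PORT_TX_DFLEXDPCSSS,
 *   handing the PHY back to the other controllers.
 */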
+
+/*
+ * The type-C ports are different because even when they are connected, they may
+ * not be available/usable by the graphics driver: see the comment on
+ * icl_tc_phy_connect(). So in our driver instead of adding the additional
+ * concept of "usable" and make everything check for "connected and usable" we
+ * define a port as "connected" when it is not only connected, but also when it
+ * is usable by the rest of the driver. That maintains the old assumption that
+ * connected ports are usable, and avoids exposing to the users objects they
+ * can't really use.
+ */
+static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *intel_dig_port)
+{
+ enum port port = intel_dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ bool is_legacy, is_typec, is_tbt;
+ u32 dpsp;
+
+ is_legacy = I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port);
+
+ /*
+ * The spec says we shouldn't be using the ISR bits for distinguishing
+ * between TC and TBT. We should use DFLEXDPSP.
+ */
+ dpsp = I915_READ(PORT_TX_DFLEXDPSP);
+ is_typec = dpsp & TC_LIVE_STATE_TC(tc_port);
+ is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port);
+
+ if (!is_legacy && !is_typec && !is_tbt) {
+ icl_tc_phy_disconnect(dev_priv, intel_dig_port);
+ return false;
+ }
+
+ icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec,
+ is_tbt);
+
+ if (!icl_tc_phy_connect(dev_priv, intel_dig_port))
+ return false;
+
+ return true;
+}
+
+static bool icl_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+
+ switch (encoder->hpd_pin) {
+ case HPD_PORT_A:
+ case HPD_PORT_B:
+ return icl_combo_port_connected(dev_priv, dig_port);
+ case HPD_PORT_C:
+ case HPD_PORT_D:
+ case HPD_PORT_E:
+ case HPD_PORT_F:
+ return icl_tc_port_connected(dev_priv, dig_port);
+ default:
+ MISSING_CASE(encoder->hpd_pin);
+ return false;
+ }
+}
+
/*
* intel_digital_port_connected - is the specified port connected?
* @encoder: intel_encoder
*
+ * In cases where there's a connector physically connected but it can't be used
+ * by our hardware we also return false, since the rest of the driver should
+ * pretty much treat the port as disconnected. This is relevant for type-C
+ * (starting on ICL) where there's ownership involved.
+ *
* Return %true if port is connected, %false otherwise.
*/
bool intel_digital_port_connected(struct intel_encoder *encoder)
@@ -4613,8 +4962,10 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)
return bdw_digital_port_connected(encoder);
else if (IS_GEN9_LP(dev_priv))
return bxt_digital_port_connected(encoder);
- else
+ else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv))
return spt_digital_port_connected(encoder);
+ else
+ return icl_digital_port_connected(encoder);
}
static struct edid *
@@ -4661,7 +5012,8 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
}
static int
-intel_dp_long_pulse(struct intel_connector *connector)
+intel_dp_long_pulse(struct intel_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
@@ -4720,6 +5072,22 @@ intel_dp_long_pulse(struct intel_connector *connector)
*/
status = connector_status_disconnected;
goto out;
+ } else {
+ /*
+ * If display is now connected check links status,
+ * there has been known issues of link loss triggering
+ * long pulse.
+ *
+ * Some sinks (eg. ASUS PB287Q) seem to perform some
+ * weird HPD ping pong during modesets. So we can apparently
+ * end up with HPD going low during a modeset, and then
+ * going back up soon after. And once that happens we must
+ * retrain the link to get a picture. That's in case no
+ * userspace component reacted to intermittent HPD dip.
+ */
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+ intel_dp_retrain_link(encoder, ctx);
}
/*
@@ -4781,7 +5149,7 @@ intel_dp_detect(struct drm_connector *connector,
return ret;
}
- status = intel_dp_long_pulse(intel_dp->attached_connector);
+ status = intel_dp_long_pulse(intel_dp->attached_connector, ctx);
}
intel_dp->detect_done = false;
@@ -5172,7 +5540,7 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -5193,7 +5561,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
enum pipe pipe;
@@ -5260,7 +5628,7 @@ enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum irqreturn ret = IRQ_NONE;
if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
@@ -5376,7 +5744,7 @@ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
struct pps_registers regs;
@@ -5444,7 +5812,7 @@ intel_pps_verify_state(struct intel_dp *intel_dp)
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct edp_power_seq cur, vbt, spec,
*final = &intel_dp->pps_delays;
@@ -5537,7 +5905,7 @@ static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
bool force_disable_vdd)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 pp_on, pp_off, pp_div, port_sel = 0;
int div = dev_priv->rawclk_freq / 1000;
struct pps_registers regs;
@@ -5633,7 +6001,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
static void intel_dp_pps_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_initial_power_sequencer_setup(intel_dp);
@@ -5750,7 +6118,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (!crtc_state->has_drrs) {
DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
@@ -5785,7 +6153,7 @@ unlock:
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (!old_crtc_state->has_drrs)
return;
@@ -6017,8 +6385,8 @@ intel_dp_drrs_init(struct intel_connector *connector,
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *intel_connector)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector = &intel_connector->base;
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
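Most of the hunks above are a mechanical conversion from the open-coded two-step lookup to the dp_to_i915() helper added by this patch in intel_drv.h (see the hunk further down). For reference, the two forms are equivalent:

/*
 * Before: struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
 * After:  struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 *
 * dp_to_i915() expands to to_i915(dp_to_dig_port(intel_dp)->base.base.dev),
 * so both resolve to the same drm_i915_private pointer.
 */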
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 4da6e33c7fa1..a9f40985a621 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -129,7 +129,8 @@ static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
{
uint8_t voltage;
- int voltage_tries, max_vswing_tries;
+ int voltage_tries, cr_tries, max_cr_tries;
+ bool max_vswing_reached = false;
uint8_t link_config[2];
uint8_t link_bw, rate_select;
@@ -170,9 +171,21 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
return false;
}
+ /*
+ * The DP 1.4 spec defines the max clock recovery retries value
+ * as 10 but for pre-DP 1.4 devices we set a very tolerant
+ * retry limit of 80 (4 voltage levels x 4 preemphasis levels x
+ * 5 identical voltage retries). Since the previous specs didn't
+ * define a limit and created the possibility of an infinite loop
+ * we want to prevent any sink from triggering that corner case.
+ */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
+ max_cr_tries = 10;
+ else
+ max_cr_tries = 80;
+
voltage_tries = 1;
- max_vswing_tries = 0;
- for (;;) {
+ for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
uint8_t link_status[DP_LINK_STATUS_SIZE];
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
@@ -192,7 +205,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
return false;
}
- if (max_vswing_tries == 1) {
+ if (max_vswing_reached) {
DRM_DEBUG_KMS("Max Voltage Swing reached\n");
return false;
}
@@ -213,9 +226,11 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
voltage_tries = 1;
if (intel_dp_link_max_vswing_reached(intel_dp))
- ++max_vswing_tries;
+ max_vswing_reached = true;
}
+ DRM_ERROR("Failed clock recovery %d times, giving up!\n", max_cr_tries);
+ return false;
}
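The pre-DP 1.4 bound of 80 used above follows directly from the arithmetic in the comment; a tiny illustrative check (the macro names below are not from the driver):

#define ILLUSTRATIVE_VSWING_LEVELS		4	/* voltage swing levels */
#define ILLUSTRATIVE_PREEMPH_LEVELS		4	/* pre-emphasis levels */
#define ILLUSTRATIVE_SAME_VOLTAGE_RETRIES	5	/* identical-voltage retries */

/* 4 * 4 * 5 == 80, the max_cr_tries value for pre-DP 1.4 sinks; DP 1.4 sinks
 * use the spec-defined limit of 10 instead. */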
/*
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 7e3e01607643..43db2e9ac575 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -45,8 +45,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
int lane_count, slots;
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int mst_pbn;
- bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
- DP_DPCD_QUIRK_LIMITED_M_N);
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
+ DP_DPCD_QUIRK_CONSTANT_N);
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
@@ -87,7 +87,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
&pipe_config->dp_m_n,
- reduce_m_n);
+ constant_n);
pipe_config->dp_m_n.tu = slots;
@@ -166,6 +166,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
/* this can fail */
drm_dp_check_act_status(&intel_dp->mst_mgr);
/* and this can also fail */
@@ -241,17 +243,16 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
connector->port,
pipe_config->pbn,
pipe_config->dp_m_n.tu);
- if (ret == false) {
+ if (!ret)
DRM_ERROR("failed to allocate vcpi\n");
- return;
- }
-
intel_dp->active_mst_links++;
temp = I915_READ(DP_TP_STATUS(port));
I915_WRITE(DP_TP_STATUS(port), temp);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+
+ intel_ddi_enable_pipe_clock(pipe_config);
}
static void intel_mst_enable_dp(struct intel_encoder *encoder,
@@ -263,7 +264,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_dig_port->base.port;
- int ret;
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
@@ -274,9 +274,9 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
1))
DRM_ERROR("Timed out waiting for ACT sent\n");
- ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
+ drm_dp_check_act_status(&intel_dp->mst_mgr);
- ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+ drm_dp_update_payload_part2(&intel_dp->mst_mgr);
if (pipe_config->has_audio)
intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index b51ad2917dbe..e6cac9225536 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -2212,6 +2212,20 @@ static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
params->dco_fraction = dco & 0x7fff;
}
+int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
+{
+ int ref_clock = dev_priv->cdclk.hw.ref;
+
+ /*
+ * For ICL+, the spec states: if reference frequency is 38.4,
+ * use 19.2 because the DPLL automatically divides that by 2.
+ */
+ if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
+ ref_clock = 19200;
+
+ return ref_clock;
+}
+
static bool
cnl_ddi_calculate_wrpll(int clock,
struct drm_i915_private *dev_priv,
@@ -2251,14 +2265,7 @@ cnl_ddi_calculate_wrpll(int clock,
cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
- ref_clock = dev_priv->cdclk.hw.ref;
-
- /*
- * For ICL, the spec states: if reference frequency is 38.4, use 19.2
- * because the DPLL automatically divides that by 2.
- */
- if (IS_ICELAKE(dev_priv) && ref_clock == 38400)
- ref_clock = 19200;
+ ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock, pdiv, qdiv,
kdiv);
@@ -2452,6 +2459,16 @@ static const struct skl_wrpll_params icl_dp_combo_pll_19_2MHz_values[] = {
.pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
};
+static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
+ .dco_integer = 0x151, .dco_fraction = 0x4000,
+ .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
+};
+
+static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
+ .dco_integer = 0x1A5, .dco_fraction = 0x7000,
+ .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
+};
+
static bool icl_calc_dp_combo_pll(struct drm_i915_private *dev_priv, int clock,
struct skl_wrpll_params *pll_params)
{
@@ -2494,6 +2511,14 @@ static bool icl_calc_dp_combo_pll(struct drm_i915_private *dev_priv, int clock,
return true;
}
+static bool icl_calc_tbt_pll(struct drm_i915_private *dev_priv, int clock,
+ struct skl_wrpll_params *pll_params)
+{
+ *pll_params = dev_priv->cdclk.hw.ref == 24000 ?
+ icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
+ return true;
+}
+
static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder, int clock,
struct intel_dpll_hw_state *pll_state)
@@ -2503,7 +2528,9 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params pll_params = { 0 };
bool ret;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ if (intel_port_is_tc(dev_priv, encoder->port))
+ ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params);
else
ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params);
@@ -2623,7 +2650,8 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
for (div2 = 10; div2 > 0; div2--) {
int dco = div1 * div2 * clock_khz * 5;
- int a_divratio, tlinedrv, inputsel, hsdiv;
+ int a_divratio, tlinedrv, inputsel;
+ u32 hsdiv;
if (dco < dco_min_freq || dco > dco_max_freq)
continue;
@@ -2642,16 +2670,16 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
MISSING_CASE(div1);
/* fall through */
case 2:
- hsdiv = 0;
+ hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
break;
case 3:
- hsdiv = 1;
+ hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
break;
case 5:
- hsdiv = 2;
+ hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
break;
case 7:
- hsdiv = 3;
+ hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
break;
}
@@ -2665,7 +2693,7 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
state->mg_clktop2_hsclkctl =
MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
- MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO(hsdiv) |
+ hsdiv |
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
return true;
@@ -2846,6 +2874,8 @@ static struct intel_shared_dpll *
icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(&encoder->base);
struct intel_shared_dpll *pll;
struct intel_dpll_hw_state pll_state = {};
enum port port = encoder->port;
@@ -2865,7 +2895,7 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
case PORT_D:
case PORT_E:
case PORT_F:
- if (0 /* TODO: TBT PLLs */) {
+ if (intel_dig_port->tc_type == TC_PORT_TBT) {
min = DPLL_ID_ICL_TBTPLL;
max = min;
ret = icl_calc_dpll_state(crtc_state, encoder, clock,
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index 7e522cf4f13f..bf0de8a4dc63 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -344,5 +344,6 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state);
int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
uint32_t pll_id);
+int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
#endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8fc61e96754f..f5731215210a 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -972,9 +972,8 @@ struct intel_plane {
void (*disable_plane)(struct intel_plane *plane,
struct intel_crtc *crtc);
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
- int (*check_plane)(struct intel_plane *plane,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *state);
+ int (*check_plane)(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
};
struct intel_watermark_params {
@@ -1168,6 +1167,7 @@ struct intel_digital_port {
bool release_cl2_override;
uint8_t max_lanes;
enum intel_display_power_domain ddi_io_power_domain;
+ enum tc_port_type tc_type;
void (*write_infoframe)(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -1314,6 +1314,12 @@ dp_to_lspcon(struct intel_dp *intel_dp)
return &dp_to_dig_port(intel_dp)->lspcon;
}
+static inline struct drm_i915_private *
+dp_to_i915(struct intel_dp *intel_dp)
+{
+ return to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+}
+
static inline struct intel_digital_port *
hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
{
@@ -1717,6 +1723,9 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
+void icl_program_mg_dp_mode(struct intel_dp *intel_dp);
+void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port);
+void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port);
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@@ -1930,6 +1939,9 @@ void intel_psr_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state);
+int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
+ struct drm_modeset_acquire_ctx *ctx,
+ u64 value);
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
@@ -1939,20 +1951,33 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
void intel_psr_init(struct drm_i915_private *dev_priv);
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
-void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug);
+void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug);
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
-int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state);
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
+ u32 *out_value);
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
-void intel_power_domains_fini(struct drm_i915_private *);
+void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
-void intel_power_domains_suspend(struct drm_i915_private *dev_priv);
-void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
+void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
+void intel_power_domains_enable(struct drm_i915_private *dev_priv);
+void intel_power_domains_disable(struct drm_i915_private *dev_priv);
+
+enum i915_drm_suspend_mode {
+ I915_DRM_SUSPEND_IDLE,
+ I915_DRM_SUSPEND_MEM,
+ I915_DRM_SUSPEND_HIBERNATE,
+};
+
+void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
+ enum i915_drm_suspend_mode);
+void intel_power_domains_resume(struct drm_i915_private *dev_priv);
void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_disable(struct drm_i915_private *dev_priv);
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain);
@@ -2030,8 +2055,6 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
-void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
-
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask);
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
@@ -2172,12 +2195,17 @@ void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
/* intel_pipe_crc.c */
#ifdef CONFIG_DEBUG_FS
-int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
- size_t *values_cnt);
+int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
+int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
+ const char *source_name, size_t *values_cnt);
+const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count);
void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
#else
#define intel_crtc_set_crc_source NULL
+#define intel_crtc_verify_crc_source NULL
+#define intel_crtc_get_crc_sources NULL
static inline void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
{
}
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 2d1952849d69..10cd051ba29e 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -513,7 +513,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine,
goto err_unref;
}
- ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
+ ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (ret)
goto err_unref;
@@ -527,36 +527,19 @@ err_unref:
void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
- i915_vma_unpin_and_release(&engine->scratch);
-}
-
-static void cleanup_phys_status_page(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- if (!dev_priv->status_page_dmah)
- return;
-
- drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
- engine->status_page.page_addr = NULL;
+ i915_vma_unpin_and_release(&engine->scratch, 0);
}
static void cleanup_status_page(struct intel_engine_cs *engine)
{
- struct i915_vma *vma;
- struct drm_i915_gem_object *obj;
-
- vma = fetch_and_zero(&engine->status_page.vma);
- if (!vma)
- return;
-
- obj = vma->obj;
+ if (HWS_NEEDS_PHYSICAL(engine->i915)) {
+ void *addr = fetch_and_zero(&engine->status_page.page_addr);
- i915_vma_unpin(vma);
- i915_vma_close(vma);
+ __free_page(virt_to_page(addr));
+ }
- i915_gem_object_unpin_map(obj);
- __i915_gem_object_release_unless_active(obj);
+ i915_vma_unpin_and_release(&engine->status_page.vma,
+ I915_VMA_RELEASE_MAP);
}
static int init_status_page(struct intel_engine_cs *engine)
@@ -598,7 +581,7 @@ static int init_status_page(struct intel_engine_cs *engine)
flags |= PIN_MAPPABLE;
else
flags |= PIN_HIGH;
- ret = i915_vma_pin(vma, 0, 4096, flags);
+ ret = i915_vma_pin(vma, 0, 0, flags);
if (ret)
goto err;
@@ -622,17 +605,18 @@ err:
static int init_phys_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct page *page;
- GEM_BUG_ON(engine->id != RCS);
-
- dev_priv->status_page_dmah =
- drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
- if (!dev_priv->status_page_dmah)
+ /*
+ * Though the HWS register does support 36bit addresses, historically
+ * we have had hangs and corruption reported due to wild writes if
+ * the HWS is placed above 4G.
+ */
+ page = alloc_page(GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO);
+ if (!page)
return -ENOMEM;
- engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
- memset(engine->status_page.page_addr, 0, PAGE_SIZE);
+ engine->status_page.page_addr = page_address(page);
return 0;
}
@@ -722,10 +706,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
intel_engine_cleanup_scratch(engine);
- if (HWS_NEEDS_PHYSICAL(engine->i915))
- cleanup_phys_status_page(engine);
- else
- cleanup_status_page(engine);
+ cleanup_status_page(engine);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
@@ -800,6 +781,16 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
return err;
}
+void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ I915_WRITE_FW(RING_MI_MODE(engine->mmio_base),
+ _MASKED_BIT_DISABLE(STOP_RING));
+}
+
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
switch (type) {
@@ -980,8 +971,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return true;
/* Any inflight/incomplete requests? */
- if (!i915_seqno_passed(intel_engine_get_seqno(engine),
- intel_engine_last_submit(engine)))
+ if (!intel_engine_signaled(engine, intel_engine_last_submit(engine)))
return false;
if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
@@ -1348,20 +1338,19 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
if (HAS_EXECLISTS(dev_priv)) {
const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
- u32 ptr, read, write;
unsigned int idx;
+ u8 read, write;
drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
I915_READ(RING_EXECLIST_STATUS_LO(engine)),
I915_READ(RING_EXECLIST_STATUS_HI(engine)));
- ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
- read = GEN8_CSB_READ_PTR(ptr);
- write = GEN8_CSB_WRITE_PTR(ptr);
- drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], tasklet queued? %s (%s)\n",
- read, execlists->csb_head,
- write,
- intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
+ read = execlists->csb_head;
+ write = READ_ONCE(*execlists->csb_write);
+
+ drm_printf(m, "\tExeclist CSB read %d, write %d [mmio:%d], tasklet queued? %s (%s)\n",
+ read, write,
+ GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine))),
yesno(test_bit(TASKLET_STATE_SCHED,
&engine->execlists.tasklet.state)),
enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
@@ -1373,12 +1362,12 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
write += GEN8_CSB_ENTRIES;
while (read < write) {
idx = ++read % GEN8_CSB_ENTRIES;
- drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
+ drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [mmio:0x%08x], context: %d [mmio:%d]\n",
idx,
- I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
hws[idx * 2],
- I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
- hws[idx * 2 + 1]);
+ I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
+ hws[idx * 2 + 1],
+ I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
}
rcu_read_lock();
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 560c7406ae40..230aea69385d 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -27,8 +27,6 @@
#include "intel_guc_submission.h"
#include "i915_drv.h"
-static void guc_init_ggtt_pin_bias(struct intel_guc *guc);
-
static void gen8_guc_raise_irq(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -128,13 +126,15 @@ static int guc_init_wq(struct intel_guc *guc)
static void guc_fini_wq(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct workqueue_struct *wq;
- if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
- USES_GUC_SUBMISSION(dev_priv))
- destroy_workqueue(guc->preempt_wq);
+ wq = fetch_and_zero(&guc->preempt_wq);
+ if (wq)
+ destroy_workqueue(wq);
- destroy_workqueue(guc->log.relay.flush_wq);
+ wq = fetch_and_zero(&guc->log.relay.flush_wq);
+ if (wq)
+ destroy_workqueue(wq);
}
int intel_guc_init_misc(struct intel_guc *guc)
@@ -142,8 +142,6 @@ int intel_guc_init_misc(struct intel_guc *guc)
struct drm_i915_private *i915 = guc_to_i915(guc);
int ret;
- guc_init_ggtt_pin_bias(guc);
-
ret = guc_init_wq(guc);
if (ret)
return ret;
@@ -170,7 +168,7 @@ static int guc_shared_data_create(struct intel_guc *guc)
vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
- i915_vma_unpin_and_release(&vma);
+ i915_vma_unpin_and_release(&vma, 0);
return PTR_ERR(vaddr);
}
@@ -182,8 +180,7 @@ static int guc_shared_data_create(struct intel_guc *guc)
static void guc_shared_data_destroy(struct intel_guc *guc)
{
- i915_gem_object_unpin_map(guc->shared_data->obj);
- i915_vma_unpin_and_release(&guc->shared_data);
+ i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP);
}
int intel_guc_init(struct intel_guc *guc)
@@ -584,53 +581,28 @@ int intel_guc_resume(struct intel_guc *guc)
*
* ::
*
- * +==============> +====================+ <== GUC_GGTT_TOP
- * ^ | |
- * | | |
- * | | DRAM |
- * | | Memory |
- * | | |
- * GuC | |
- * Address +========> +====================+ <== WOPCM Top
- * Space ^ | HW contexts RSVD |
- * | | | WOPCM |
- * | | +==> +--------------------+ <== GuC WOPCM Top
- * | GuC ^ | |
- * | GGTT | | |
- * | Pin GuC | GuC |
- * | Bias WOPCM | WOPCM |
- * | | Size | |
- * | | | | |
- * v v v | |
- * +=====+=====+==> +====================+ <== GuC WOPCM Base
- * | Non-GuC WOPCM |
- * | (HuC/Reserved) |
- * +====================+ <== WOPCM Base
+ * +===========> +====================+ <== FFFF_FFFF
+ * ^ | Reserved |
+ * | +====================+ <== GUC_GGTT_TOP
+ * | | |
+ * | | DRAM |
+ * GuC | |
+ * Address +===> +====================+ <== GuC ggtt_pin_bias
+ * Space ^ | |
+ * | | | |
+ * | GuC | GuC |
+ * | WOPCM | WOPCM |
+ * | Size | |
+ * | | | |
+ * v v | |
+ * +=======+===> +====================+ <== 0000_0000
*
- * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to WOPCM
+ * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
* while upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped
- * to DRAM. The value of the GuC ggtt_pin_bias is determined by WOPCM size and
- * actual GuC WOPCM size.
+ * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
*/
/**
- * guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value.
- * @guc: intel_guc structure.
- *
- * This function will calculate and initialize the ggtt_pin_bias value based on
- * overall WOPCM size and GuC WOPCM size.
- */
-static void guc_init_ggtt_pin_bias(struct intel_guc *guc)
-{
- struct drm_i915_private *i915 = guc_to_i915(guc);
-
- GEM_BUG_ON(!i915->wopcm.size);
- GEM_BUG_ON(i915->wopcm.size < i915->wopcm.guc.base);
-
- guc->ggtt_pin_bias = i915->wopcm.size - i915->wopcm.guc.base;
-}
-
-/**
* intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
* @guc: the guc
* @size: size of area to allocate (both virtual space and memory)
@@ -648,6 +620,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
+ u64 flags;
int ret;
obj = i915_gem_object_create(dev_priv, size);
@@ -658,8 +631,8 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
if (IS_ERR(vma))
goto err;
- ret = i915_vma_pin(vma, 0, PAGE_SIZE,
- PIN_GLOBAL | PIN_OFFSET_BIAS | guc->ggtt_pin_bias);
+ flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+ ret = i915_vma_pin(vma, 0, 0, flags);
if (ret) {
vma = ERR_PTR(ret);
goto err;
@@ -671,3 +644,20 @@ err:
i915_gem_object_put(obj);
return vma;
}
+
+/**
+ * intel_guc_reserved_gtt_size()
+ * @guc: intel_guc structure
+ *
+ * The GuC WOPCM mapping shadows the lower part of the GGTT, so if we are using
+ * GuC we can't have any objects pinned in that region. This function returns
+ * the size of the shadowed region.
+ *
+ * Returns:
+ * 0 if GuC is not present or not in use.
+ * Otherwise, the GuC WOPCM size.
+ */
+u32 intel_guc_reserved_gtt_size(struct intel_guc *guc)
+{
+ return guc_to_i915(guc)->wopcm.guc.size;
+}
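The address-space comment earlier in this file reflects the new bias model: guc_init_ggtt_pin_bias() (removed above) derived the bias from the overall WOPCM layout, whereas the bias now simply equals the GuC WOPCM size, applied per-vma at pin time and exposed to the GGTT setup code through intel_guc_reserved_gtt_size(). A compressed view of the two forms, taken from the hunks above:

/*
 * Old: guc->ggtt_pin_bias = i915->wopcm.size - i915->wopcm.guc.base;
 *
 * New: pinning uses PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma),
 *      and intel_guc_reserved_gtt_size() returns i915->wopcm.guc.size so the
 *      shadowed [0, bias) range can be kept free of GGTT objects.
 */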
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 4121928a495e..ad42faf48c46 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -49,9 +49,6 @@ struct intel_guc {
struct intel_guc_log log;
struct intel_guc_ct ct;
- /* Offset where Non-WOPCM memory starts. */
- u32 ggtt_pin_bias;
-
/* Log snapshot if GuC errors during load */
struct drm_i915_gem_object *load_err_log;
@@ -130,10 +127,10 @@ static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
* @vma: i915 graphics virtual memory area.
*
* GuC does not allow any gfx GGTT address that falls into range
- * [0, GuC ggtt_pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
- * Currently, in order to exclude [0, GuC ggtt_pin_bias) address space from
+ * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
+ * Currently, in order to exclude [0, ggtt.pin_bias) address space from
* GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma()
- * and pinned with PIN_OFFSET_BIAS along with the value of GuC ggtt_pin_bias.
+ * and pinned with PIN_OFFSET_BIAS along with the value of ggtt.pin_bias.
*
* Return: GGTT offset of the @vma.
*/
@@ -142,7 +139,7 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
{
u32 offset = i915_ggtt_offset(vma);
- GEM_BUG_ON(offset < guc->ggtt_pin_bias);
+ GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
return offset;
@@ -168,6 +165,7 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
+u32 intel_guc_reserved_gtt_size(struct intel_guc *guc);
static inline int intel_guc_sanitize(struct intel_guc *guc)
{
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c
index dcaa3fb71765..f0db62887f50 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/intel_guc_ads.c
@@ -148,5 +148,5 @@ int intel_guc_ads_create(struct intel_guc *guc)
void intel_guc_ads_destroy(struct intel_guc *guc)
{
- i915_vma_unpin_and_release(&guc->ads_vma);
+ i915_vma_unpin_and_release(&guc->ads_vma, 0);
}
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index 371b6005954a..a52883e9146f 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -204,7 +204,7 @@ static int ctch_init(struct intel_guc *guc,
return 0;
err_vma:
- i915_vma_unpin_and_release(&ctch->vma);
+ i915_vma_unpin_and_release(&ctch->vma, 0);
err_out:
CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
ctch->owner, err);
@@ -214,10 +214,7 @@ err_out:
static void ctch_fini(struct intel_guc *guc,
struct intel_guc_ct_channel *ctch)
{
- GEM_BUG_ON(!ctch->vma);
-
- i915_gem_object_unpin_map(ctch->vma->obj);
- i915_vma_unpin_and_release(&ctch->vma);
+ i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP);
}
static int ctch_open(struct intel_guc *guc,
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 1a0f2a39cef9..8382d591c784 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -49,6 +49,7 @@
#define WQ_TYPE_BATCH_BUF (0x1 << WQ_TYPE_SHIFT)
#define WQ_TYPE_PSEUDO (0x2 << WQ_TYPE_SHIFT)
#define WQ_TYPE_INORDER (0x3 << WQ_TYPE_SHIFT)
+#define WQ_TYPE_NOOP (0x4 << WQ_TYPE_SHIFT)
#define WQ_TARGET_SHIFT 10
#define WQ_LEN_SHIFT 16
#define WQ_NO_WCFLUSH_WAIT (1 << 27)
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 6da61a71d28f..d3ebdbc0182e 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -498,7 +498,7 @@ err:
void intel_guc_log_destroy(struct intel_guc_log *log)
{
- i915_vma_unpin_and_release(&log->vma);
+ i915_vma_unpin_and_release(&log->vma, 0);
}
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 4aa5e6463e7b..07b9d313b019 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -317,7 +317,7 @@ static int guc_stage_desc_pool_create(struct intel_guc *guc)
vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
- i915_vma_unpin_and_release(&vma);
+ i915_vma_unpin_and_release(&vma, 0);
return PTR_ERR(vaddr);
}
@@ -331,8 +331,7 @@ static int guc_stage_desc_pool_create(struct intel_guc *guc)
static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
{
ida_destroy(&guc->stage_ids);
- i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
- i915_vma_unpin_and_release(&guc->stage_desc_pool);
+ i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP);
}
/*
@@ -457,6 +456,9 @@ static void guc_wq_item_append(struct intel_guc_client *client,
*/
BUILD_BUG_ON(wqi_size != 16);
+ /* We expect the WQ to be active if we're appending items to it */
+ GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE);
+
/* Free space is guaranteed. */
wq_off = READ_ONCE(desc->tail);
GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
@@ -466,15 +468,19 @@ static void guc_wq_item_append(struct intel_guc_client *client,
/* WQ starts from the page after doorbell / process_desc */
wqi = client->vaddr + wq_off + GUC_DB_SIZE;
- /* Now fill in the 4-word work queue item */
- wqi->header = WQ_TYPE_INORDER |
- (wqi_len << WQ_LEN_SHIFT) |
- (target_engine << WQ_TARGET_SHIFT) |
- WQ_NO_WCFLUSH_WAIT;
- wqi->context_desc = context_desc;
- wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
- GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
- wqi->fence_id = fence_id;
+ if (I915_SELFTEST_ONLY(client->use_nop_wqi)) {
+ wqi->header = WQ_TYPE_NOOP | (wqi_len << WQ_LEN_SHIFT);
+ } else {
+ /* Now fill in the 4-word work queue item */
+ wqi->header = WQ_TYPE_INORDER |
+ (wqi_len << WQ_LEN_SHIFT) |
+ (target_engine << WQ_TARGET_SHIFT) |
+ WQ_NO_WCFLUSH_WAIT;
+ wqi->context_desc = context_desc;
+ wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
+ GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
+ wqi->fence_id = fence_id;
+ }
/* Make the update visible to GuC */
WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
@@ -1008,7 +1014,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
err_vaddr:
i915_gem_object_unpin_map(client->vma->obj);
err_vma:
- i915_vma_unpin_and_release(&client->vma);
+ i915_vma_unpin_and_release(&client->vma, 0);
err_id:
ida_simple_remove(&guc->stage_ids, client->stage_id);
err_client:
@@ -1020,8 +1026,7 @@ static void guc_client_free(struct intel_guc_client *client)
{
unreserve_doorbell(client);
guc_stage_desc_fini(client->guc, client);
- i915_gem_object_unpin_map(client->vma->obj);
- i915_vma_unpin_and_release(&client->vma);
+ i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
ida_simple_remove(&client->guc->stage_ids, client->stage_id);
kfree(client);
}
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.h b/drivers/gpu/drm/i915/intel_guc_submission.h
index fb081cefef93..169c54568340 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/intel_guc_submission.h
@@ -28,6 +28,7 @@
#include <linux/spinlock.h>
#include "i915_gem.h"
+#include "i915_selftest.h"
struct drm_i915_private;
@@ -71,6 +72,9 @@ struct intel_guc_client {
spinlock_t wq_lock;
/* Per-engine counts of GuC submissions */
u64 submissions[I915_NUM_ENGINES];
+
+ /* For testing purposes, use nop WQ items instead of real ones */
+ I915_SELFTEST_DECLARE(bool use_nop_wqi);
};
int intel_guc_submission_init(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index 2fc7a0dd0df9..e26d05a46451 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -142,7 +142,7 @@ static int semaphore_passed(struct intel_engine_cs *engine)
if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
return -1;
- if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
+ if (intel_engine_signaled(signaller, seqno))
return 1;
/* cursory check for an unkickable deadlock */
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index 0cc6a861bcf8..26e48fc95543 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -57,9 +57,9 @@ static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
/* PG1 (power well #1) needs to be enabled */
for_each_power_well(dev_priv, power_well) {
- if (power_well->id == id) {
- enabled = power_well->ops->is_enabled(dev_priv,
- power_well);
+ if (power_well->desc->id == id) {
+ enabled = power_well->desc->ops->is_enabled(dev_priv,
+ power_well);
break;
}
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a9076402dcb0..a2dab0b6bde6 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -943,8 +943,12 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
ret = i2c_transfer(adapter, &msg, 1);
if (ret == 1)
- return 0;
- return ret >= 0 ? -EIO : ret;
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+
+ kfree(write_buf);
+ return ret;
}
static
@@ -1907,22 +1911,26 @@ intel_hdmi_set_edid(struct drm_connector *connector)
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
- enum drm_connector_status status;
+ enum drm_connector_status status = connector_status_disconnected;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+ if (IS_ICELAKE(dev_priv) &&
+ !intel_digital_port_connected(encoder))
+ goto out;
+
intel_hdmi_unset_edid(connector);
if (intel_hdmi_set_edid(connector))
status = connector_status_connected;
- else
- status = connector_status_disconnected;
+out:
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
if (status != connector_status_connected)
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index ffcad5fad6a7..37ef540dd280 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -63,7 +63,7 @@ int intel_huc_auth(struct intel_huc *huc)
return -ENOEXEC;
vma = i915_gem_object_ggtt_pin(huc->fw.obj, NULL, 0, 0,
- PIN_OFFSET_BIAS | guc->ggtt_pin_bias);
+ PIN_OFFSET_BIAS | i915->ggtt.pin_bias);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
DRM_ERROR("HuC: Failed to pin huc fw object %d\n", ret);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index bef32b7c248e..33d87ab93fdd 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -37,7 +37,7 @@
struct gmbus_pin {
const char *name;
- i915_reg_t reg;
+ enum i915_gpio gpio;
};
/* Map gmbus pin pairs to names and registers. */
@@ -121,8 +121,7 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
else
size = ARRAY_SIZE(gmbus_pins);
- return pin < size &&
- i915_mmio_reg_valid(get_gmbus_pin(dev_priv, pin)->reg);
+ return pin < size && get_gmbus_pin(dev_priv, pin)->name;
}
/* Intel GPIO access functions */
@@ -292,8 +291,7 @@ intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin)
algo = &bus->bit_algo;
- bus->gpio_reg = _MMIO(dev_priv->gpio_mmio_base +
- i915_mmio_reg_offset(get_gmbus_pin(dev_priv, pin)->reg));
+ bus->gpio_reg = GPIO(get_gmbus_pin(dev_priv, pin)->gpio);
bus->adapter.algo_data = algo;
algo->setsda = set_data;
algo->setscl = set_clock;
@@ -825,9 +823,11 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
else if (!HAS_GMCH_DISPLAY(dev_priv))
- dev_priv->gpio_mmio_base =
- i915_mmio_reg_offset(PCH_GPIOA) -
- i915_mmio_reg_offset(GPIOA);
+ /*
+ * Broxton uses the same PCH offsets for South Display Engine,
+ * even though it doesn't have a PCH.
+ */
+ dev_priv->gpio_mmio_base = PCH_DISPLAY_BASE;
mutex_init(&dev_priv->gmbus_mutex);
init_waitqueue_head(&dev_priv->gmbus_wait_queue);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 174479232e94..9b1f0e5211a0 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -541,11 +541,6 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
GEM_BUG_ON(execlists->preempt_complete_status !=
upper_32_bits(ce->lrc_desc));
- GEM_BUG_ON((ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1] &
- _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
- CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)) !=
- _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
- CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT));
/*
* Switch to our empty preempt context so
@@ -1277,6 +1272,8 @@ static void execlists_context_destroy(struct intel_context *ce)
static void execlists_context_unpin(struct intel_context *ce)
{
+ i915_gem_context_unpin_hw_id(ce->gem_context);
+
intel_ring_unpin(ce->ring);
ce->state->obj->pin_global--;
@@ -1303,10 +1300,9 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
}
flags = PIN_GLOBAL | PIN_HIGH;
- if (ctx->ggtt_offset_bias)
- flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias;
+ flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
- return i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags);
+ return i915_vma_pin(vma, 0, 0, flags);
}
static struct intel_context *
@@ -1332,10 +1328,14 @@ __execlists_context_pin(struct intel_engine_cs *engine,
goto unpin_vma;
}
- ret = intel_ring_pin(ce->ring, ctx->i915, ctx->ggtt_offset_bias);
+ ret = intel_ring_pin(ce->ring);
if (ret)
goto unpin_map;
+ ret = i915_gem_context_pin_hw_id(ctx);
+ if (ret)
+ goto unpin_ring;
+
intel_lr_context_descriptor_update(ctx, engine, ce);
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
@@ -1348,6 +1348,8 @@ __execlists_context_pin(struct intel_engine_cs *engine,
i915_gem_context_get(ctx);
return ce;
+unpin_ring:
+ intel_ring_unpin(ce->ring);
unpin_map:
i915_gem_object_unpin_map(ce->state->obj);
unpin_vma:
@@ -1643,7 +1645,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
goto err;
}
- err = i915_vma_pin(vma, 0, PAGE_SIZE, PIN_GLOBAL | PIN_HIGH);
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
goto err;
@@ -1657,7 +1659,7 @@ err:
static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
{
- i915_vma_unpin_and_release(&engine->wa_ctx.vma);
+ i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
}
typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
@@ -1775,11 +1777,7 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
- int ret;
-
- ret = intel_mocs_init_engine(engine);
- if (ret)
- return ret;
+ intel_mocs_init_engine(engine);
intel_engine_reset_breadcrumbs(engine);
@@ -1838,7 +1836,8 @@ execlists_reset_prepare(struct intel_engine_cs *engine)
struct i915_request *request, *active;
unsigned long flags;
- GEM_TRACE("%s\n", engine->name);
+ GEM_TRACE("%s: depth<-%d\n", engine->name,
+ atomic_read(&execlists->tasklet.count));
/*
* Prevent request submission to the hardware until we have
@@ -1971,22 +1970,18 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- /* After a GPU reset, we may have requests to replay */
- if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
- tasklet_schedule(&execlists->tasklet);
-
/*
- * Flush the tasklet while we still have the forcewake to be sure
- * that it is not allowed to sleep before we restart and reload a
- * context.
+ * After a GPU reset, we may have requests to replay. Do so now while
+ * we still have the forcewake to be sure that the GPU is not allowed
+ * to sleep before we restart and reload a context.
*
- * As before (with execlists_reset_prepare) we rely on the caller
- * serialising multiple attempts to reset so that we know that we
- * are the only one manipulating tasklet state.
*/
- __tasklet_enable_sync_once(&execlists->tasklet);
+ if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
+ execlists->tasklet.func(execlists->tasklet.data);
- GEM_TRACE("%s\n", engine->name);
+ tasklet_enable(&execlists->tasklet);
+ GEM_TRACE("%s: depth->%d\n", engine->name,
+ atomic_read(&execlists->tasklet.count));
}
static int intel_logical_ring_emit_pdps(struct i915_request *rq)
@@ -2066,8 +2061,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
/* FIXME(BDW): Address space and security selectors. */
*cs++ = MI_BATCH_BUFFER_START_GEN8 |
- (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)) |
- (flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
@@ -2494,6 +2488,9 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine)
static u32
make_rpcs(struct drm_i915_private *dev_priv)
{
+ bool subslice_pg = INTEL_INFO(dev_priv)->sseu.has_subslice_pg;
+ u8 slices = hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask);
+ u8 subslices = hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]);
u32 rpcs = 0;
/*
@@ -2504,30 +2501,88 @@ make_rpcs(struct drm_i915_private *dev_priv)
return 0;
/*
+ * Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits
+ * wide and Icelake has up to eight subslices, special programming is
+ * needed in order to correctly enable all subslices.
+ *
+ * According to the documentation, software must consider the configuration
+ * as 2x4x8 and hardware will translate this to 1x8x8.
+ *
+ * Furthermore, even though SScount is three bits, the maximum documented
+ * value for it is four. From this, some rules/restrictions follow:
+ *
+ * 1.
+ * If enabled subslice count is greater than four, two whole slices must
+ * be enabled instead.
+ *
+ * 2.
+ * When more than one slice is enabled, hardware ignores the subslice
+ * count altogether.
+ *
+ * From these restrictions it follows that it is not possible to enable
+ * a subslice count between the SScount maximum of four and the maximum
+ * number available on a particular SKU. Either all subslices are
+ * enabled, or a count between one and four on the first slice is used.
+ */
+ if (IS_GEN11(dev_priv) && slices == 1 && subslices >= 4) {
+ GEM_BUG_ON(subslices & 1);
+
+ subslice_pg = false;
+ slices *= 2;
+ }
+
+ /*
* Starting in Gen9, render power gating can leave
* slice/subslice/EU in a partially enabled state. We
* must make an explicit request through RPCS for full
* enablement.
*/
if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
- rpcs |= GEN8_RPCS_S_CNT_ENABLE;
- rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) <<
- GEN8_RPCS_S_CNT_SHIFT;
- rpcs |= GEN8_RPCS_ENABLE;
+ u32 mask, val = slices;
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ mask = GEN11_RPCS_S_CNT_MASK;
+ val <<= GEN11_RPCS_S_CNT_SHIFT;
+ } else {
+ mask = GEN8_RPCS_S_CNT_MASK;
+ val <<= GEN8_RPCS_S_CNT_SHIFT;
+ }
+
+ GEM_BUG_ON(val & ~mask);
+ val &= mask;
+
+ rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_S_CNT_ENABLE | val;
}
- if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) {
- rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
- rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]) <<
- GEN8_RPCS_SS_CNT_SHIFT;
- rpcs |= GEN8_RPCS_ENABLE;
+ if (subslice_pg) {
+ u32 val = subslices;
+
+ val <<= GEN8_RPCS_SS_CNT_SHIFT;
+
+ GEM_BUG_ON(val & ~GEN8_RPCS_SS_CNT_MASK);
+ val &= GEN8_RPCS_SS_CNT_MASK;
+
+ rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val;
}
if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
- rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
- GEN8_RPCS_EU_MIN_SHIFT;
- rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
- GEN8_RPCS_EU_MAX_SHIFT;
+ u32 val;
+
+ val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
+ GEN8_RPCS_EU_MIN_SHIFT;
+ GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
+ val &= GEN8_RPCS_EU_MIN_MASK;
+
+ rpcs |= val;
+
+ val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
+ GEN8_RPCS_EU_MAX_SHIFT;
+ GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
+ val &= GEN8_RPCS_EU_MAX_MASK;
+
+ rpcs |= val;
+
rpcs |= GEN8_RPCS_ENABLE;
}
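A worked example may help ground the comment above (a hedged sketch, not part of the patch; the sseu numbers below are made up): on a hypothetical ICL 1x8x8 part, slice_mask gives slices = 1 and subslice_mask[0] gives subslices = 8, so the code reports two slices with subslice power gating off and lets the hardware translate the 2x4x8 request back to 1x8x8.

	/* Hedged userspace model of the ICL SScount workaround described above. */
	#include <stdbool.h>
	#include <stdio.h>

	struct sseu { unsigned int slices, subslices; bool subslice_pg; };

	static void icl_adjust_rpcs_counts(struct sseu *s)
	{
		/* SScount is 3 bits wide and documented only up to 4, so a
		 * request for more than 4 subslices on one slice is expressed
		 * as two whole slices with subslice power gating disabled. */
		if (s->slices == 1 && s->subslices >= 4) {
			s->subslice_pg = false;
			s->slices *= 2;
		}
	}

	int main(void)
	{
		struct sseu s = { .slices = 1, .subslices = 8, .subslice_pg = true };

		icl_adjust_rpcs_counts(&s);
		printf("S_CNT=%u, subslice power gating %s\n",
		       s.slices, s.subslice_pg ? "on" : "off");
		return 0;
	}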
@@ -2584,11 +2639,13 @@ static void execlists_init_reg_state(u32 *regs,
MI_LRI_FORCE_POSTED;
CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
- _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
- CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT) |
- _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
- (HAS_RESOURCE_STREAMER(dev_priv) ?
- CTX_CTRL_RS_CTX_ENABLE : 0)));
+ _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
+ _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
+ if (INTEL_GEN(dev_priv) < 11) {
+ regs[CTX_CONTEXT_CONTROL + 1] |=
+ _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
+ CTX_CTRL_RS_CTX_ENABLE);
+ }
CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0);
CTX_REG(regs, CTX_RING_TAIL, RING_TAIL(base), 0);
CTX_REG(regs, CTX_RING_BUFFER_START, RING_START(base), 0);
@@ -2654,6 +2711,10 @@ static void execlists_init_reg_state(u32 *regs,
i915_oa_init_reg_state(engine, ctx, regs);
}
+
+ regs[CTX_END] = MI_BATCH_BUFFER_END;
+ if (INTEL_GEN(dev_priv) >= 10)
+ regs[CTX_END] |= BIT(0);
}
static int
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 4dfb78e3ec7e..f5a5502ecf70 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -27,8 +27,6 @@
#include "intel_ringbuffer.h"
#include "i915_gem_context.h"
-#define GEN8_LR_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT
-
/* Execlists regs */
#define RING_ELSP(engine) _MMIO((engine)->mmio_base + 0x230)
#define RING_EXECLIST_STATUS_LO(engine) _MMIO((engine)->mmio_base + 0x234)
diff --git a/drivers/gpu/drm/i915/intel_lrc_reg.h b/drivers/gpu/drm/i915/intel_lrc_reg.h
index 169a2239d6c7..5ef932d810a7 100644
--- a/drivers/gpu/drm/i915/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/intel_lrc_reg.h
@@ -37,7 +37,7 @@
#define CTX_PDP0_LDW 0x32
#define CTX_LRI_HEADER_2 0x41
#define CTX_R_PWR_CLK_STATE 0x42
-#define CTX_GPGPU_CSR_BASE_ADDRESS 0x44
+#define CTX_END 0x44
#define CTX_REG(reg_state, pos, reg, val) do { \
u32 *reg_state__ = (reg_state); \
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 5dae16ccd9f1..3e085c5f2b81 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -74,7 +74,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n",
lspcon_mode_name(mode));
- wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100);
+ wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400);
if (current_mode != mode)
DRM_ERROR("LSPCON mode hasn't settled\n");
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 9f0bd6a4cb79..77e9871a8c9a 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -232,20 +232,17 @@ static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
*
* This function simply emits a MI_LOAD_REGISTER_IMM command for the
* given table starting at the given address.
- *
- * Return: 0 on success, otherwise the error status.
*/
-int intel_mocs_init_engine(struct intel_engine_cs *engine)
+void intel_mocs_init_engine(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_mocs_table table;
unsigned int index;
if (!get_mocs_settings(dev_priv, &table))
- return 0;
+ return;
- if (WARN_ON(table.size > GEN9_NUM_MOCS_ENTRIES))
- return -ENODEV;
+ GEM_BUG_ON(table.size > GEN9_NUM_MOCS_ENTRIES);
for (index = 0; index < table.size; index++)
I915_WRITE(mocs_register(engine->id, index),
@@ -262,8 +259,6 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
for (; index < GEN9_NUM_MOCS_ENTRIES; index++)
I915_WRITE(mocs_register(engine->id, index),
table.table[0].control_value);
-
- return 0;
}
/**
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
index d1751f91c1a4..d89080d75b80 100644
--- a/drivers/gpu/drm/i915/intel_mocs.h
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -54,6 +54,6 @@
int intel_rcs_context_init_mocs(struct i915_request *rq);
void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv);
-int intel_mocs_init_engine(struct intel_engine_cs *engine);
+void intel_mocs_init_engine(struct intel_engine_cs *engine);
#endif
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index 849e1b69ba73..f3c9010e332a 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -468,8 +468,122 @@ void intel_display_crc_init(struct drm_i915_private *dev_priv)
}
}
-int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
- size_t *values_cnt)
+static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ switch (source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ switch (source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ case INTEL_PIPE_CRC_SOURCE_TV:
+ case INTEL_PIPE_CRC_SOURCE_DP_B:
+ case INTEL_PIPE_CRC_SOURCE_DP_C:
+ case INTEL_PIPE_CRC_SOURCE_DP_D:
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vlv_crc_source_valid(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ switch (source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ case INTEL_PIPE_CRC_SOURCE_DP_B:
+ case INTEL_PIPE_CRC_SOURCE_DP_C:
+ case INTEL_PIPE_CRC_SOURCE_DP_D:
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ilk_crc_source_valid(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ switch (source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ switch (source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ case INTEL_PIPE_CRC_SOURCE_PF:
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ if (IS_GEN2(dev_priv))
+ return i8xx_crc_source_valid(dev_priv, source);
+ else if (INTEL_GEN(dev_priv) < 5)
+ return i9xx_crc_source_valid(dev_priv, source);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ return vlv_crc_source_valid(dev_priv, source);
+ else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
+ return ilk_crc_source_valid(dev_priv, source);
+ else
+ return ivb_crc_source_valid(dev_priv, source);
+}
+
+const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count)
+{
+ *count = ARRAY_SIZE(pipe_crc_sources);
+ return pipe_crc_sources;
+}
+
+int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
+ size_t *values_cnt)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum intel_pipe_crc_source source;
+
+ if (display_crc_ctl_parse_source(source_name, &source) < 0) {
+ DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
+ return -EINVAL;
+ }
+
+ if (source == INTEL_PIPE_CRC_SOURCE_AUTO ||
+ intel_is_valid_crc_source(dev_priv, source) == 0) {
+ *values_cnt = 5;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
@@ -508,7 +622,6 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
}
pipe_crc->skipped = 0;
- *values_cnt = 5;
out:
intel_display_power_put(dev_priv, power_domain);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 43ae9de12ba3..d99e5fabe93c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -26,6 +26,7 @@
*/
#include <linux/cpufreq.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
@@ -2942,8 +2943,8 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
unsigned int latency = wm[level];
if (latency == 0) {
- DRM_ERROR("%s WM%d latency not provided\n",
- name, level);
+ DRM_DEBUG_KMS("%s WM%d latency not provided\n",
+ name, level);
continue;
}
@@ -3771,11 +3772,11 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
return true;
}
-static unsigned int intel_get_ddb_size(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *cstate,
- const unsigned int total_data_rate,
- const int num_active,
- struct skl_ddb_allocation *ddb)
+static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *cstate,
+ const unsigned int total_data_rate,
+ const int num_active,
+ struct skl_ddb_allocation *ddb)
{
const struct drm_display_mode *adjusted_mode;
u64 total_data_bw;
@@ -3814,8 +3815,12 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *for_crtc = cstate->base.crtc;
- unsigned int pipe_size, ddb_size;
- int nth_active_pipe;
+ const struct drm_crtc_state *crtc_state;
+ const struct drm_crtc *crtc;
+ u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
+ enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
+ u16 ddb_size;
+ u32 i;
if (WARN_ON(!state) || !cstate->base.active) {
alloc->start = 0;
@@ -3833,14 +3838,14 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
*num_active, ddb);
/*
- * If the state doesn't change the active CRTC's, then there's
- * no need to recalculate; the existing pipe allocation limits
- * should remain unchanged. Note that we're safe from racing
- * commits since any racing commit that changes the active CRTC
- * list would need to grab _all_ crtc locks, including the one
- * we currently hold.
+ * If the state doesn't change the active CRTCs or there is no
+ * modeset request, then there's no need to recalculate;
+ * the existing pipe allocation limits should remain unchanged.
+ * Note that we're safe from racing commits since any racing commit
+ * that changes the active CRTC list or does a modeset would need to
+ * grab _all_ crtc locks, including the one we currently hold.
*/
- if (!intel_state->active_pipe_changes) {
+ if (!intel_state->active_pipe_changes && !intel_state->modeset) {
/*
* alloc may be cleared by clear_intel_crtc_state,
* copy from old state to be sure
@@ -3849,11 +3854,32 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
return;
}
- nth_active_pipe = hweight32(intel_state->active_crtcs &
- (drm_crtc_mask(for_crtc) - 1));
- pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
- alloc->start = nth_active_pipe * ddb_size / *num_active;
- alloc->end = alloc->start + pipe_size;
+ /*
+ * The watermark/DDB requirement depends heavily on the width of the
+ * framebuffer, so instead of allocating the DDB equally among pipes,
+ * distribute it based on the resolution/width of each display.
+ */
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ const struct drm_display_mode *adjusted_mode;
+ int hdisplay, vdisplay;
+ enum pipe pipe;
+
+ if (!crtc_state->enable)
+ continue;
+
+ pipe = to_intel_crtc(crtc)->pipe;
+ adjusted_mode = &crtc_state->adjusted_mode;
+ drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
+ total_width += hdisplay;
+
+ if (pipe < for_pipe)
+ width_before_pipe += hdisplay;
+ else if (pipe == for_pipe)
+ pipe_width = hdisplay;
+ }
+
+ alloc->start = ddb_size * width_before_pipe / total_width;
+ alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
}
static unsigned int skl_cursor_allocation(int num_active)
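To make the width-proportional split concrete, here is a hedged sketch with assumed numbers (two active pipes of 1920 and 1280 pixels sharing an assumed 896-block DDB); it mirrors the arithmetic in skl_ddb_get_pipe_allocation_limits() above but is illustration only.

	#include <stdio.h>

	int main(void)
	{
		const unsigned int ddb_size = 896;            /* assumed total blocks */
		const unsigned int widths[] = { 1920, 1280 }; /* pipe A, pipe B */
		unsigned int total_width = widths[0] + widths[1];
		unsigned int width_before = widths[0];        /* pipes before pipe B */

		/* Allocation limits for pipe B, proportional to its width. */
		unsigned int start = ddb_size * width_before / total_width;
		unsigned int end = ddb_size * (width_before + widths[1]) / total_width;

		printf("pipe B gets DDB blocks [%u, %u) of %u\n", start, end, ddb_size);
		return 0;
	}

With these numbers pipe B ends up with blocks [537, 896), i.e. roughly 40% of the DDB for 40% of the total width.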
@@ -3909,7 +3935,12 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
val & PLANE_CTL_ALPHA_MASK);
val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
- val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
+ /*
+ * FIXME: add proper NV12 support for ICL. Avoid reading unclaimed
+ * registers for now.
+ */
+ if (INTEL_GEN(dev_priv) < 11)
+ val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
if (fourcc == DRM_FORMAT_NV12) {
skl_ddb_entry_init_from_hw(dev_priv,
@@ -4977,6 +5008,7 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
&ddb->plane[pipe][plane_id]);
+ /* FIXME: add proper NV12 support for ICL. */
if (INTEL_GEN(dev_priv) >= 11)
return skl_ddb_entry_write(dev_priv,
PLANE_BUF_CFG(pipe, plane_id),
@@ -5142,17 +5174,6 @@ skl_compute_ddb(struct drm_atomic_state *state)
}
static void
-skl_copy_ddb_for_pipe(struct skl_ddb_values *dst,
- struct skl_ddb_values *src,
- enum pipe pipe)
-{
- memcpy(dst->ddb.uv_plane[pipe], src->ddb.uv_plane[pipe],
- sizeof(dst->ddb.uv_plane[pipe]));
- memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
- sizeof(dst->ddb.plane[pipe]));
-}
-
-static void
skl_print_wm_changes(const struct drm_atomic_state *state)
{
const struct drm_device *dev = state->dev;
@@ -5259,7 +5280,7 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
* any other display updates race with this transaction, so we need
* to grab the lock on *all* CRTC's.
*/
- if (intel_state->active_pipe_changes) {
+ if (intel_state->active_pipe_changes || intel_state->modeset) {
realloc_pipes = ~0;
intel_state->wm_results.dirty_pipes = ~0;
}
@@ -5381,7 +5402,10 @@ static void skl_initial_wm(struct intel_atomic_state *state,
if (cstate->base.active_changed)
skl_atomic_update_crtc_wm(state, cstate);
- skl_copy_ddb_for_pipe(hw_vals, results, pipe);
+ memcpy(hw_vals->ddb.uv_plane[pipe], results->ddb.uv_plane[pipe],
+ sizeof(hw_vals->ddb.uv_plane[pipe]));
+ memcpy(hw_vals->ddb.plane[pipe], results->ddb.plane[pipe],
+ sizeof(hw_vals->ddb.plane[pipe]));
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -6379,7 +6403,6 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
new_power = HIGH_POWER;
rps_set_power(dev_priv, new_power);
mutex_unlock(&rps->power.mutex);
- rps->last_adj = 0;
}
void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive)
@@ -8159,7 +8182,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
*/
if (!sanitize_rc6(dev_priv)) {
DRM_INFO("RC6 disabled, disabling runtime PM support\n");
- intel_runtime_pm_get(dev_priv);
+ pm_runtime_get(&dev_priv->drm.pdev->dev);
}
mutex_lock(&dev_priv->pcu_lock);
@@ -8211,7 +8234,7 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
valleyview_cleanup_gt_powersave(dev_priv);
if (!HAS_RC6(dev_priv))
- intel_runtime_pm_put(dev_priv);
+ pm_runtime_put(&dev_priv->drm.pdev->dev);
}
/**
@@ -8238,7 +8261,7 @@ void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 11)
gen11_reset_rps_interrupts(dev_priv);
- else
+ else if (INTEL_GEN(dev_priv) >= 6)
gen6_reset_rps_interrupts(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 4bd5768731ee..b6838b525502 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -56,7 +56,30 @@
#include "intel_drv.h"
#include "i915_drv.h"
-void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug)
+static bool psr_global_enabled(u32 debug)
+{
+ switch (debug & I915_PSR_DEBUG_MODE_MASK) {
+ case I915_PSR_DEBUG_DEFAULT:
+ return i915_modparams.enable_psr;
+ case I915_PSR_DEBUG_DISABLE:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *crtc_state)
+{
+ switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
+ case I915_PSR_DEBUG_FORCE_PSR1:
+ return false;
+ default:
+ return crtc_state->has_psr2;
+ }
+}
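As a rough illustration of how these helpers carve up the debug value, here is a hedged standalone sketch; the numeric mask values below are assumptions chosen for the example, not quoted from the driver headers.

	#include <stdio.h>

	/* Assumed layout: low nibble selects the mode, a separate bit unmasks
	 * the debug interrupts (mirroring the MODE_MASK / IRQ split used by
	 * psr_global_enabled() and intel_psr_irq_control()). */
	#define PSR_DEBUG_MODE_MASK   0x0f
	#define PSR_DEBUG_DISABLE     0x01
	#define PSR_DEBUG_FORCE_PSR1  0x03
	#define PSR_DEBUG_IRQ         0x10

	int main(void)
	{
		unsigned int val = PSR_DEBUG_FORCE_PSR1 | PSR_DEBUG_IRQ;
		unsigned int mode = val & PSR_DEBUG_MODE_MASK;

		printf("mode=0x%x, debug irqs %s, psr globally %s\n",
		       mode,
		       (val & PSR_DEBUG_IRQ) ? "unmasked" : "masked",
		       (mode == PSR_DEBUG_DISABLE) ? "disabled" : "allowed");
		return 0;
	}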
+
+void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
{
u32 debug_mask, mask;
@@ -77,10 +100,9 @@ void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug)
EDP_PSR_PRE_ENTRY(TRANSCODER_C);
}
- if (debug)
+ if (debug & I915_PSR_DEBUG_IRQ)
mask |= debug_mask;
- WRITE_ONCE(dev_priv->psr.debug, debug);
I915_WRITE(EDP_PSR_IMR, ~mask);
}
@@ -213,6 +235,9 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
dev_priv->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
+ WARN_ON(dev_priv->psr.dp);
+ dev_priv->psr.dp = intel_dp;
+
if (INTEL_GEN(dev_priv) >= 9 &&
(intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
bool y_req = intel_dp->psr_dpcd[1] &
@@ -245,7 +270,7 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct edp_vsc_psr psr_vsc;
if (dev_priv->psr.psr2_enabled) {
@@ -275,8 +300,7 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 aux_clock_divider, aux_ctl;
int i;
static const uint8_t aux_msg[] = {
@@ -309,9 +333,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 dpcd_val = DP_PSR_ENABLE;
/* Enable ALPM at sink for psr2 */
@@ -332,9 +354,7 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 max_sleep_time = 0x1f;
u32 val = EDP_PSR_ENABLE;
@@ -389,9 +409,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val;
/* Let's use 6 as the minimum to cover all known cases including the
@@ -427,8 +445,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0;
@@ -463,7 +480,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
int psr_setup_time;
@@ -471,10 +488,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
if (!CAN_PSR(dev_priv))
return;
- if (!i915_modparams.enable_psr) {
- DRM_DEBUG_KMS("PSR disable by flag\n");
+ if (intel_dp != dev_priv->psr.dp)
return;
- }
/*
* HSW spec explicitly says PSR is tied to port A.
@@ -517,14 +532,11 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
crtc_state->has_psr = true;
crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
- DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
}
static void intel_psr_activate(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (INTEL_GEN(dev_priv) >= 9)
WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
@@ -544,9 +556,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+
@@ -589,6 +599,24 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
}
}
+static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_dp *intel_dp = dev_priv->psr.dp;
+
+ if (dev_priv->psr.enabled)
+ return;
+
+ DRM_DEBUG_KMS("Enabling PSR%s\n",
+ dev_priv->psr.psr2_enabled ? "2" : "1");
+ intel_psr_setup_vsc(intel_dp, crtc_state);
+ intel_psr_enable_sink(intel_dp);
+ intel_psr_enable_source(intel_dp, crtc_state);
+ dev_priv->psr.enabled = true;
+
+ intel_psr_activate(intel_dp);
+}
+
/**
* intel_psr_enable - Enable PSR
* @intel_dp: Intel DP
@@ -599,9 +627,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
void intel_psr_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (!crtc_state->has_psr)
return;
@@ -610,21 +636,21 @@ void intel_psr_enable(struct intel_dp *intel_dp,
return;
WARN_ON(dev_priv->drrs.dp);
+
mutex_lock(&dev_priv->psr.lock);
- if (dev_priv->psr.enabled) {
+ if (dev_priv->psr.prepared) {
DRM_DEBUG_KMS("PSR already in use\n");
goto unlock;
}
- dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
+ dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
dev_priv->psr.busy_frontbuffer_bits = 0;
+ dev_priv->psr.prepared = true;
- intel_psr_setup_vsc(intel_dp, crtc_state);
- intel_psr_enable_sink(intel_dp);
- intel_psr_enable_source(intel_dp, crtc_state);
- dev_priv->psr.enabled = intel_dp;
-
- intel_psr_activate(intel_dp);
+ if (psr_global_enabled(dev_priv->psr.debug))
+ intel_psr_enable_locked(dev_priv, crtc_state);
+ else
+ DRM_DEBUG_KMS("PSR disabled by flag\n");
unlock:
mutex_unlock(&dev_priv->psr.lock);
@@ -633,9 +659,7 @@ unlock:
static void
intel_psr_disable_source(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (dev_priv->psr.active) {
i915_reg_t psr_status;
@@ -674,21 +698,21 @@ intel_psr_disable_source(struct intel_dp *intel_dp)
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
lockdep_assert_held(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled)
return;
+ DRM_DEBUG_KMS("Disabling PSR%s\n",
+ dev_priv->psr.psr2_enabled ? "2" : "1");
intel_psr_disable_source(intel_dp);
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
- dev_priv->psr.enabled = NULL;
+ dev_priv->psr.enabled = false;
}
/**
@@ -701,9 +725,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
void intel_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (!old_crtc_state->has_psr)
return;
@@ -712,57 +734,61 @@ void intel_psr_disable(struct intel_dp *intel_dp,
return;
mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.prepared) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
intel_psr_disable_locked(intel_dp);
+
+ dev_priv->psr.prepared = false;
mutex_unlock(&dev_priv->psr.lock);
cancel_work_sync(&dev_priv->psr.work);
}
-int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
+/**
+ * intel_psr_wait_for_idle - wait for PSR1 to idle
+ * @new_crtc_state: new CRTC state
+ * @out_value: PSR status in case of failure
+ *
+ * This function is expected to be called from pipe_update_start() where it is
+ * not expected to race with PSR enable or disable.
+ *
+ * Returns: 0 on success or -ETIMEDOUT if PSR status does not idle.
+ */
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
+ u32 *out_value)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- i915_reg_t reg;
- u32 mask;
- if (!new_crtc_state->has_psr)
+ if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
return 0;
- /*
- * The sole user right now is intel_pipe_update_start(),
- * which won't race with psr_enable/disable, which is
- * where psr2_enabled is written to. So, we don't need
- * to acquire the psr.lock. More importantly, we want the
- * latency inside intel_pipe_update_start() to be as low
- * as possible, so no need to acquire psr.lock when it is
- * not needed and will induce latencies in the atomic
- * update path.
- */
- if (dev_priv->psr.psr2_enabled) {
- reg = EDP_PSR2_STATUS;
- mask = EDP_PSR2_STATUS_STATE_MASK;
- } else {
- reg = EDP_PSR_STATUS;
- mask = EDP_PSR_STATUS_STATE_MASK;
- }
+ /* FIXME: Update this for PSR2 if we need to wait for idle */
+ if (READ_ONCE(dev_priv->psr.psr2_enabled))
+ return 0;
/*
- * Max time for PSR to idle = Inverse of the refresh rate +
- * 6 ms of exit training time + 1.5 ms of aux channel
- * handshake. 50 msec is defesive enough to cover everything.
+ * From bspec: Panel Self Refresh (BDW+)
+ * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
+ * exit training time + 1.5 ms of aux channel handshake. 50 ms is
+ * defensive enough to cover everything.
*/
- return intel_wait_for_register(dev_priv, reg, mask,
- EDP_PSR_STATUS_STATE_IDLE, 50);
+
+ return __intel_wait_for_register(dev_priv, EDP_PSR_STATUS,
+ EDP_PSR_STATUS_STATE_MASK,
+ EDP_PSR_STATUS_STATE_IDLE, 2, 50,
+ out_value);
}
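For a sense of the margin (a hedged back-of-the-envelope, assuming typical panel rates): at 60 Hz the bound above is 1/60 s ~= 16.7 ms + 6 ms exit training + 1.5 ms AUX handshake ~= 24.2 ms, and even an assumed 30 Hz panel only reaches about 33.3 + 6 + 1.5 ~= 40.8 ms, so the 50 ms timeout still leaves headroom.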
static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
{
- struct intel_dp *intel_dp;
i915_reg_t reg;
u32 mask;
int err;
- intel_dp = dev_priv->psr.enabled;
- if (!intel_dp)
+ if (!dev_priv->psr.enabled)
return false;
if (dev_priv->psr.psr2_enabled) {
@@ -784,6 +810,89 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
return err == 0 && dev_priv->psr.enabled;
}
+static bool switching_psr(struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *crtc_state,
+ u32 mode)
+{
+ /* Can't switch psr state anyway if PSR2 is not supported. */
+ if (!crtc_state || !crtc_state->has_psr2)
+ return false;
+
+ if (dev_priv->psr.psr2_enabled && mode == I915_PSR_DEBUG_FORCE_PSR1)
+ return true;
+
+ if (!dev_priv->psr.psr2_enabled && mode != I915_PSR_DEBUG_FORCE_PSR1)
+ return true;
+
+ return false;
+}
+
+int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
+ struct drm_modeset_acquire_ctx *ctx,
+ u64 val)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector_state *conn_state;
+ struct intel_crtc_state *crtc_state = NULL;
+ struct drm_crtc_commit *commit;
+ struct drm_crtc *crtc;
+ struct intel_dp *dp;
+ int ret;
+ bool enable;
+ u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
+
+ if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
+ mode > I915_PSR_DEBUG_FORCE_PSR1) {
+ DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
+ return -EINVAL;
+ }
+
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
+ if (ret)
+ return ret;
+
+ /* dev_priv->psr.dp should be set once and then never touched again. */
+ dp = READ_ONCE(dev_priv->psr.dp);
+ conn_state = dp->attached_connector->base.state;
+ crtc = conn_state->crtc;
+ if (crtc) {
+ ret = drm_modeset_lock(&crtc->mutex, ctx);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(crtc->state);
+ commit = crtc_state->base.commit;
+ } else {
+ commit = conn_state->commit;
+ }
+ if (commit) {
+ ret = wait_for_completion_interruptible(&commit->hw_done);
+ if (ret)
+ return ret;
+ }
+
+ ret = mutex_lock_interruptible(&dev_priv->psr.lock);
+ if (ret)
+ return ret;
+
+ enable = psr_global_enabled(val);
+
+ if (!enable || switching_psr(dev_priv, crtc_state, mode))
+ intel_psr_disable_locked(dev_priv->psr.dp);
+
+ dev_priv->psr.debug = val;
+ if (crtc)
+ dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
+
+ intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
+
+ if (dev_priv->psr.prepared && enable)
+ intel_psr_enable_locked(dev_priv, crtc_state);
+
+ mutex_unlock(&dev_priv->psr.lock);
+ return ret;
+}
+
static void intel_psr_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
@@ -811,7 +920,7 @@ static void intel_psr_work(struct work_struct *work)
if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
goto unlock;
- intel_psr_activate(dev_priv->psr.enabled);
+ intel_psr_activate(dev_priv->psr.dp);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
@@ -866,7 +975,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
return;
}
- crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+ crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
@@ -909,7 +1018,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
return;
}
- crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+ crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
@@ -977,9 +1086,7 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct i915_psr *psr = &dev_priv->psr;
u8 val;
const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
@@ -991,7 +1098,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
mutex_lock(&psr->lock);
- if (psr->enabled != intel_dp)
+ if (!psr->enabled || psr->dp != intel_dp)
goto exit;
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6a8f27d0a742..472939f5c18f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -344,11 +344,14 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode)
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
+ struct page *page = virt_to_page(engine->status_page.page_addr);
+ phys_addr_t phys = PFN_PHYS(page_to_pfn(page));
u32 addr;
- addr = dev_priv->status_page_dmah->busaddr;
+ addr = lower_32_bits(phys);
if (INTEL_GEN(dev_priv) >= 4)
- addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+ addr |= (phys >> 28) & 0xf0;
+
I915_WRITE(HWS_PGA, addr);
}
@@ -537,6 +540,8 @@ static int init_ring_common(struct intel_engine_cs *engine)
if (INTEL_GEN(dev_priv) > 2)
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
+ /* Papering over lost _interrupts_ immediately following the restart */
+ intel_engine_wakeup(engine);
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -1013,24 +1018,22 @@ i915_emit_bb_start(struct i915_request *rq,
return 0;
}
-
-
-int intel_ring_pin(struct intel_ring *ring,
- struct drm_i915_private *i915,
- unsigned int offset_bias)
+int intel_ring_pin(struct intel_ring *ring)
{
- enum i915_map_type map = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
struct i915_vma *vma = ring->vma;
+ enum i915_map_type map =
+ HAS_LLC(vma->vm->i915) ? I915_MAP_WB : I915_MAP_WC;
unsigned int flags;
void *addr;
int ret;
GEM_BUG_ON(ring->vaddr);
-
flags = PIN_GLOBAL;
- if (offset_bias)
- flags |= PIN_OFFSET_BIAS | offset_bias;
+
+ /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+ flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+
if (vma->obj->stolen)
flags |= PIN_MAPPABLE;
else
@@ -1045,7 +1048,7 @@ int intel_ring_pin(struct intel_ring *ring,
return ret;
}
- ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
+ ret = i915_vma_pin(vma, 0, 0, flags);
if (unlikely(ret))
return ret;
@@ -1230,8 +1233,7 @@ static int __context_pin(struct intel_context *ce)
return err;
}
- err = i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
- PIN_GLOBAL | PIN_HIGH);
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
return err;
@@ -1419,8 +1421,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
goto err;
}
- /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
- err = intel_ring_pin(ring, engine->i915, I915_GTT_PAGE_SIZE);
+ err = intel_ring_pin(ring);
if (err)
goto err_ring;
@@ -1706,9 +1707,29 @@ static int switch_context(struct i915_request *rq)
}
if (ppgtt) {
+ ret = engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (ret)
+ goto err_mm;
+
ret = flush_pd_dir(rq);
if (ret)
goto err_mm;
+
+ /*
+ * Not only do we need a full barrier (post-sync write) after
+ * invalidating the TLBs, but we need to wait a little bit
+ * longer. Whether this is merely delaying us, or the
+ * subsequent flush is a key part of serialising with the
+ * post-sync op, this extra pass appears vital before a
+ * mm switch!
+ */
+ ret = engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (ret)
+ goto err_mm;
+
+ ret = engine->emit_flush(rq, EMIT_FLUSH);
+ if (ret)
+ goto err_mm;
}
if (ctx->remap_slice) {
@@ -1946,7 +1967,7 @@ static void gen6_bsd_submit_request(struct i915_request *request)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
+static int mi_flush_dw(struct i915_request *rq, u32 flags)
{
u32 cmd, *cs;
@@ -1956,7 +1977,8 @@ static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
cmd = MI_FLUSH_DW;
- /* We always require a command barrier so that subsequent
+ /*
+ * We always require a command barrier so that subsequent
* commands, such as breadcrumb interrupts, are strictly ordered
* wrt the contents of the write cache being flushed to memory
* (and thus being coherent from the CPU).
@@ -1964,22 +1986,33 @@ static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
/*
- * Bspec vol 1c.5 - video engine command streamer:
+ * Bspec vol 1c.3 - blitter engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
* operation is complete. This bit is only valid when the
* Post-Sync Operation field is a value of 1h or 3h."
*/
- if (mode & EMIT_INVALIDATE)
- cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+ cmd |= flags;
*cs++ = cmd;
*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = 0;
*cs++ = MI_NOOP;
+
intel_ring_advance(rq, cs);
+
return 0;
}
+static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
+{
+ return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
+}
+
+static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
+{
+ return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
+}
+
static int
hsw_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
@@ -1992,9 +2025,7 @@ hsw_emit_bb_start(struct i915_request *rq,
return PTR_ERR(cs);
*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
- (dispatch_flags & I915_DISPATCH_RS ?
- MI_BATCH_RESOURCE_STREAMER : 0);
+ 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);
/* bit0-7 is the length on GEN6+ */
*cs++ = offset;
intel_ring_advance(rq, cs);
@@ -2026,36 +2057,7 @@ gen6_emit_bb_start(struct i915_request *rq,
static int gen6_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 cmd, *cs;
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cmd = MI_FLUSH_DW;
-
- /* We always require a command barrier so that subsequent
- * commands, such as breadcrumb interrupts, are strictly ordered
- * wrt the contents of the write cache being flushed to memory
- * (and thus being coherent from the CPU).
- */
- cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
-
- /*
- * Bspec vol 1c.3 - blitter engine command streamer:
- * "If ENABLED, all TLBs will be invalidated once the flush
- * operation is complete. This bit is only valid when the
- * Post-Sync Operation field is a value of 1h or 3h."
- */
- if (mode & EMIT_INVALIDATE)
- cmd |= MI_INVALIDATE_TLB;
- *cs++ = cmd;
- *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
- *cs++ = 0;
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- return 0;
+ return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
}
static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index f5ffa6d31e82..2dfa585712c2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -2,6 +2,8 @@
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
+#include <drm/drm_util.h>
+
#include <linux/hashtable.h>
#include <linux/seqlock.h>
@@ -474,7 +476,6 @@ struct intel_engine_cs {
unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
-#define I915_DISPATCH_RS BIT(2)
void (*emit_breadcrumb)(struct i915_request *rq, u32 *cs);
int emit_breadcrumb_sz;
@@ -797,9 +798,7 @@ struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine,
struct i915_timeline *timeline,
int size);
-int intel_ring_pin(struct intel_ring *ring,
- struct drm_i915_private *i915,
- unsigned int offset_bias);
+int intel_ring_pin(struct intel_ring *ring);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
@@ -909,18 +908,15 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
int intel_engine_stop_cs(struct intel_engine_cs *engine);
+void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
-static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
-{
- return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
- /* We are only peeking at the tail of the submit queue (and not the
+ /*
+ * We are only peeking at the tail of the submit queue (and not the
* queue itself) in order to gain a hint as to the current active
* state of the engine. Callers are not expected to be taking
* engine->timeline->lock, nor are they expected to be concerned
@@ -930,6 +926,31 @@ static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
return READ_ONCE(engine->timeline.seqno);
}
+static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
+{
+ return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
+}
+
+static inline bool intel_engine_signaled(struct intel_engine_cs *engine,
+ u32 seqno)
+{
+ return i915_seqno_passed(intel_engine_get_seqno(engine), seqno);
+}
+
+static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
+ u32 seqno)
+{
+ GEM_BUG_ON(!seqno);
+ return intel_engine_signaled(engine, seqno);
+}
+
+static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
+ u32 seqno)
+{
+ GEM_BUG_ON(!seqno);
+ return intel_engine_signaled(engine, seqno - 1);
+}
+
void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone);
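The helpers above lean on i915_seqno_passed() for the actual comparison; below is a hedged standalone sketch of the wraparound-safe idiom it is generally understood to use (a signed view of the unsigned difference) — treat the exact kernel implementation as an assumption here.

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Wraparound-safe "seq1 has passed seq2" check: the signed view of
	 * the unsigned difference keeps ordering correct across a u32 wrap. */
	static bool seqno_passed(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) >= 0;
	}

	int main(void)
	{
		/* 0x00000002 is "after" 0xfffffffe even though it wrapped. */
		assert(seqno_passed(0x00000002u, 0xfffffffeu));
		assert(!seqno_passed(0xfffffffeu, 0x00000002u));
		/* intel_engine_has_started() asks about seqno - 1: the
		 * breadcrumb just before the request's own. */
		assert(seqno_passed(10u, 11u - 1u));
		return 0;
	}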
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 6b5aa3b074ec..480dadb1047b 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -52,10 +52,6 @@
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
-static struct i915_power_well *
-lookup_power_well(struct drm_i915_private *dev_priv,
- enum i915_power_well_id power_well_id);
-
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
@@ -159,17 +155,17 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- DRM_DEBUG_KMS("enabling %s\n", power_well->name);
- power_well->ops->enable(dev_priv, power_well);
+ DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
+ power_well->desc->ops->enable(dev_priv, power_well);
power_well->hw_enabled = true;
}
static void intel_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+ DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
power_well->hw_enabled = false;
- power_well->ops->disable(dev_priv, power_well);
+ power_well->desc->ops->disable(dev_priv, power_well);
}
static void intel_power_well_get(struct drm_i915_private *dev_priv,
@@ -183,7 +179,7 @@ static void intel_power_well_put(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN(!power_well->count, "Use count on power well %s is already zero",
- power_well->name);
+ power_well->desc->name);
if (!--power_well->count)
intel_power_well_disable(dev_priv, power_well);
@@ -213,7 +209,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
is_enabled = true;
for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
- if (power_well->always_on)
+ if (power_well->desc->always_on)
continue;
if (!power_well->hw_enabled) {
@@ -257,30 +253,6 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
return ret;
}
-/**
- * intel_display_set_init_power - set the initial power domain state
- * @dev_priv: i915 device instance
- * @enable: whether to enable or disable the initial power domain state
- *
- * For simplicity our driver load/unload and system suspend/resume code assumes
- * that all power domains are always enabled. This functions controls the state
- * of this little hack. While the initial power domain state is enabled runtime
- * pm is effectively disabled.
- */
-void intel_display_set_init_power(struct drm_i915_private *dev_priv,
- bool enable)
-{
- if (dev_priv->power_domains.init_power_on == enable)
- return;
-
- if (enable)
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
- else
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-
- dev_priv->power_domains.init_power_on = enable;
-}
-
/*
* Starting with Haswell, we have a "Power Down Well" that can be turned off
* when not needed anymore. We have 4 registers that can request the power well
@@ -323,26 +295,29 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum i915_power_well_id id = power_well->id;
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
WARN_ON(intel_wait_for_register(dev_priv,
- HSW_PWR_WELL_CTL_DRIVER(id),
- HSW_PWR_WELL_CTL_STATE(id),
- HSW_PWR_WELL_CTL_STATE(id),
+ regs->driver,
+ HSW_PWR_WELL_CTL_STATE(pw_idx),
+ HSW_PWR_WELL_CTL_STATE(pw_idx),
1));
}
static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
- enum i915_power_well_id id)
+ const struct i915_power_well_regs *regs,
+ int pw_idx)
{
- u32 req_mask = HSW_PWR_WELL_CTL_REQ(id);
+ u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
u32 ret;
- ret = I915_READ(HSW_PWR_WELL_CTL_BIOS(id)) & req_mask ? 1 : 0;
- ret |= I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & req_mask ? 2 : 0;
- ret |= I915_READ(HSW_PWR_WELL_CTL_KVMR) & req_mask ? 4 : 0;
- ret |= I915_READ(HSW_PWR_WELL_CTL_DEBUG(id)) & req_mask ? 8 : 0;
+ ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
+ ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
+ if (regs->kvmr.reg)
+ ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
+ ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
return ret;
}
@@ -350,7 +325,8 @@ static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum i915_power_well_id id = power_well->id;
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
bool disabled;
u32 reqs;
@@ -363,14 +339,14 @@ static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
* Skip the wait in case any of the request bits are set and print a
* diagnostic message.
*/
- wait_for((disabled = !(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) &
- HSW_PWR_WELL_CTL_STATE(id))) ||
- (reqs = hsw_power_well_requesters(dev_priv, id)), 1);
+ wait_for((disabled = !(I915_READ(regs->driver) &
+ HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
+ (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
if (disabled)
return;
DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
- power_well->name,
+ power_well->desc->name,
!!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
@@ -386,14 +362,15 @@ static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum i915_power_well_id id = power_well->id;
- bool wait_fuses = power_well->hsw.has_fuses;
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
+ bool wait_fuses = power_well->desc->hsw.has_fuses;
enum skl_power_gate uninitialized_var(pg);
u32 val;
if (wait_fuses) {
- pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_TO_PG(id) :
- SKL_PW_TO_PG(id);
+ pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+ SKL_PW_CTL_IDX_TO_PG(pw_idx);
/*
* For PW1 we have to wait both for the PW0/PG0 fuse state
* before enabling the power well and PW1/PG1's own fuse
@@ -405,52 +382,55 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
}
- val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
- I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id));
+ val = I915_READ(regs->driver);
+ I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
hsw_wait_for_power_well_enable(dev_priv, power_well);
/* Display WA #1178: cnl */
if (IS_CANNONLAKE(dev_priv) &&
- (id == CNL_DISP_PW_AUX_B || id == CNL_DISP_PW_AUX_C ||
- id == CNL_DISP_PW_AUX_D || id == CNL_DISP_PW_AUX_F)) {
- val = I915_READ(CNL_AUX_ANAOVRD1(id));
+ pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
+ pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
+ val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
- I915_WRITE(CNL_AUX_ANAOVRD1(id), val);
+ I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
}
if (wait_fuses)
gen9_wait_for_power_well_fuses(dev_priv, pg);
- hsw_power_well_post_enable(dev_priv, power_well->hsw.irq_pipe_mask,
- power_well->hsw.has_vga);
+ hsw_power_well_post_enable(dev_priv,
+ power_well->desc->hsw.irq_pipe_mask,
+ power_well->desc->hsw.has_vga);
}
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum i915_power_well_id id = power_well->id;
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
u32 val;
- hsw_power_well_pre_disable(dev_priv, power_well->hsw.irq_pipe_mask);
+ hsw_power_well_pre_disable(dev_priv,
+ power_well->desc->hsw.irq_pipe_mask);
- val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
- I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id),
- val & ~HSW_PWR_WELL_CTL_REQ(id));
+ val = I915_READ(regs->driver);
+ I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
-#define ICL_AUX_PW_TO_PORT(pw) ((pw) - ICL_DISP_PW_AUX_A)
+#define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum i915_power_well_id id = power_well->id;
- enum port port = ICL_AUX_PW_TO_PORT(id);
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
+ enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
u32 val;
- val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
- I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id));
+ val = I915_READ(regs->driver);
+ I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
val = I915_READ(ICL_PORT_CL_DW12(port));
I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
@@ -462,16 +442,16 @@ static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum i915_power_well_id id = power_well->id;
- enum port port = ICL_AUX_PW_TO_PORT(id);
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
+ enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
u32 val;
val = I915_READ(ICL_PORT_CL_DW12(port));
I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);
- val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
- I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id),
- val & ~HSW_PWR_WELL_CTL_REQ(id));
+ val = I915_READ(regs->driver);
+ I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
@@ -484,22 +464,22 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum i915_power_well_id id = power_well->id;
- u32 mask = HSW_PWR_WELL_CTL_REQ(id) | HSW_PWR_WELL_CTL_STATE(id);
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
+ u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
+ HSW_PWR_WELL_CTL_STATE(pw_idx);
- return (I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & mask) == mask;
+ return (I915_READ(regs->driver) & mask) == mask;
}
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
- enum i915_power_well_id id = SKL_DISP_PW_2;
-
WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
"DC9 already programmed to be enabled.\n");
WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
"DC5 still not disabled to enable DC9.\n");
- WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) &
- HSW_PWR_WELL_CTL_REQ(id),
+ WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
+ HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
"Power well 2 on.\n");
WARN_ONCE(intel_irqs_enabled(dev_priv),
"Interrupts not disabled yet.\n");
@@ -668,6 +648,27 @@ static void assert_csr_loaded(struct drm_i915_private *dev_priv)
WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
+static struct i915_power_well *
+lookup_power_well(struct drm_i915_private *dev_priv,
+ enum i915_power_well_id power_well_id)
+{
+ struct i915_power_well *power_well;
+
+ for_each_power_well(dev_priv, power_well)
+ if (power_well->desc->id == power_well_id)
+ return power_well;
+
+ /*
+ * It's not feasible to add error checking code to the callers since
+ * this condition really shouldn't happen and it doesn't even make sense
+ * to abort things like display initialization sequences. Just return
+ * the first power well and hope the WARN gets reported so we can fix
+ * our driver.
+ */
+ WARN(1, "Power well %d not defined for this platform\n", power_well_id);
+ return &dev_priv->power_domains.power_wells[0];
+}
+
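/*
 * Illustrative sketch, not part of the patch: with the lookup helper moved
 * up here, callers resolve a well by its stable id and then go through the
 * const descriptor's ops, mirroring intel_display_power_well_is_enabled()
 * further down in this file.
 */
static bool sketch_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, power_well_id);

	return power_well->desc->ops->is_enabled(dev_priv, power_well);
}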
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
@@ -723,54 +724,57 @@ static void skl_enable_dc6(struct drm_i915_private *dev_priv)
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum i915_power_well_id id = power_well->id;
- u32 mask = HSW_PWR_WELL_CTL_REQ(id);
- u32 bios_req = I915_READ(HSW_PWR_WELL_CTL_BIOS(id));
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
+ u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
+ u32 bios_req = I915_READ(regs->bios);
/* Take over the request bit if set by BIOS. */
if (bios_req & mask) {
- u32 drv_req = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
+ u32 drv_req = I915_READ(regs->driver);
if (!(drv_req & mask))
- I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), drv_req | mask);
- I915_WRITE(HSW_PWR_WELL_CTL_BIOS(id), bios_req & ~mask);
+ I915_WRITE(regs->driver, drv_req | mask);
+ I915_WRITE(regs->bios, bios_req & ~mask);
}
}
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- bxt_ddi_phy_init(dev_priv, power_well->bxt.phy);
+ bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- bxt_ddi_phy_uninit(dev_priv, power_well->bxt.phy);
+ bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return bxt_ddi_phy_is_enabled(dev_priv, power_well->bxt.phy);
+ return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}
static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
struct i915_power_well *power_well;
- power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
+ power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
+ bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
- power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
+ power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
+ bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
if (IS_GEMINILAKE(dev_priv)) {
- power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
+ power_well = lookup_power_well(dev_priv,
+ GLK_DISP_PW_DPIO_CMN_C);
if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
+ bxt_ddi_phy_verify_state(dev_priv,
+ power_well->desc->bxt.phy);
}
}
@@ -869,14 +873,14 @@ static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
- enum i915_power_well_id power_well_id = power_well->id;
+ int pw_idx = power_well->desc->vlv.idx;
u32 mask;
u32 state;
u32 ctrl;
- mask = PUNIT_PWRGT_MASK(power_well_id);
- state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
- PUNIT_PWRGT_PWR_GATE(power_well_id);
+ mask = PUNIT_PWRGT_MASK(pw_idx);
+ state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
+ PUNIT_PWRGT_PWR_GATE(pw_idx);
mutex_lock(&dev_priv->pcu_lock);
@@ -917,14 +921,14 @@ static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum i915_power_well_id power_well_id = power_well->id;
+ int pw_idx = power_well->desc->vlv.idx;
bool enabled = false;
u32 mask;
u32 state;
u32 ctrl;
- mask = PUNIT_PWRGT_MASK(power_well_id);
- ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
+ mask = PUNIT_PWRGT_MASK(pw_idx);
+ ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
mutex_lock(&dev_priv->pcu_lock);
@@ -933,8 +937,8 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
*/
- WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
- state != PUNIT_PWRGT_PWR_GATE(power_well_id));
+ WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
+ state != PUNIT_PWRGT_PWR_GATE(pw_idx));
if (state == ctrl)
enabled = true;
@@ -1045,8 +1049,6 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
-
vlv_set_power_well(dev_priv, power_well, true);
vlv_display_power_well_init(dev_priv);
@@ -1055,8 +1057,6 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);
-
vlv_display_power_well_deinit(dev_priv);
vlv_set_power_well(dev_priv, power_well, false);
@@ -1065,8 +1065,6 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
-
/* since ref/cri clock was enabled */
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
@@ -1091,8 +1089,6 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
{
enum pipe pipe;
- WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);
-
for_each_pipe(dev_priv, pipe)
assert_pll_disabled(dev_priv, pipe);
@@ -1104,32 +1100,14 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
-static struct i915_power_well *
-lookup_power_well(struct drm_i915_private *dev_priv,
- enum i915_power_well_id power_well_id)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- int i;
-
- for (i = 0; i < power_domains->power_well_count; i++) {
- struct i915_power_well *power_well;
-
- power_well = &power_domains->power_wells[i];
- if (power_well->id == power_well_id)
- return power_well;
- }
-
- return NULL;
-}
-
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
struct i915_power_well *cmn_bc =
- lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+ lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
struct i915_power_well *cmn_d =
- lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
+ lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
u32 phy_control = dev_priv->chv_phy_control;
u32 phy_status = 0;
u32 phy_status_mask = 0xffffffff;
@@ -1154,7 +1132,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
- if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
+ if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
phy_status |= PHY_POWERGOOD(DPIO_PHY0);
/* this assumes override is only used to enable lanes */
@@ -1195,7 +1173,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
}
- if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
+ if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
phy_status |= PHY_POWERGOOD(DPIO_PHY1);
/* this assumes override is only used to enable lanes */
@@ -1239,10 +1217,10 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
enum pipe pipe;
uint32_t tmp;
- WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
- power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
+ WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
+ power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
- if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
pipe = PIPE_A;
phy = DPIO_PHY0;
} else {
@@ -1270,7 +1248,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
- if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
tmp |= DPIO_DYNPWRDOWNEN_CH1;
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
@@ -1301,10 +1279,10 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
{
enum dpio_phy phy;
- WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
- power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);
+ WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
+ power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
- if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
phy = DPIO_PHY0;
assert_pll_disabled(dev_priv, PIPE_A);
assert_pll_disabled(dev_priv, PIPE_B);
@@ -1516,8 +1494,6 @@ out:
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);
-
chv_set_pipe_power_well(dev_priv, power_well, true);
vlv_display_power_well_init(dev_priv);
@@ -1526,8 +1502,6 @@ static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);
-
vlv_display_power_well_deinit(dev_priv);
chv_set_pipe_power_well(dev_priv, power_well, false);
@@ -2063,13 +2037,13 @@ static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
.is_enabled = vlv_power_well_enabled,
};
-static struct i915_power_well i9xx_always_on_power_well[] = {
+static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
{
.name = "always-on",
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .id = I915_DISP_PW_ALWAYS_ON,
+ .id = DISP_PW_ID_NONE,
},
};
@@ -2080,19 +2054,19 @@ static const struct i915_power_well_ops i830_pipes_power_well_ops = {
.is_enabled = i830_pipes_power_well_enabled,
};
-static struct i915_power_well i830_power_wells[] = {
+static const struct i915_power_well_desc i830_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .id = I915_DISP_PW_ALWAYS_ON,
+ .id = DISP_PW_ID_NONE,
},
{
.name = "pipes",
.domains = I830_PIPES_POWER_DOMAINS,
.ops = &i830_pipes_power_well_ops,
- .id = I830_DISP_PW_PIPES,
+ .id = DISP_PW_ID_NONE,
},
};
@@ -2117,13 +2091,20 @@ static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
.is_enabled = bxt_dpio_cmn_power_well_enabled,
};
-static struct i915_power_well hsw_power_wells[] = {
+static const struct i915_power_well_regs hsw_power_well_regs = {
+ .bios = HSW_PWR_WELL_CTL1,
+ .driver = HSW_PWR_WELL_CTL2,
+ .kvmr = HSW_PWR_WELL_CTL3,
+ .debug = HSW_PWR_WELL_CTL4,
+};
+
+static const struct i915_power_well_desc hsw_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .id = I915_DISP_PW_ALWAYS_ON,
+ .id = DISP_PW_ID_NONE,
},
{
.name = "display",
@@ -2131,18 +2112,20 @@ static struct i915_power_well hsw_power_wells[] = {
.ops = &hsw_power_well_ops,
.id = HSW_DISP_PW_GLOBAL,
{
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
.hsw.has_vga = true,
},
},
};
-static struct i915_power_well bdw_power_wells[] = {
+static const struct i915_power_well_desc bdw_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .id = I915_DISP_PW_ALWAYS_ON,
+ .id = DISP_PW_ID_NONE,
},
{
.name = "display",
@@ -2150,6 +2133,8 @@ static struct i915_power_well bdw_power_wells[] = {
.ops = &hsw_power_well_ops,
.id = HSW_DISP_PW_GLOBAL,
{
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
.hsw.has_vga = true,
},
@@ -2177,19 +2162,22 @@ static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
.is_enabled = vlv_power_well_enabled,
};
-static struct i915_power_well vlv_power_wells[] = {
+static const struct i915_power_well_desc vlv_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .id = I915_DISP_PW_ALWAYS_ON,
+ .id = DISP_PW_ID_NONE,
},
{
.name = "display",
.domains = VLV_DISPLAY_POWER_DOMAINS,
- .id = PUNIT_POWER_WELL_DISP2D,
.ops = &vlv_display_power_well_ops,
+ .id = VLV_DISP_PW_DISP2D,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
+ },
},
{
.name = "dpio-tx-b-01",
@@ -2198,7 +2186,10 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
- .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
+ .id = DISP_PW_ID_NONE,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
+ },
},
{
.name = "dpio-tx-b-23",
@@ -2207,7 +2198,10 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
- .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
+ .id = DISP_PW_ID_NONE,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
+ },
},
{
.name = "dpio-tx-c-01",
@@ -2216,7 +2210,10 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
- .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
+ .id = DISP_PW_ID_NONE,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
+ },
},
{
.name = "dpio-tx-c-23",
@@ -2225,23 +2222,29 @@ static struct i915_power_well vlv_power_wells[] = {
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
- .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
+ .id = DISP_PW_ID_NONE,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
+ },
},
{
.name = "dpio-common",
.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
- .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
.ops = &vlv_dpio_cmn_power_well_ops,
+ .id = VLV_DISP_PW_DPIO_CMN_BC,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
+ },
},
};
-static struct i915_power_well chv_power_wells[] = {
+static const struct i915_power_well_desc chv_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .id = I915_DISP_PW_ALWAYS_ON,
+ .id = DISP_PW_ID_NONE,
},
{
.name = "display",
@@ -2251,20 +2254,26 @@ static struct i915_power_well chv_power_wells[] = {
* required for any pipe to work.
*/
.domains = CHV_DISPLAY_POWER_DOMAINS,
- .id = CHV_DISP_PW_PIPE_A,
.ops = &chv_pipe_power_well_ops,
+ .id = DISP_PW_ID_NONE,
},
{
.name = "dpio-common-bc",
.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
- .id = PUNIT_POWER_WELL_DPIO_CMN_BC,
.ops = &chv_dpio_cmn_power_well_ops,
+ .id = VLV_DISP_PW_DPIO_CMN_BC,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
+ },
},
{
.name = "dpio-common-d",
.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
- .id = PUNIT_POWER_WELL_DPIO_CMN_D,
.ops = &chv_dpio_cmn_power_well_ops,
+ .id = CHV_DISP_PW_DPIO_CMN_D,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
+ },
},
};
@@ -2275,18 +2284,18 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
bool ret;
power_well = lookup_power_well(dev_priv, power_well_id);
- ret = power_well->ops->is_enabled(dev_priv, power_well);
+ ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
return ret;
}
-static struct i915_power_well skl_power_wells[] = {
+static const struct i915_power_well_desc skl_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .id = I915_DISP_PW_ALWAYS_ON,
+ .id = DISP_PW_ID_NONE,
},
{
.name = "power well 1",
@@ -2295,6 +2304,8 @@ static struct i915_power_well skl_power_wells[] = {
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
{
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_1,
.hsw.has_fuses = true,
},
},
@@ -2304,12 +2315,16 @@ static struct i915_power_well skl_power_wells[] = {
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_MISC_IO,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
+ },
},
{
.name = "DC off",
.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_PW_DC_OFF,
+ .id = DISP_PW_ID_NONE,
},
{
.name = "power well 2",
@@ -2317,6 +2332,8 @@ static struct i915_power_well skl_power_wells[] = {
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_2,
{
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
.hsw.has_vga = true,
.hsw.has_fuses = true,
@@ -2326,35 +2343,51 @@ static struct i915_power_well skl_power_wells[] = {
.name = "DDI A/E IO power well",
.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_DDI_A_E,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
+ },
},
{
.name = "DDI B IO power well",
.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_DDI_B,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
+ },
},
{
.name = "DDI C IO power well",
.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_DDI_C,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
+ },
},
{
.name = "DDI D IO power well",
.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_DDI_D,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
+ },
},
};
-static struct i915_power_well bxt_power_wells[] = {
+static const struct i915_power_well_desc bxt_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .id = I915_DISP_PW_ALWAYS_ON,
+ .id = DISP_PW_ID_NONE,
},
{
.name = "power well 1",
@@ -2362,6 +2395,8 @@ static struct i915_power_well bxt_power_wells[] = {
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
{
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_1,
.hsw.has_fuses = true,
},
},
@@ -2369,7 +2404,7 @@ static struct i915_power_well bxt_power_wells[] = {
.name = "DC off",
.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_PW_DC_OFF,
+ .id = DISP_PW_ID_NONE,
},
{
.name = "power well 2",
@@ -2377,6 +2412,8 @@ static struct i915_power_well bxt_power_wells[] = {
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_2,
{
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
.hsw.has_vga = true,
.hsw.has_fuses = true,
@@ -2386,7 +2423,7 @@ static struct i915_power_well bxt_power_wells[] = {
.name = "dpio-common-a",
.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
.ops = &bxt_dpio_cmn_power_well_ops,
- .id = BXT_DPIO_CMN_A,
+ .id = BXT_DISP_PW_DPIO_CMN_A,
{
.bxt.phy = DPIO_PHY1,
},
@@ -2395,20 +2432,20 @@ static struct i915_power_well bxt_power_wells[] = {
.name = "dpio-common-bc",
.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
.ops = &bxt_dpio_cmn_power_well_ops,
- .id = BXT_DPIO_CMN_BC,
+ .id = VLV_DISP_PW_DPIO_CMN_BC,
{
.bxt.phy = DPIO_PHY0,
},
},
};
-static struct i915_power_well glk_power_wells[] = {
+static const struct i915_power_well_desc glk_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .id = I915_DISP_PW_ALWAYS_ON,
+ .id = DISP_PW_ID_NONE,
},
{
.name = "power well 1",
@@ -2417,6 +2454,8 @@ static struct i915_power_well glk_power_wells[] = {
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
{
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_1,
.hsw.has_fuses = true,
},
},
@@ -2424,7 +2463,7 @@ static struct i915_power_well glk_power_wells[] = {
.name = "DC off",
.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_PW_DC_OFF,
+ .id = DISP_PW_ID_NONE,
},
{
.name = "power well 2",
@@ -2432,6 +2471,8 @@ static struct i915_power_well glk_power_wells[] = {
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_2,
{
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
.hsw.has_vga = true,
.hsw.has_fuses = true,
@@ -2441,7 +2482,7 @@ static struct i915_power_well glk_power_wells[] = {
.name = "dpio-common-a",
.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
.ops = &bxt_dpio_cmn_power_well_ops,
- .id = BXT_DPIO_CMN_A,
+ .id = BXT_DISP_PW_DPIO_CMN_A,
{
.bxt.phy = DPIO_PHY1,
},
@@ -2450,7 +2491,7 @@ static struct i915_power_well glk_power_wells[] = {
.name = "dpio-common-b",
.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
.ops = &bxt_dpio_cmn_power_well_ops,
- .id = BXT_DPIO_CMN_BC,
+ .id = VLV_DISP_PW_DPIO_CMN_BC,
{
.bxt.phy = DPIO_PHY0,
},
@@ -2459,7 +2500,7 @@ static struct i915_power_well glk_power_wells[] = {
.name = "dpio-common-c",
.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
.ops = &bxt_dpio_cmn_power_well_ops,
- .id = GLK_DPIO_CMN_C,
+ .id = GLK_DISP_PW_DPIO_CMN_C,
{
.bxt.phy = DPIO_PHY2,
},
@@ -2468,47 +2509,71 @@ static struct i915_power_well glk_power_wells[] = {
.name = "AUX A",
.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = GLK_DISP_PW_AUX_A,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
+ },
},
{
.name = "AUX B",
.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = GLK_DISP_PW_AUX_B,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
+ },
},
{
.name = "AUX C",
.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = GLK_DISP_PW_AUX_C,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
+ },
},
{
.name = "DDI A IO power well",
.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = GLK_DISP_PW_DDI_A,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
+ },
},
{
.name = "DDI B IO power well",
.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_DDI_B,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
+ },
},
{
.name = "DDI C IO power well",
.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_DDI_C,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
+ },
},
};
-static struct i915_power_well cnl_power_wells[] = {
+static const struct i915_power_well_desc cnl_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .id = I915_DISP_PW_ALWAYS_ON,
+ .id = DISP_PW_ID_NONE,
},
{
.name = "power well 1",
@@ -2517,6 +2582,8 @@ static struct i915_power_well cnl_power_wells[] = {
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
{
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_1,
.hsw.has_fuses = true,
},
},
@@ -2524,31 +2591,47 @@ static struct i915_power_well cnl_power_wells[] = {
.name = "AUX A",
.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = CNL_DISP_PW_AUX_A,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
+ },
},
{
.name = "AUX B",
.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = CNL_DISP_PW_AUX_B,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
+ },
},
{
.name = "AUX C",
.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = CNL_DISP_PW_AUX_C,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
+ },
},
{
.name = "AUX D",
.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = CNL_DISP_PW_AUX_D,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
+ },
},
{
.name = "DC off",
.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_PW_DC_OFF,
+ .id = DISP_PW_ID_NONE,
},
{
.name = "power well 2",
@@ -2556,6 +2639,8 @@ static struct i915_power_well cnl_power_wells[] = {
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_2,
{
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
.hsw.has_vga = true,
.hsw.has_fuses = true,
@@ -2565,37 +2650,61 @@ static struct i915_power_well cnl_power_wells[] = {
.name = "DDI A IO power well",
.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = CNL_DISP_PW_DDI_A,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
+ },
},
{
.name = "DDI B IO power well",
.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_DDI_B,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
+ },
},
{
.name = "DDI C IO power well",
.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_DDI_C,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
+ },
},
{
.name = "DDI D IO power well",
.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_DDI_D,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
+ },
},
{
.name = "DDI F IO power well",
.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = CNL_DISP_PW_DDI_F,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
+ },
},
{
.name = "AUX F",
.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = CNL_DISP_PW_AUX_F,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
+ },
},
};
@@ -2606,147 +2715,239 @@ static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
.is_enabled = hsw_power_well_enabled,
};
-static struct i915_power_well icl_power_wells[] = {
+static const struct i915_power_well_regs icl_aux_power_well_regs = {
+ .bios = ICL_PWR_WELL_CTL_AUX1,
+ .driver = ICL_PWR_WELL_CTL_AUX2,
+ .debug = ICL_PWR_WELL_CTL_AUX4,
+};
+
+static const struct i915_power_well_regs icl_ddi_power_well_regs = {
+ .bios = ICL_PWR_WELL_CTL_DDI1,
+ .driver = ICL_PWR_WELL_CTL_DDI2,
+ .debug = ICL_PWR_WELL_CTL_DDI4,
+};
+
+static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .id = I915_DISP_PW_ALWAYS_ON,
+ .id = DISP_PW_ID_NONE,
},
{
.name = "power well 1",
/* Handled by the DMC firmware */
.domains = 0,
.ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_1,
- .hsw.has_fuses = true,
+ .id = SKL_DISP_PW_1,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_1,
+ .hsw.has_fuses = true,
+ },
},
{
.name = "power well 2",
.domains = ICL_PW_2_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_2,
- .hsw.has_fuses = true,
+ .id = SKL_DISP_PW_2,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .hsw.has_fuses = true,
+ },
},
{
.name = "DC off",
.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_PW_DC_OFF,
+ .id = DISP_PW_ID_NONE,
},
{
.name = "power well 3",
.domains = ICL_PW_3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_3,
- .hsw.irq_pipe_mask = BIT(PIPE_B),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .hsw.irq_pipe_mask = BIT(PIPE_B),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
},
{
.name = "DDI A IO",
.domains = ICL_DDI_IO_A_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_DDI_A,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
+ },
},
{
.name = "DDI B IO",
.domains = ICL_DDI_IO_B_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_DDI_B,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
+ },
},
{
.name = "DDI C IO",
.domains = ICL_DDI_IO_C_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_DDI_C,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
+ },
},
{
.name = "DDI D IO",
.domains = ICL_DDI_IO_D_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_DDI_D,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
+ },
},
{
.name = "DDI E IO",
.domains = ICL_DDI_IO_E_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_DDI_E,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
+ },
},
{
.name = "DDI F IO",
.domains = ICL_DDI_IO_F_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_DDI_F,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
+ },
},
{
.name = "AUX A",
.domains = ICL_AUX_A_IO_POWER_DOMAINS,
.ops = &icl_combo_phy_aux_power_well_ops,
- .id = ICL_DISP_PW_AUX_A,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
+ },
},
{
.name = "AUX B",
.domains = ICL_AUX_B_IO_POWER_DOMAINS,
.ops = &icl_combo_phy_aux_power_well_ops,
- .id = ICL_DISP_PW_AUX_B,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
+ },
},
{
.name = "AUX C",
.domains = ICL_AUX_C_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_AUX_C,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+ },
},
{
.name = "AUX D",
.domains = ICL_AUX_D_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_AUX_D,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
+ },
},
{
.name = "AUX E",
.domains = ICL_AUX_E_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_AUX_E,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
+ },
},
{
.name = "AUX F",
.domains = ICL_AUX_F_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_AUX_F,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
+ },
},
{
.name = "AUX TBT1",
.domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_AUX_TBT1,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
+ },
},
{
.name = "AUX TBT2",
.domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_AUX_TBT2,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
+ },
},
{
.name = "AUX TBT3",
.domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_AUX_TBT3,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
+ },
},
{
.name = "AUX TBT4",
.domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_AUX_TBT4,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
+ },
},
{
.name = "power well 4",
.domains = ICL_PW_4_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_4,
- .hsw.has_fuses = true,
- .hsw.irq_pipe_mask = BIT(PIPE_C),
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4,
+ .hsw.has_fuses = true,
+ .hsw.irq_pipe_mask = BIT(PIPE_C),
+ },
},
};
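/*
 * Illustrative sketch, not part of the patch: on ICL the same
 * hsw_power_well_ops drive three separate control register sets, selected
 * purely by the descriptor's regs pointer:
 *
 *   main power wells -> hsw_power_well_regs     (HSW_PWR_WELL_CTL1/2/3/4)
 *   AUX power wells  -> icl_aux_power_well_regs (ICL_PWR_WELL_CTL_AUX1/2/4)
 *   DDI power wells  -> icl_ddi_power_well_regs (ICL_PWR_WELL_CTL_DDI1/2/4)
 *
 * so a status check reduces to the generic pattern below, with no
 * id-derived register macros involved.
 */
static bool sketch_icl_power_well_status(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;

	return I915_READ(regs->driver) & HSW_PWR_WELL_CTL_STATE(pw_idx);
}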
@@ -2809,26 +3010,41 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
return mask;
}
-static void assert_power_well_ids_unique(struct drm_i915_private *dev_priv)
+static int
+__set_power_wells(struct i915_power_domains *power_domains,
+ const struct i915_power_well_desc *power_well_descs,
+ int power_well_count)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- u64 power_well_ids;
+ u64 power_well_ids = 0;
int i;
- power_well_ids = 0;
- for (i = 0; i < power_domains->power_well_count; i++) {
- enum i915_power_well_id id = power_domains->power_wells[i].id;
+ power_domains->power_well_count = power_well_count;
+ power_domains->power_wells =
+ kcalloc(power_well_count,
+ sizeof(*power_domains->power_wells),
+ GFP_KERNEL);
+ if (!power_domains->power_wells)
+ return -ENOMEM;
+
+ for (i = 0; i < power_well_count; i++) {
+ enum i915_power_well_id id = power_well_descs[i].id;
+
+ power_domains->power_wells[i].desc = &power_well_descs[i];
+
+ if (id == DISP_PW_ID_NONE)
+ continue;
WARN_ON(id >= sizeof(power_well_ids) * 8);
WARN_ON(power_well_ids & BIT_ULL(id));
power_well_ids |= BIT_ULL(id);
}
+
+ return 0;
}
-#define set_power_wells(power_domains, __power_wells) ({ \
- (power_domains)->power_wells = (__power_wells); \
- (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
-})
+#define set_power_wells(power_domains, __power_well_descs) \
+ __set_power_wells(power_domains, __power_well_descs, \
+ ARRAY_SIZE(__power_well_descs))
/**
* intel_power_domains_init - initializes the power domain structures
@@ -2840,6 +3056,7 @@ static void assert_power_well_ids_unique(struct drm_i915_private *dev_priv)
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ int err;
i915_modparams.disable_power_well =
sanitize_disable_power_well_option(dev_priv,
@@ -2856,15 +3073,15 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
* the disabling order is reversed.
*/
if (IS_ICELAKE(dev_priv)) {
- set_power_wells(power_domains, icl_power_wells);
+ err = set_power_wells(power_domains, icl_power_wells);
} else if (IS_HASWELL(dev_priv)) {
- set_power_wells(power_domains, hsw_power_wells);
+ err = set_power_wells(power_domains, hsw_power_wells);
} else if (IS_BROADWELL(dev_priv)) {
- set_power_wells(power_domains, bdw_power_wells);
+ err = set_power_wells(power_domains, bdw_power_wells);
} else if (IS_GEN9_BC(dev_priv)) {
- set_power_wells(power_domains, skl_power_wells);
+ err = set_power_wells(power_domains, skl_power_wells);
} else if (IS_CANNONLAKE(dev_priv)) {
- set_power_wells(power_domains, cnl_power_wells);
+ err = set_power_wells(power_domains, cnl_power_wells);
/*
* DDI and Aux IO are getting enabled for all ports
@@ -2876,57 +3093,31 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
power_domains->power_well_count -= 2;
} else if (IS_BROXTON(dev_priv)) {
- set_power_wells(power_domains, bxt_power_wells);
+ err = set_power_wells(power_domains, bxt_power_wells);
} else if (IS_GEMINILAKE(dev_priv)) {
- set_power_wells(power_domains, glk_power_wells);
+ err = set_power_wells(power_domains, glk_power_wells);
} else if (IS_CHERRYVIEW(dev_priv)) {
- set_power_wells(power_domains, chv_power_wells);
+ err = set_power_wells(power_domains, chv_power_wells);
} else if (IS_VALLEYVIEW(dev_priv)) {
- set_power_wells(power_domains, vlv_power_wells);
+ err = set_power_wells(power_domains, vlv_power_wells);
} else if (IS_I830(dev_priv)) {
- set_power_wells(power_domains, i830_power_wells);
+ err = set_power_wells(power_domains, i830_power_wells);
} else {
- set_power_wells(power_domains, i9xx_always_on_power_well);
+ err = set_power_wells(power_domains, i9xx_always_on_power_well);
}
- assert_power_well_ids_unique(dev_priv);
-
- return 0;
+ return err;
}
/**
- * intel_power_domains_fini - finalizes the power domain structures
+ * intel_power_domains_cleanup - clean up power domain resources
* @dev_priv: i915 device instance
*
- * Finalizes the power domain structures for @dev_priv depending upon the
- * supported platform. This function also disables runtime pm and ensures that
- * the device stays powered up so that the driver can be reloaded.
+ * Release any resources acquired by intel_power_domains_init()
*/
-void intel_power_domains_fini(struct drm_i915_private *dev_priv)
+void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
- struct device *kdev = &dev_priv->drm.pdev->dev;
-
- /*
- * The i915.ko module is still not prepared to be loaded when
- * the power well is not enabled, so just enable it in case
- * we're going to unload/reload.
- * The following also reacquires the RPM reference the core passed
- * to the driver during loading, which is dropped in
- * intel_runtime_pm_enable(). We have to hand back the control of the
- * device to the core with this reference held.
- */
- intel_display_set_init_power(dev_priv, true);
-
- /* Remove the refcount we took to keep power well support disabled. */
- if (!i915_modparams.disable_power_well)
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-
- /*
- * Remove the refcount we took in intel_runtime_pm_enable() in case
- * the platform doesn't support runtime PM.
- */
- if (!HAS_RUNTIME_PM(dev_priv))
- pm_runtime_put(kdev);
+ kfree(dev_priv->power_domains.power_wells);
}
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
@@ -2936,9 +3127,9 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
mutex_lock(&power_domains->lock);
for_each_power_well(dev_priv, power_well) {
- power_well->ops->sync_hw(dev_priv, power_well);
- power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
- power_well);
+ power_well->desc->ops->sync_hw(dev_priv, power_well);
+ power_well->hw_enabled =
+ power_well->desc->ops->is_enabled(dev_priv, power_well);
}
mutex_unlock(&power_domains->lock);
}
@@ -3360,7 +3551,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
* The AUX IO power wells will be enabled on demand.
*/
mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, ICL_DISP_PW_1);
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_enable(dev_priv, well);
mutex_unlock(&power_domains->lock);
@@ -3372,10 +3563,6 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
/* 7. Setup MBUS. */
icl_mbus_init(dev_priv);
-
- /* 8. CHICKEN_DCPR_1 */
- I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
- CNL_DDI_CLOCK_REG_ACCESS_ON);
}
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
@@ -3401,7 +3588,7 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
* disabled at this point.
*/
mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, ICL_DISP_PW_1);
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_disable(dev_priv, well);
mutex_unlock(&power_domains->lock);
@@ -3416,9 +3603,9 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
struct i915_power_well *cmn_bc =
- lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+ lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
struct i915_power_well *cmn_d =
- lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
+ lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
/*
* DISPLAY_PHY_CONTROL can get corrupted if read. As a
@@ -3441,7 +3628,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
* override and set the lane powerdown bits according to the

* current lane status.
*/
- if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
+ if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
uint32_t status = I915_READ(DPLL(PIPE_A));
unsigned int mask;
@@ -3472,7 +3659,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
dev_priv->chv_phy_assert[DPIO_PHY0] = true;
}
- if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
+ if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
uint32_t status = I915_READ(DPIO_PHY_STATUS);
unsigned int mask;
@@ -3503,20 +3690,20 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
struct i915_power_well *cmn =
- lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+ lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
struct i915_power_well *disp2d =
- lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
+ lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
/* If the display might be already active skip this */
- if (cmn->ops->is_enabled(dev_priv, cmn) &&
- disp2d->ops->is_enabled(dev_priv, disp2d) &&
+ if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
+ disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
I915_READ(DPIO_CTL) & DPIO_CMNRST)
return;
DRM_DEBUG_KMS("toggling display PHY side reset\n");
/* cmnlane needs DPLL registers */
- disp2d->ops->enable(dev_priv, disp2d);
+ disp2d->desc->ops->enable(dev_priv, disp2d);
/*
* From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
@@ -3525,9 +3712,11 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
* Simply ungating isn't enough to reset the PHY enough to get
* ports and lanes running.
*/
- cmn->ops->disable(dev_priv, cmn);
+ cmn->desc->ops->disable(dev_priv, cmn);
}
+static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
+
/**
* intel_power_domains_init_hw - initialize hardware power domain state
* @dev_priv: i915 device instance
@@ -3535,9 +3724,14 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
*
* This function initializes the hardware power domain state and enables all
* power wells belonging to the INIT power domain. Power wells in other
- * domains (and not in the INIT domain) are referenced or disabled during the
- * modeset state HW readout. After that the reference count of each power well
- * must match its HW enabled state, see intel_power_domains_verify_state().
+ * domains (and not in the INIT domain) are referenced or disabled by
+ * intel_modeset_readout_hw_state(). After that the reference count of each
+ * power well must match its HW enabled state, see
+ * intel_power_domains_verify_state().
+ *
+ * It will return with power domains disabled (to be enabled later by
+ * intel_power_domains_enable()) and must be paired with
+ * intel_power_domains_fini_hw().
*/
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
@@ -3563,30 +3757,117 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
mutex_unlock(&power_domains->lock);
}
- /* For now, we need the power well to be always enabled. */
- intel_display_set_init_power(dev_priv, true);
+ /*
+ * Keep all power wells enabled for any dependent HW access during
+ * initialization and to make sure we keep BIOS enabled display HW
+ * resources powered until display HW readout is complete. We drop
+ * this reference in intel_power_domains_enable().
+ */
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
/* Disable power support if the user asked so. */
if (!i915_modparams.disable_power_well)
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
intel_power_domains_sync_hw(dev_priv);
+
power_domains->initializing = false;
}
/**
+ * intel_power_domains_fini_hw - deinitialize hw power domain state
+ * @dev_priv: i915 device instance
+ *
+ * De-initializes the display power domain HW state. It also ensures that the
+ * device stays powered up so that the driver can be reloaded.
+ *
+ * It must be called with power domains already disabled (after a call to
+ * intel_power_domains_disable()) and must be paired with
+ * intel_power_domains_init_hw().
+ */
+void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv)
+{
+ /* Keep the power well enabled, but cancel its rpm wakeref. */
+ intel_runtime_pm_put(dev_priv);
+
+ /* Remove the refcount we took to keep power well support disabled. */
+ if (!i915_modparams.disable_power_well)
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+ intel_power_domains_verify_state(dev_priv);
+}
+
+/**
+ * intel_power_domains_enable - enable toggling of display power wells
+ * @dev_priv: i915 device instance
+ *
+ * Enable the ondemand enabling/disabling of the display power wells. Note that
+ * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
+ * only at specific points of the display modeset sequence, thus they are not
+ * affected by the intel_power_domains_enable()/disable() calls. The purpose
+ * of these functions is to keep the rest of power wells enabled until the end
+ * of display HW readout (which will acquire the power references reflecting
+ * the current HW state).
+ */
+void intel_power_domains_enable(struct drm_i915_private *dev_priv)
+{
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+ intel_power_domains_verify_state(dev_priv);
+}
+
+/**
+ * intel_power_domains_disable - disable toggling of display power wells
+ * @dev_priv: i915 device instance
+ *
+ * Disable the ondemand enabling/disabling of the display power wells. See
+ * intel_power_domains_enable() for which power wells this call controls.
+ */
+void intel_power_domains_disable(struct drm_i915_private *dev_priv)
+{
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+
+ intel_power_domains_verify_state(dev_priv);
+}
+
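/*
 * Illustrative sketch, not part of the patch: per the kernel-doc pairing
 * rules above, the driver load and unload paths are expected to sequence
 * the new entry points roughly as follows (error handling omitted).
 */
static int sketch_display_power_load(struct drm_i915_private *dev_priv)
{
	int ret;

	ret = intel_power_domains_init(dev_priv);
	if (ret)
		return ret;

	intel_power_domains_init_hw(dev_priv, false);
	/* ... display HW readout runs with all power wells enabled ... */
	intel_power_domains_enable(dev_priv);

	return 0;
}

static void sketch_display_power_unload(struct drm_i915_private *dev_priv)
{
	intel_power_domains_disable(dev_priv);
	/* ... display teardown ... */
	intel_power_domains_fini_hw(dev_priv);
	intel_power_domains_cleanup(dev_priv);
}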
+/**
* intel_power_domains_suspend - suspend power domain state
* @dev_priv: i915 device instance
+ * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
*
* This function prepares the hardware power domain state before entering
- * system suspend. It must be paired with intel_power_domains_init_hw().
+ * system suspend.
+ *
+ * It must be called with power domains already disabled (after a call to
+ * intel_power_domains_disable()) and paired with intel_power_domains_resume().
*/
-void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
+void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
+ enum i915_drm_suspend_mode suspend_mode)
{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+ /*
+ * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
+ * support, don't manually deinit the power domains. This also means the
+ * CSR/DMC firmware will stay active; it will power down any HW
+ * resources as required and also enable deeper system power states
+ * that would be blocked if the firmware was inactive.
+ */
+ if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
+ suspend_mode == I915_DRM_SUSPEND_IDLE &&
+ dev_priv->csr.dmc_payload != NULL) {
+ intel_power_domains_verify_state(dev_priv);
+ return;
+ }
+
/*
* Even if power well support was disabled we still want to disable
- * power wells while we are system suspended.
+ * power wells if power domains must be deinitialized for suspend.
*/
- if (!i915_modparams.disable_power_well)
+ if (!i915_modparams.disable_power_well) {
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+ intel_power_domains_verify_state(dev_priv);
+ }
if (IS_ICELAKE(dev_priv))
icl_display_core_uninit(dev_priv);
@@ -3596,8 +3877,36 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
skl_display_core_uninit(dev_priv);
else if (IS_GEN9_LP(dev_priv))
bxt_display_core_uninit(dev_priv);
+
+ power_domains->display_core_suspended = true;
}
+/**
+ * intel_power_domains_resume - resume power domain state
+ * @dev_priv: i915 device instance
+ *
+ * This function resumes the hardware power domain state during system resume.
+ *
+ * It will return with power domain support disabled (to be enabled later by
+ * intel_power_domains_enable()) and must be paired with
+ * intel_power_domains_suspend().
+ */
+void intel_power_domains_resume(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+ if (power_domains->display_core_suspended) {
+ intel_power_domains_init_hw(dev_priv, true);
+ power_domains->display_core_suspended = false;
+ } else {
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ }
+
+ intel_power_domains_verify_state(dev_priv);
+}
+
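/*
 * Illustrative sketch, not part of the patch: on the system suspend/resume
 * path the helpers added above pair up in the same way, with the suspend
 * mode passed through so suspend-to-idle can keep the DMC firmware active.
 */
static void sketch_display_power_suspend(struct drm_i915_private *dev_priv,
					 enum i915_drm_suspend_mode mode)
{
	intel_power_domains_disable(dev_priv);
	/* ... display suspend ... */
	intel_power_domains_suspend(dev_priv, mode);
}

static void sketch_display_power_resume(struct drm_i915_private *dev_priv)
{
	intel_power_domains_resume(dev_priv);
	/* ... display resume and HW readout ... */
	intel_power_domains_enable(dev_priv);
}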
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
@@ -3607,9 +3916,9 @@ static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
enum intel_display_power_domain domain;
DRM_DEBUG_DRIVER("%-25s %d\n",
- power_well->name, power_well->count);
+ power_well->desc->name, power_well->count);
- for_each_power_domain(domain, power_well->domains)
+ for_each_power_domain(domain, power_well->desc->domains)
DRM_DEBUG_DRIVER(" %-23s %d\n",
intel_display_power_domain_str(domain),
power_domains->domain_use_count[domain]);
@@ -3626,7 +3935,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
* acquiring reference counts for any power wells in use and disabling the
* ones left on by BIOS but not required by any active output.
*/
-void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
+static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
@@ -3645,22 +3954,25 @@ void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
* and PW1 power wells) are under FW control, so ignore them,
* since their state can change asynchronously.
*/
- if (!power_well->domains)
+ if (!power_well->desc->domains)
continue;
- enabled = power_well->ops->is_enabled(dev_priv, power_well);
- if ((power_well->count || power_well->always_on) != enabled)
+ enabled = power_well->desc->ops->is_enabled(dev_priv,
+ power_well);
+ if ((power_well->count || power_well->desc->always_on) !=
+ enabled)
DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
- power_well->name, power_well->count, enabled);
+ power_well->desc->name,
+ power_well->count, enabled);
domains_count = 0;
- for_each_power_domain(domain, power_well->domains)
+ for_each_power_domain(domain, power_well->desc->domains)
domains_count += power_domains->domain_use_count[domain];
if (power_well->count != domains_count) {
DRM_ERROR("power well %s refcount/domain refcount mismatch "
"(refcount %d/domains refcount %d)\n",
- power_well->name, power_well->count,
+ power_well->desc->name, power_well->count,
domains_count);
dump_domain_info = true;
}
@@ -3678,6 +3990,14 @@ void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
}
+#else
+
+static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
+{
+}
+
+#endif
+
/**
* intel_runtime_pm_get - grab a runtime pm reference
* @dev_priv: i915 device instance
@@ -3791,14 +4111,24 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
* This function enables runtime pm at the end of the driver load sequence.
*
* Note that this function does currently not enable runtime pm for the
- * subordinate display power domains. That is only done on the first modeset
- * using intel_display_set_init_power().
+ * subordinate display power domains. That is done by
+ * intel_power_domains_enable().
*/
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct device *kdev = &pdev->dev;
+ /*
+ * Disable the system suspend direct complete optimization, which can
+ * leave the device suspended, skipping the driver's suspend handlers,
+ * if the device was already runtime suspended. This is needed due to
+ * the difference in our runtime and system suspend sequences and
+ * because the HDA driver may require us to enable the audio power
+ * domain during system suspend.
+ */
+ dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);
+
pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
pm_runtime_mark_last_busy(kdev);
@@ -3825,3 +4155,18 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
*/
pm_runtime_put_autosuspend(kdev);
}
+
+void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct device *kdev = &pdev->dev;
+
+ /* Transfer rpm ownership back to core */
+ WARN(pm_runtime_get_sync(&dev_priv->drm.pdev->dev) < 0,
+ "Failed to pass rpm ownership back to core\n");
+
+ pm_runtime_dont_use_autosuspend(kdev);
+
+ if (!HAS_RUNTIME_PM(dev_priv))
+ pm_runtime_put(kdev);
+}
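The new intel_runtime_pm_disable() above is the teardown counterpart of intel_runtime_pm_enable(); a minimal sketch of the assumed load/unload pairing follows, with hypothetical wrapper names (only the two intel_runtime_pm_* calls come from this patch).

/*
 * Minimal sketch, not part of the patch: assumed load/unload pairing
 * for the runtime-pm helpers above. example_load() and example_unload()
 * are hypothetical.
 */
static void example_load(struct drm_i915_private *i915)
{
	/* Ends by dropping the wakeref, handing control to runtime PM. */
	intel_runtime_pm_enable(i915);
}

static void example_unload(struct drm_i915_private *i915)
{
	/* Re-takes the wakeref so rpm ownership returns to the PM core. */
	intel_runtime_pm_disable(i915);
}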
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index f7026e887fa9..9600ccfc5b76 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -83,6 +83,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
DEFINE_WAIT(wait);
+ u32 psr_status;
vblank_start = adjusted_mode->crtc_vblank_start;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -104,8 +105,9 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
* VBL interrupts will start the PSR exit and prevent a PSR
* re-entry as well.
*/
- if (intel_psr_wait_for_idle(new_crtc_state))
- DRM_ERROR("PSR idle timed out, atomic update may fail\n");
+ if (intel_psr_wait_for_idle(new_crtc_state, &psr_status))
+ DRM_ERROR("PSR idle timed out 0x%x, atomic update may fail\n",
+ psr_status);
local_irq_disable();
@@ -957,10 +959,10 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
}
static int
-intel_check_sprite_plane(struct intel_plane *plane,
- struct intel_crtc_state *crtc_state,
+intel_check_sprite_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
+ struct intel_plane *plane = to_intel_plane(state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = state->base.fb;
@@ -1407,8 +1409,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
- if (modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Y_TILED_CCS)
+ if (is_ccs_modifier(modifier))
return true;
/* fall through */
case DRM_FORMAT_RGB565:
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
index 6e8e0b546743..fd496416087c 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -222,7 +222,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
goto fail;
}
- ggtt_pin_bias = to_i915(uc_fw->obj->base.dev)->guc.ggtt_pin_bias;
+ ggtt_pin_bias = to_i915(uc_fw->obj->base.dev)->ggtt.pin_bias;
vma = i915_gem_object_ggtt_pin(uc_fw->obj, NULL, 0, 0,
PIN_OFFSET_BIAS | ggtt_pin_bias);
if (IS_ERR(vma)) {
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 50b39aa4ffb8..3ad302c66254 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -283,14 +283,24 @@ fw_domains_reset(struct drm_i915_private *i915,
fw_domain_reset(i915, d);
}
+static inline u32 gt_thread_status(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = __raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG);
+ val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
+
+ return val;
+}
+
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
- /* w/a for a sporadic read returning 0 by waiting for the GT
+ /*
+ * w/a for a sporadic read returning 0 by waiting for the GT
* thread to wake up.
*/
- if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
- GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
- DRM_ERROR("GT thread status wait timed out\n");
+ WARN_ONCE(wait_for_atomic_us(gt_thread_status(dev_priv) == 0, 5000),
+ "GT thread status wait timed out\n");
}
static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
@@ -1729,7 +1739,7 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
}
static void i915_stop_engines(struct drm_i915_private *dev_priv,
- unsigned engine_mask)
+ unsigned int engine_mask)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -1749,7 +1759,9 @@ static bool i915_in_reset(struct pci_dev *pdev)
return gdrst & GRDOM_RESET_STATUS;
}
-static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
+static int i915_do_reset(struct drm_i915_private *dev_priv,
+ unsigned int engine_mask,
+ unsigned int retry)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
int err;
@@ -1776,7 +1788,9 @@ static bool g4x_reset_complete(struct pci_dev *pdev)
return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
-static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
+static int g33_do_reset(struct drm_i915_private *dev_priv,
+ unsigned int engine_mask,
+ unsigned int retry)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
@@ -1784,7 +1798,9 @@ static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
return wait_for(g4x_reset_complete(pdev), 500);
}
-static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
+static int g4x_do_reset(struct drm_i915_private *dev_priv,
+ unsigned int engine_mask,
+ unsigned int retry)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
int ret;
@@ -1821,7 +1837,8 @@ out:
}
static int ironlake_do_reset(struct drm_i915_private *dev_priv,
- unsigned engine_mask)
+ unsigned int engine_mask,
+ unsigned int retry)
{
int ret;
@@ -1877,6 +1894,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
* gen6_reset_engines - reset individual engines
* @dev_priv: i915 device
* @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
+ * @retry: the count of previous attempts to reset.
*
* This function will reset the individual engines that are set in engine_mask.
* If you provide ALL_ENGINES as mask, full global domain reset will be issued.
@@ -1887,7 +1905,8 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
* Returns 0 on success, nonzero on error.
*/
static int gen6_reset_engines(struct drm_i915_private *dev_priv,
- unsigned engine_mask)
+ unsigned int engine_mask,
+ unsigned int retry)
{
struct intel_engine_cs *engine;
const u32 hw_engine_mask[I915_NUM_ENGINES] = {
@@ -1926,7 +1945,7 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
* Returns 0 on success, nonzero on error.
*/
static int gen11_reset_engines(struct drm_i915_private *dev_priv,
- unsigned engine_mask)
+ unsigned int engine_mask)
{
struct intel_engine_cs *engine;
const u32 hw_engine_mask[I915_NUM_ENGINES] = {
@@ -2066,7 +2085,7 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
return ret;
}
-static int gen8_reset_engine_start(struct intel_engine_cs *engine)
+static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
@@ -2086,7 +2105,7 @@ static int gen8_reset_engine_start(struct intel_engine_cs *engine)
return ret;
}
-static void gen8_reset_engine_cancel(struct intel_engine_cs *engine)
+static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -2094,33 +2113,56 @@ static void gen8_reset_engine_cancel(struct intel_engine_cs *engine)
_MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}
+static int reset_engines(struct drm_i915_private *i915,
+ unsigned int engine_mask,
+ unsigned int retry)
+{
+ if (INTEL_GEN(i915) >= 11)
+ return gen11_reset_engines(i915, engine_mask);
+ else
+ return gen6_reset_engines(i915, engine_mask, retry);
+}
+
static int gen8_reset_engines(struct drm_i915_private *dev_priv,
- unsigned engine_mask)
+ unsigned int engine_mask,
+ unsigned int retry)
{
struct intel_engine_cs *engine;
+ const bool reset_non_ready = retry >= 1;
unsigned int tmp;
int ret;
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
- if (gen8_reset_engine_start(engine)) {
- ret = -EIO;
- goto not_ready;
- }
+ ret = gen8_engine_reset_prepare(engine);
+ if (ret && !reset_non_ready)
+ goto skip_reset;
+
+ /*
+ * If this is not the first failed attempt to prepare,
+ * we decide to proceed anyway.
+ *
+ * By doing so we risk context corruption and, on
+ * some gens (kbl), a possible system hang if the
+ * reset happens during active bb execution.
+ *
+ * We would rather accept context corruption than a
+ * failed reset with a wedged driver/gpu. The active
+ * bb execution case should be covered by the
+ * i915_stop_engines() call we make before the reset.
+ */
}
- if (INTEL_GEN(dev_priv) >= 11)
- ret = gen11_reset_engines(dev_priv, engine_mask);
- else
- ret = gen6_reset_engines(dev_priv, engine_mask);
+ ret = reset_engines(dev_priv, engine_mask, retry);
-not_ready:
+skip_reset:
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
- gen8_reset_engine_cancel(engine);
+ gen8_engine_reset_cancel(engine);
return ret;
}
-typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
+typedef int (*reset_func)(struct drm_i915_private *,
+ unsigned int engine_mask, unsigned int retry);
static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
@@ -2143,12 +2185,15 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
return NULL;
}
-int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
+int intel_gpu_reset(struct drm_i915_private *dev_priv,
+ const unsigned int engine_mask)
{
reset_func reset = intel_get_gpu_reset(dev_priv);
- int retry;
+ unsigned int retry;
int ret;
+ GEM_BUG_ON(!engine_mask);
+
/*
* We want to perform per-engine reset from atomic context (e.g.
* softirq), which imposes the constraint that we cannot sleep.
@@ -2190,8 +2235,9 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
ret = -ENODEV;
if (reset) {
- GEM_TRACE("engine_mask=%x\n", engine_mask);
- ret = reset(dev_priv, engine_mask);
+ ret = reset(dev_priv, engine_mask, retry);
+ GEM_TRACE("engine_mask=%x, ret=%d, retry=%d\n",
+ engine_mask, ret, retry);
}
if (ret != -ETIMEDOUT || engine_mask != ALL_ENGINES)
break;
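The retry plumbing above threads a retry count from intel_gpu_reset() down to the per-gen reset_func; the sketch below restates the assumed dispatch/retry shape in isolation. The helper name and the retry bound are illustrative, not taken from this patch.

/*
 * Minimal sketch, not part of the patch: the assumed shape of the
 * reset dispatch with retries. example_gpu_reset() and the retry
 * bound are illustrative only.
 */
static int example_gpu_reset(struct drm_i915_private *i915,
			     unsigned int engine_mask)
{
	reset_func reset = intel_get_gpu_reset(i915);
	unsigned int retry;
	int ret = -ENODEV;

	for (retry = 0; reset && retry < 3; retry++) {
		/* Later retries may reset engines that failed to prepare. */
		ret = reset(i915, engine_mask, retry);
		if (ret != -ETIMEDOUT)
			break;
	}

	return ret;
}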
@@ -2237,20 +2283,28 @@ bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
- if (unlikely(i915_modparams.mmio_debug ||
- dev_priv->uncore.unclaimed_mmio_check <= 0))
- return false;
+ bool ret = false;
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+
+ if (unlikely(dev_priv->uncore.unclaimed_mmio_check <= 0))
+ goto out;
if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
- DRM_DEBUG("Unclaimed register detected, "
- "enabling oneshot unclaimed register reporting. "
- "Please use i915.mmio_debug=N for more information.\n");
- i915_modparams.mmio_debug++;
+ if (!i915_modparams.mmio_debug) {
+ DRM_DEBUG("Unclaimed register detected, "
+ "enabling oneshot unclaimed register reporting. "
+ "Please use i915.mmio_debug=N for more information.\n");
+ i915_modparams.mmio_debug++;
+ }
dev_priv->uncore.unclaimed_mmio_check--;
- return true;
+ ret = true;
}
- return false;
+out:
+ spin_unlock_irq(&dev_priv->uncore.lock);
+
+ return ret;
}
static enum forcewake_domains
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index 74bf76f3fddc..92cb82dd0c07 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -163,8 +163,14 @@ int intel_wopcm_init(struct intel_wopcm *wopcm)
u32 guc_wopcm_rsvd;
int err;
+ if (!USES_GUC(dev_priv))
+ return 0;
+
GEM_BUG_ON(!wopcm->size);
+ if (i915_inject_load_failure())
+ return -E2BIG;
+
if (guc_fw_size >= wopcm->size) {
DRM_ERROR("GuC FW (%uKiB) is too big to fit in WOPCM.",
guc_fw_size / 1024);
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 7efb326badcd..e272127783fe 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -906,7 +906,11 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
if (IS_ERR(obj))
return ERR_CAST(obj);
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ err = i915_gem_object_set_to_wc_domain(obj, true);
+ if (err)
+ goto err;
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto err;
@@ -936,13 +940,10 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
}
*cmd = MI_BATCH_BUFFER_END;
+ i915_gem_chipset_flush(i915);
i915_gem_object_unpin_map(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- goto err;
-
batch = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
new file mode 100644
index 000000000000..d0aa19d17653
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -0,0 +1,221 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/random.h>
+
+#include "../i915_selftest.h"
+
+#include "mock_context.h"
+#include "igt_flush_test.h"
+
+static int switch_to_context(struct drm_i915_private *i915,
+ struct i915_gem_context *ctx)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ intel_runtime_pm_get(i915);
+
+ for_each_engine(engine, i915, id) {
+ struct i915_request *rq;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+ }
+
+ intel_runtime_pm_put(i915);
+
+ return err;
+}
+
+static void trash_stolen(struct drm_i915_private *i915)
+{
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ const u64 slot = ggtt->error_capture.start;
+ const resource_size_t size = resource_size(&i915->dsm);
+ unsigned long page;
+ u32 prng = 0x12345678;
+
+ for (page = 0; page < size; page += PAGE_SIZE) {
+ const dma_addr_t dma = i915->dsm.start + page;
+ u32 __iomem *s;
+ int x;
+
+ ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
+
+ s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
+ for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) {
+ prng = next_pseudo_random32(prng);
+ iowrite32(prng, &s[x]);
+ }
+ io_mapping_unmap_atomic(s);
+ }
+
+ ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+}
+
+static void simulate_hibernate(struct drm_i915_private *i915)
+{
+ intel_runtime_pm_get(i915);
+
+ /*
+ * As a final sting in the tail, invalidate stolen. Under a real S4,
+ * stolen is lost and needs to be refilled on resume. However, under
+ * CI we merely do S4-device testing (as full S4 is too unreliable
+ * for automated testing across a cluster), so to simulate the effect
+ * of stolen being trashed across S4, we trash it ourselves.
+ */
+ trash_stolen(i915);
+
+ intel_runtime_pm_put(i915);
+}
+
+static int pm_prepare(struct drm_i915_private *i915)
+{
+ int err = 0;
+
+ if (i915_gem_suspend(i915)) {
+ pr_err("i915_gem_suspend failed\n");
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static void pm_suspend(struct drm_i915_private *i915)
+{
+ intel_runtime_pm_get(i915);
+
+ i915_gem_suspend_gtt_mappings(i915);
+ i915_gem_suspend_late(i915);
+
+ intel_runtime_pm_put(i915);
+}
+
+static void pm_hibernate(struct drm_i915_private *i915)
+{
+ intel_runtime_pm_get(i915);
+
+ i915_gem_suspend_gtt_mappings(i915);
+
+ i915_gem_freeze(i915);
+ i915_gem_freeze_late(i915);
+
+ intel_runtime_pm_put(i915);
+}
+
+static void pm_resume(struct drm_i915_private *i915)
+{
+ /*
+ * Both suspend and hibernate follow the same wakeup path and assume
+ * that runtime-pm just works.
+ */
+ intel_runtime_pm_get(i915);
+
+ intel_engines_sanitize(i915);
+ i915_gem_sanitize(i915);
+ i915_gem_resume(i915);
+
+ intel_runtime_pm_put(i915);
+}
+
+static int igt_gem_suspend(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx;
+ struct drm_file *file;
+ int err;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ err = -ENOMEM;
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx = live_context(i915, file);
+ if (!IS_ERR(ctx))
+ err = switch_to_context(i915, ctx);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err)
+ goto out;
+
+ err = pm_prepare(i915);
+ if (err)
+ goto out;
+
+ pm_suspend(i915);
+
+ /* Here be dragons! Note that with S3RST any S3 may become S4! */
+ simulate_hibernate(i915);
+
+ pm_resume(i915);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = switch_to_context(i915, ctx);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+out:
+ mock_file_free(i915, file);
+ return err;
+}
+
+static int igt_gem_hibernate(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx;
+ struct drm_file *file;
+ int err;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ err = -ENOMEM;
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx = live_context(i915, file);
+ if (!IS_ERR(ctx))
+ err = switch_to_context(i915, ctx);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err)
+ goto out;
+
+ err = pm_prepare(i915);
+ if (err)
+ goto out;
+
+ pm_hibernate(i915);
+
+ /* Here be dragons! */
+ simulate_hibernate(i915);
+
+ pm_resume(i915);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = switch_to_context(i915, ctx);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+out:
+ mock_file_free(i915, file);
+ return err;
+}
+
+int i915_gem_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_gem_suspend),
+ SUBTEST(igt_gem_hibernate),
+ };
+
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index 3a095c37c120..4e6a221063ac 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -33,7 +33,8 @@ static int cpu_set(struct drm_i915_gem_object *obj,
{
unsigned int needs_clflush;
struct page *page;
- u32 *map;
+ void *map;
+ u32 *cpu;
int err;
err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
@@ -42,24 +43,19 @@ static int cpu_set(struct drm_i915_gem_object *obj,
page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
+ cpu = map + offset_in_page(offset);
- if (needs_clflush & CLFLUSH_BEFORE) {
- mb();
- clflush(map+offset_in_page(offset) / sizeof(*map));
- mb();
- }
+ if (needs_clflush & CLFLUSH_BEFORE)
+ drm_clflush_virt_range(cpu, sizeof(*cpu));
- map[offset_in_page(offset) / sizeof(*map)] = v;
+ *cpu = v;
- if (needs_clflush & CLFLUSH_AFTER) {
- mb();
- clflush(map+offset_in_page(offset) / sizeof(*map));
- mb();
- }
+ if (needs_clflush & CLFLUSH_AFTER)
+ drm_clflush_virt_range(cpu, sizeof(*cpu));
kunmap_atomic(map);
-
i915_gem_obj_finish_shmem_access(obj);
+
return 0;
}
@@ -69,7 +65,8 @@ static int cpu_get(struct drm_i915_gem_object *obj,
{
unsigned int needs_clflush;
struct page *page;
- u32 *map;
+ void *map;
+ u32 *cpu;
int err;
err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
@@ -78,17 +75,16 @@ static int cpu_get(struct drm_i915_gem_object *obj,
page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
+ cpu = map + offset_in_page(offset);
- if (needs_clflush & CLFLUSH_BEFORE) {
- mb();
- clflush(map+offset_in_page(offset) / sizeof(*map));
- mb();
- }
+ if (needs_clflush & CLFLUSH_BEFORE)
+ drm_clflush_virt_range(cpu, sizeof(*cpu));
- *v = map[offset_in_page(offset) / sizeof(*map)];
- kunmap_atomic(map);
+ *v = *cpu;
+ kunmap_atomic(map);
i915_gem_obj_finish_shmem_access(obj);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index ba4f322d56b8..6d3516d5bff9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -282,7 +282,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
view.partial.offset,
view.partial.size,
vma->size >> PAGE_SHIFT,
- tile_row_pages(obj),
+ tile->tiling ? tile_row_pages(obj) : 0,
vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
offset >> PAGE_SHIFT,
(unsigned int)offset_in_page(offset),
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index a00e2bd08bce..a15713cae3b3 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -17,6 +17,7 @@ selftest(objects, i915_gem_object_live_selftests)
selftest(dmabuf, i915_gem_dmabuf_live_selftests)
selftest(coherency, i915_gem_coherency_live_selftests)
selftest(gtt, i915_gem_gtt_live_selftests)
+selftest(gem, i915_gem_live_selftests)
selftest(evict, i915_gem_evict_live_selftests)
selftest(hugepages, i915_gem_huge_page_live_selftests)
selftest(contexts, i915_gem_context_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
index 570e325af93e..cdbc8f134e5e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
@@ -611,17 +611,9 @@ static const char *mock_name(struct dma_fence *fence)
return "mock";
}
-static bool mock_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static const struct dma_fence_ops mock_fence_ops = {
.get_driver_name = mock_name,
.get_timeline_name = mock_name,
- .enable_signaling = mock_enable_signaling,
- .wait = dma_fence_default_wait,
- .release = dma_fence_free,
};
static DEFINE_SPINLOCK(mock_fence_lock);
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index 407c98fb9170..90ba88c972cf 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -65,6 +65,40 @@ static int check_all_doorbells(struct intel_guc *guc)
return 0;
}
+static int ring_doorbell_nop(struct intel_guc_client *client)
+{
+ struct guc_process_desc *desc = __get_process_desc(client);
+ int err;
+
+ client->use_nop_wqi = true;
+
+ spin_lock_irq(&client->wq_lock);
+
+ guc_wq_item_append(client, 0, 0, 0, 0);
+ guc_ring_doorbell(client);
+
+ spin_unlock_irq(&client->wq_lock);
+
+ client->use_nop_wqi = false;
+
+ /* if there are no issues, GuC will update the WQ head and keep the
+ * WQ in active status
+ */
+ err = wait_for(READ_ONCE(desc->head) == READ_ONCE(desc->tail), 10);
+ if (err) {
+ pr_err("doorbell %u ring failed!\n", client->doorbell_id);
+ return -EIO;
+ }
+
+ if (desc->wq_status != WQ_STATUS_ACTIVE) {
+ pr_err("doorbell %u ring put WQ in bad state (%u)!\n",
+ client->doorbell_id, desc->wq_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
/*
* Basic client sanity check, handy to validate create_clients.
*/
@@ -332,6 +366,10 @@ static int igt_guc_doorbells(void *arg)
err = check_all_doorbells(guc);
if (err)
goto out;
+
+ err = ring_doorbell_nop(clients[i]);
+ if (err)
+ goto out;
}
out:
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 65d66cdedd26..db378226ac10 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -1018,8 +1018,41 @@ static int evict_vma(void *data)
return err;
}
+static int evict_fence(void *data)
+{
+ struct evict_vma *arg = data;
+ struct drm_i915_private *i915 = arg->vma->vm->i915;
+ int err;
+
+ complete(&arg->completion);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ /* Mark the fence register as dirty to force the mmio update. */
+ err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512);
+ if (err) {
+ pr_err("Invalid Y-tiling settings; err:%d\n", err);
+ goto out_unlock;
+ }
+
+ err = i915_vma_pin_fence(arg->vma);
+ if (err) {
+ pr_err("Unable to pin Y-tiled fence; err:%d\n", err);
+ goto out_unlock;
+ }
+
+ i915_vma_unpin_fence(arg->vma);
+
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return err;
+}
+
static int __igt_reset_evict_vma(struct drm_i915_private *i915,
- struct i915_address_space *vm)
+ struct i915_address_space *vm,
+ int (*fn)(void *),
+ unsigned int flags)
{
struct drm_i915_gem_object *obj;
struct task_struct *tsk = NULL;
@@ -1040,12 +1073,20 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
if (err)
goto unlock;
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(i915, SZ_1M);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto fini;
}
+ if (flags & EXEC_OBJECT_NEEDS_FENCE) {
+ err = i915_gem_object_set_tiling(obj, I915_TILING_X, 512);
+ if (err) {
+ pr_err("Invalid X-tiling settings; err:%d\n", err);
+ goto out_obj;
+ }
+ }
+
arg.vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(arg.vma)) {
err = PTR_ERR(arg.vma);
@@ -1059,11 +1100,28 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
}
err = i915_vma_pin(arg.vma, 0, 0,
- i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER);
- if (err)
+ i915_vma_is_ggtt(arg.vma) ?
+ PIN_GLOBAL | PIN_MAPPABLE :
+ PIN_USER);
+ if (err) {
+ i915_request_add(rq);
goto out_obj;
+ }
+
+ if (flags & EXEC_OBJECT_NEEDS_FENCE) {
+ err = i915_vma_pin_fence(arg.vma);
+ if (err) {
+ pr_err("Unable to pin X-tiled fence; err:%d\n", err);
+ i915_vma_unpin(arg.vma);
+ i915_request_add(rq);
+ goto out_obj;
+ }
+ }
+
+ err = i915_vma_move_to_active(arg.vma, rq, flags);
- err = i915_vma_move_to_active(arg.vma, rq, EXEC_OBJECT_WRITE);
+ if (flags & EXEC_OBJECT_NEEDS_FENCE)
+ i915_vma_unpin_fence(arg.vma);
i915_vma_unpin(arg.vma);
i915_request_get(rq);
@@ -1086,7 +1144,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
init_completion(&arg.completion);
- tsk = kthread_run(evict_vma, &arg, "igt/evict_vma");
+ tsk = kthread_run(fn, &arg, "igt/evict_vma");
if (IS_ERR(tsk)) {
err = PTR_ERR(tsk);
tsk = NULL;
@@ -1137,29 +1195,47 @@ static int igt_reset_evict_ggtt(void *arg)
{
struct drm_i915_private *i915 = arg;
- return __igt_reset_evict_vma(i915, &i915->ggtt.vm);
+ return __igt_reset_evict_vma(i915, &i915->ggtt.vm,
+ evict_vma, EXEC_OBJECT_WRITE);
}
static int igt_reset_evict_ppgtt(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx;
+ struct drm_file *file;
int err;
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
mutex_lock(&i915->drm.struct_mutex);
- ctx = kernel_context(i915);
+ ctx = live_context(i915, file);
mutex_unlock(&i915->drm.struct_mutex);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
err = 0;
if (ctx->ppgtt) /* aliasing == global gtt locking, covered above */
- err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm);
+ err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm,
+ evict_vma, EXEC_OBJECT_WRITE);
- kernel_context_close(ctx);
+out:
+ mock_file_free(i915, file);
return err;
}
+static int igt_reset_evict_fence(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+
+ return __igt_reset_evict_vma(i915, &i915->ggtt.vm,
+ evict_fence, EXEC_OBJECT_NEEDS_FENCE);
+}
+
static int wait_for_others(struct drm_i915_private *i915,
struct intel_engine_cs *exclude)
{
@@ -1409,6 +1485,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_reset_wait),
SUBTEST(igt_reset_evict_ggtt),
SUBTEST(igt_reset_evict_ppgtt),
+ SUBTEST(igt_reset_evict_fence),
SUBTEST(igt_handle_error),
};
bool saved_hangcheck;
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index 8904f1ce64e3..d937bdff26f9 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -43,6 +43,7 @@ mock_context(struct drm_i915_private *i915,
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
+ INIT_LIST_HEAD(&ctx->hw_id_link);
for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
struct intel_context *ce = &ctx->__engine[n];
@@ -50,11 +51,9 @@ mock_context(struct drm_i915_private *i915,
ce->gem_context = ctx;
}
- ret = ida_simple_get(&i915->contexts.hw_ida,
- 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+ ret = i915_gem_context_pin_hw_id(ctx);
if (ret < 0)
goto err_handles;
- ctx->hw_id = ret;
if (name) {
ctx->name = kstrdup(name, GFP_KERNEL);
@@ -85,11 +84,7 @@ void mock_context_close(struct i915_gem_context *ctx)
void mock_init_contexts(struct drm_i915_private *i915)
{
- INIT_LIST_HEAD(&i915->contexts.list);
- ida_init(&i915->contexts.hw_ida);
-
- INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
- init_llist_head(&i915->contexts.free_list);
+ init_contexts(i915);
}
struct i915_gem_context *
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index a140ea5c3a7c..6ae418c76015 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -118,6 +118,8 @@ void mock_init_ggtt(struct drm_i915_private *i915)
ggtt->vm.vma_ops.clear_pages = clear_pages;
i915_address_space_init(&ggtt->vm, i915);
+
+ ggtt->vm.is_ggtt = true;
}
void mock_fini_ggtt(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 203f247d4854..40605fdf0e33 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -281,16 +281,13 @@ static void ipu_plane_state_reset(struct drm_plane *plane)
ipu_state = to_ipu_plane_state(plane->state);
__drm_atomic_helper_plane_destroy_state(plane->state);
kfree(ipu_state);
+ plane->state = NULL;
}
ipu_state = kzalloc(sizeof(*ipu_state), GFP_KERNEL);
- if (ipu_state) {
- ipu_state->base.plane = plane;
- ipu_state->base.rotation = DRM_MODE_ROTATE_0;
- }
-
- plane->state = &ipu_state->base;
+ if (ipu_state)
+ __drm_atomic_helper_plane_reset(plane, &ipu_state->base);
}
static struct drm_plane_state *
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 978782a77629..28d191192945 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -132,6 +132,11 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
writel(0x0, comp->regs + DISP_REG_OVL_RST);
}
+static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
+{
+ return 4;
+}
+
static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
{
unsigned int reg;
@@ -157,6 +162,11 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx)
static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
{
+ /* The MEM_MODE_INPUT_FORMAT_XXX values returned by this switch
+ * are defined in the MediaTek HW data sheet.
+ * The letter order in XXX bears no relation to the data
+ * arrangement in memory.
+ */
switch (fmt) {
default:
case DRM_FORMAT_RGB565:
@@ -221,6 +231,7 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
.stop = mtk_ovl_stop,
.enable_vblank = mtk_ovl_enable_vblank,
.disable_vblank = mtk_ovl_disable_vblank,
+ .layer_nr = mtk_ovl_layer_nr,
.layer_on = mtk_ovl_layer_on,
.layer_off = mtk_ovl_layer_off,
.layer_config = mtk_ovl_layer_config,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 585943c81e1f..b0a5cffe345a 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -31,14 +31,31 @@
#define RDMA_REG_UPDATE_INT BIT(0)
#define DISP_REG_RDMA_GLOBAL_CON 0x0010
#define RDMA_ENGINE_EN BIT(0)
+#define RDMA_MODE_MEMORY BIT(1)
#define DISP_REG_RDMA_SIZE_CON_0 0x0014
+#define RDMA_MATRIX_ENABLE BIT(17)
+#define RDMA_MATRIX_INT_MTX_SEL GENMASK(23, 20)
+#define RDMA_MATRIX_INT_MTX_BT601_to_RGB (6 << 20)
#define DISP_REG_RDMA_SIZE_CON_1 0x0018
#define DISP_REG_RDMA_TARGET_LINE 0x001c
+#define DISP_RDMA_MEM_CON 0x0024
+#define MEM_MODE_INPUT_FORMAT_RGB565 (0x000 << 4)
+#define MEM_MODE_INPUT_FORMAT_RGB888 (0x001 << 4)
+#define MEM_MODE_INPUT_FORMAT_RGBA8888 (0x002 << 4)
+#define MEM_MODE_INPUT_FORMAT_ARGB8888 (0x003 << 4)
+#define MEM_MODE_INPUT_FORMAT_UYVY (0x004 << 4)
+#define MEM_MODE_INPUT_FORMAT_YUYV (0x005 << 4)
+#define MEM_MODE_INPUT_SWAP BIT(8)
+#define DISP_RDMA_MEM_SRC_PITCH 0x002c
+#define DISP_RDMA_MEM_GMC_SETTING_0 0x0030
#define DISP_REG_RDMA_FIFO_CON 0x0040
#define RDMA_FIFO_UNDERFLOW_EN BIT(31)
#define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16)
#define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16)
#define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size)
+#define DISP_RDMA_MEM_START_ADDR 0x0f00
+
+#define RDMA_MEM_GMC 0x40402020
struct mtk_disp_rdma_data {
unsigned int fifo_size;
@@ -138,12 +155,87 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON);
}
+static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
+ unsigned int fmt)
+{
+ /* The MEM_MODE_INPUT_FORMAT_XXX values returned by this switch
+ * are defined in the MediaTek HW data sheet.
+ * The letter order in XXX bears no relation to the data
+ * arrangement in memory.
+ */
+ switch (fmt) {
+ default:
+ case DRM_FORMAT_RGB565:
+ return MEM_MODE_INPUT_FORMAT_RGB565;
+ case DRM_FORMAT_BGR565:
+ return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP;
+ case DRM_FORMAT_RGB888:
+ return MEM_MODE_INPUT_FORMAT_RGB888;
+ case DRM_FORMAT_BGR888:
+ return MEM_MODE_INPUT_FORMAT_RGB888 | MEM_MODE_INPUT_SWAP;
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_RGBA8888:
+ return MEM_MODE_INPUT_FORMAT_ARGB8888;
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_BGRA8888:
+ return MEM_MODE_INPUT_FORMAT_ARGB8888 | MEM_MODE_INPUT_SWAP;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ return MEM_MODE_INPUT_FORMAT_RGBA8888;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP;
+ case DRM_FORMAT_UYVY:
+ return MEM_MODE_INPUT_FORMAT_UYVY;
+ case DRM_FORMAT_YUYV:
+ return MEM_MODE_INPUT_FORMAT_YUYV;
+ }
+}
+
+static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp)
+{
+ return 1;
+}
+
+static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
+ struct mtk_plane_state *state)
+{
+ struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
+ struct mtk_plane_pending_state *pending = &state->pending;
+ unsigned int addr = pending->addr;
+ unsigned int pitch = pending->pitch & 0xffff;
+ unsigned int fmt = pending->format;
+ unsigned int con;
+
+ con = rdma_fmt_convert(rdma, fmt);
+ writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON);
+
+ if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) {
+ rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE);
+ rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_INT_MTX_SEL,
+ RDMA_MATRIX_INT_MTX_BT601_to_RGB);
+ } else {
+ rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_ENABLE, 0);
+ }
+
+ writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR);
+ writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH);
+ writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0);
+ rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON,
+ RDMA_MODE_MEMORY, RDMA_MODE_MEMORY);
+}
+
static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = {
.config = mtk_rdma_config,
.start = mtk_rdma_start,
.stop = mtk_rdma_stop,
.enable_vblank = mtk_rdma_enable_vblank,
.disable_vblank = mtk_rdma_disable_vblank,
+ .layer_nr = mtk_rdma_layer_nr,
+ .layer_config = mtk_rdma_layer_config,
};
static int mtk_disp_rdma_bind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 2d6aa150a9ff..0b976dfd04df 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -45,7 +45,8 @@ struct mtk_drm_crtc {
bool pending_needs_vblank;
struct drm_pending_vblank_event *event;
- struct drm_plane planes[OVL_LAYER_NR];
+ struct drm_plane *planes;
+ unsigned int layer_nr;
bool pending_planes;
void __iomem *config_regs;
@@ -171,9 +172,9 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
- mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base);
+ mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base);
return 0;
}
@@ -181,9 +182,9 @@ static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
- mtk_ddp_comp_disable_vblank(ovl);
+ mtk_ddp_comp_disable_vblank(comp);
}
static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
@@ -286,7 +287,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
}
/* Initially configure all planes */
- for (i = 0; i < OVL_LAYER_NR; i++) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
@@ -334,7 +335,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
- struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
unsigned int i;
/*
@@ -343,7 +344,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
* queue update module registers on vblank.
*/
if (state->pending_config) {
- mtk_ddp_comp_config(ovl, state->pending_width,
+ mtk_ddp_comp_config(comp, state->pending_width,
state->pending_height,
state->pending_vrefresh, 0);
@@ -351,14 +352,14 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
}
if (mtk_crtc->pending_planes) {
- for (i = 0; i < OVL_LAYER_NR; i++) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
plane_state = to_mtk_plane_state(plane->state);
if (plane_state->pending.config) {
- mtk_ddp_comp_layer_config(ovl, i, plane_state);
+ mtk_ddp_comp_layer_config(comp, i, plane_state);
plane_state->pending.config = false;
}
}
@@ -370,12 +371,12 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
int ret;
DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
- ret = mtk_smi_larb_get(ovl->larb_dev);
+ ret = mtk_smi_larb_get(comp->larb_dev);
if (ret) {
DRM_ERROR("Failed to get larb: %d\n", ret);
return;
@@ -383,7 +384,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
ret = mtk_crtc_ddp_hw_init(mtk_crtc);
if (ret) {
- mtk_smi_larb_put(ovl->larb_dev);
+ mtk_smi_larb_put(comp->larb_dev);
return;
}
@@ -395,7 +396,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
int i;
DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
@@ -403,7 +404,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
return;
/* Set all pending plane state to disabled */
- for (i = 0; i < OVL_LAYER_NR; i++) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
@@ -418,7 +419,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
mtk_crtc_ddp_hw_fini(mtk_crtc);
- mtk_smi_larb_put(ovl->larb_dev);
+ mtk_smi_larb_put(comp->larb_dev);
mtk_crtc->enabled = false;
}
@@ -450,7 +451,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
if (mtk_crtc->event)
mtk_crtc->pending_needs_vblank = true;
- for (i = 0; i < OVL_LAYER_NR; i++) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
@@ -516,7 +517,7 @@ err_cleanup_crtc:
return ret;
}
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl)
+void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_drm_private *priv = crtc->dev->dev_private;
@@ -598,7 +599,12 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mtk_crtc->ddp_comp[i] = comp;
}
- for (zpos = 0; zpos < OVL_LAYER_NR; zpos++) {
+ mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
+ mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr *
+ sizeof(struct drm_plane),
+ GFP_KERNEL);
+
+ for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) {
type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY :
(zpos == 1) ? DRM_PLANE_TYPE_CURSOR :
DRM_PLANE_TYPE_OVERLAY;
@@ -609,7 +615,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
}
ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
- &mtk_crtc->planes[1], pipe);
+ mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
+ NULL, pipe);
if (ret < 0)
goto unprepare;
drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index 9d9410c67ae9..091adb2087eb 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -18,13 +18,12 @@
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_plane.h"
-#define OVL_LAYER_NR 4
#define MTK_LUT_SIZE 512
#define MTK_MAX_BPC 10
#define MTK_MIN_BPC 3
void mtk_drm_crtc_commit(struct drm_crtc *crtc);
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl);
+void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp);
int mtk_drm_crtc_create(struct drm_device *drm_dev,
const enum mtk_ddp_comp_id *path,
unsigned int path_len);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 87e4191c250e..546b3e3b300b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -106,6 +106,8 @@
#define OVL1_MOUT_EN_COLOR1 0x1
#define GAMMA_MOUT_EN_RDMA1 0x1
#define RDMA0_SOUT_DPI0 0x2
+#define RDMA0_SOUT_DPI1 0x3
+#define RDMA0_SOUT_DSI1 0x1
#define RDMA0_SOUT_DSI2 0x4
#define RDMA0_SOUT_DSI3 0x5
#define RDMA1_SOUT_DPI0 0x2
@@ -122,6 +124,8 @@
#define DPI0_SEL_IN_RDMA2 0x3
#define DPI1_SEL_IN_RDMA1 (0x1 << 8)
#define DPI1_SEL_IN_RDMA2 (0x3 << 8)
+#define DSI0_SEL_IN_RDMA1 0x1
+#define DSI0_SEL_IN_RDMA2 0x4
#define DSI1_SEL_IN_RDMA1 0x1
#define DSI1_SEL_IN_RDMA2 0x4
#define DSI2_SEL_IN_RDMA1 (0x1 << 16)
@@ -224,6 +228,12 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
value = RDMA0_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DPI1;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DSI1;
} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
value = RDMA0_SOUT_DSI2;
@@ -282,6 +292,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
*addr = DISP_REG_CONFIG_DPI_SEL_IN;
value = DPI1_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI0_SEL_IN_RDMA1;
} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
*addr = DISP_REG_CONFIG_DSIO_SEL_IN;
value = DSI1_SEL_IN_RDMA1;
@@ -297,8 +310,11 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
*addr = DISP_REG_CONFIG_DPI_SEL_IN;
value = DPI1_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) {
*addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI0_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
value = DSI1_SEL_IN_RDMA2;
} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
*addr = DISP_REG_CONFIG_DSIE_SEL_IN;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 7413ffeb3c9d..8399229e6ad2 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -78,6 +78,7 @@ struct mtk_ddp_comp_funcs {
void (*stop)(struct mtk_ddp_comp *comp);
void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
void (*disable_vblank)(struct mtk_ddp_comp *comp);
+ unsigned int (*layer_nr)(struct mtk_ddp_comp *comp);
void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx);
void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
@@ -128,6 +129,14 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp)
comp->funcs->disable_vblank(comp);
}
+static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->layer_nr)
+ return comp->funcs->layer_nr(comp);
+
+ return 0;
+}
+
static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp,
unsigned int idx)
{
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 39721119713b..47ec604289b7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -381,7 +381,7 @@ static int mtk_drm_bind(struct device *dev)
err_deinit:
mtk_drm_kms_deinit(drm);
err_free:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -390,7 +390,7 @@ static void mtk_drm_unbind(struct device *dev)
struct mtk_drm_private *private = dev_get_drvdata(dev);
drm_dev_unregister(private->drm);
- drm_dev_unref(private->drm);
+ drm_dev_put(private->drm);
private->drm = NULL;
}
@@ -564,7 +564,7 @@ static int mtk_drm_remove(struct platform_device *pdev)
drm_dev_unregister(drm);
mtk_drm_kms_deinit(drm);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
component_master_del(&pdev->dev, &mtk_drm_ops);
pm_runtime_disable(&pdev->dev);
@@ -580,29 +580,24 @@ static int mtk_drm_sys_suspend(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
struct drm_device *drm = private->drm;
+ int ret;
- drm_kms_helper_poll_disable(drm);
-
- private->suspend_state = drm_atomic_helper_suspend(drm);
- if (IS_ERR(private->suspend_state)) {
- drm_kms_helper_poll_enable(drm);
- return PTR_ERR(private->suspend_state);
- }
-
+ ret = drm_mode_config_helper_suspend(drm);
DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n");
- return 0;
+
+ return ret;
}
static int mtk_drm_sys_resume(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
struct drm_device *drm = private->drm;
+ int ret;
- drm_atomic_helper_resume(drm, private->suspend_state);
- drm_kms_helper_poll_enable(drm);
-
+ ret = drm_mode_config_helper_resume(drm);
DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n");
- return 0;
+
+ return ret;
}
#endif
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 74cdde2ee474..ac6af4bd9df6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -42,29 +42,10 @@ static const struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
-static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
-{
- struct apertures_struct *ap;
- bool primary = false;
-
- ap = alloc_apertures(1);
- if (!ap)
- return;
-
- ap->ranges[0].base = pci_resource_start(pdev, 0);
- ap->ranges[0].size = pci_resource_len(pdev, 0);
-
-#ifdef CONFIG_X86
- primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-#endif
- drm_fb_helper_remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
- kfree(ap);
-}
-
static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- mgag200_kick_out_firmware_fb(pdev);
+ drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "mgag200drmfb");
return drm_get_pci_dev(pdev, ent, &driver);
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 780f983b0294..79d54103d470 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -124,20 +124,11 @@ static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
static int mga_vram_init(struct mga_device *mdev)
{
void __iomem *mem;
- struct apertures_struct *aper = alloc_apertures(1);
- if (!aper)
- return -ENOMEM;
/* BAR 0 is VRAM */
mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
mdev->mc.vram_window = pci_resource_len(mdev->dev->pdev, 0);
- aper->ranges[0].base = mdev->mc.vram_base;
- aper->ranges[0].size = mdev->mc.vram_window;
-
- drm_fb_helper_remove_conflicting_framebuffers(aper, "mgafb", true);
- kfree(aper);
-
if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
"mgadrmfb_vram")) {
DRM_ERROR("can't reserve VRAM\n");
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index b640e39ebaca..015341e2dd4c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -21,6 +21,8 @@
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
+#include <drm/drm_atomic_uapi.h>
+
#include "msm_drv.h"
#include "dpu_kms.h"
#include "dpu_formats.h"
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index c1f1779c980f..4bcdeca7479d 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -15,6 +15,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_atomic_uapi.h>
+
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index 349c12f670eb..77263cf97b20 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -119,11 +119,6 @@ static const char *msm_fence_get_timeline_name(struct dma_fence *fence)
return f->fctx->name;
}
-static bool msm_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static bool msm_fence_signaled(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
@@ -133,10 +128,7 @@ static bool msm_fence_signaled(struct dma_fence *fence)
static const struct dma_fence_ops msm_fence_ops = {
.get_driver_name = msm_fence_get_driver_name,
.get_timeline_name = msm_fence_get_timeline_name,
- .enable_signaling = msm_fence_enable_signaling,
.signaled = msm_fence_signaled,
- .wait = dma_fence_default_wait,
- .release = dma_fence_free,
};
struct dma_fence *
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 8412119bd940..a9bb656058e5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2174,7 +2174,7 @@ nv50_display_create(struct drm_device *dev)
nouveau_display(dev)->fini = nv50_display_fini;
disp->disp = &nouveau_display(dev)->disp;
dev->mode_config.funcs = &nv50_disp_func;
- dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
+ dev->mode_config.quirk_addfb_prefer_xbgr_30bpp = true;
/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 51932c72334e..eb4f766b5958 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -400,8 +400,10 @@ nouveau_connector_destroy(struct drm_connector *connector)
kfree(nv_connector->edid);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
- if (nv_connector->aux.transfer)
+ if (nv_connector->aux.transfer) {
+ drm_dp_cec_unregister_connector(&nv_connector->aux);
drm_dp_aux_unregister(&nv_connector->aux);
+ }
kfree(connector);
}
@@ -608,6 +610,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
nouveau_connector_set_encoder(connector, nv_encoder);
conn_status = connector_status_connected;
+ drm_dp_cec_set_edid(&nv_connector->aux, nv_connector->edid);
goto out;
}
@@ -1108,11 +1111,14 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
NV_DEBUG(drm, "service %s\n", name);
+ drm_dp_cec_irq(&nv_connector->aux);
if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
nv50_mstm_service(nv_encoder->dp.mstm);
} else {
bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
+ if (!plugged)
+ drm_dp_cec_unset_edid(&nv_connector->aux);
NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) {
if (!plugged)
@@ -1302,7 +1308,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
kfree(nv_connector);
return ERR_PTR(ret);
}
-
funcs = &nouveau_connector_funcs;
break;
default:
@@ -1356,6 +1361,14 @@ nouveau_connector_create(struct drm_device *dev, int index)
break;
}
+ switch (type) {
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ drm_dp_cec_register_connector(&nv_connector->aux,
+ connector->name, dev->dev);
+ break;
+ }
+
ret = nvif_notify_init(&disp->disp.object, nouveau_connector_hotplug,
true, NV04_DISP_NTFY_CONN,
&(struct nvif_notify_conn_req_v0) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index dc7454e7f19a..0acc07555bcd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -32,6 +32,8 @@
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_util.h>
+
#include "nouveau_crtc.h"
#include "nouveau_encoder.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 844498c4267c..20a260887be3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -379,7 +379,6 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_IMAGEBLIT;
- info->flags |= FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &nouveau_fbcon_sw_ops;
info->fix.smem_start = fb->nvbo->bo.mem.bus.base +
fb->nvbo->bo.mem.bus.offset;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 412d49bc6e56..99be61ddeb75 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -526,6 +526,5 @@ static const struct dma_fence_ops nouveau_fence_ops_uevent = {
.get_timeline_name = nouveau_fence_get_timeline_name,
.enable_signaling = nouveau_fence_enable_signaling,
.signaled = nouveau_fence_is_signaled,
- .wait = dma_fence_default_wait,
.release = nouveau_fence_release
};
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index 9eabd7201a12..28a3ce8f88d2 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -18,77 +18,27 @@
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
struct device *dev;
-
- struct videomode vm;
-};
-
-static const struct videomode tvc_pal_vm = {
- .hactive = 720,
- .vactive = 574,
- .pixelclock = 13500000,
- .hsync_len = 64,
- .hfront_porch = 12,
- .hback_porch = 68,
- .vsync_len = 5,
- .vfront_porch = 5,
- .vback_porch = 41,
-
- .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
- DISPLAY_FLAGS_VSYNC_LOW,
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-static int tvc_connect(struct omap_dss_device *dssdev)
+static int tvc_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- dev_dbg(ddata->dev, "connect\n");
-
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(ddata->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(ddata->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.atv->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- ddata->in = in;
return 0;
}
-static void tvc_disconnect(struct omap_dss_device *dssdev)
+static void tvc_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- dev_dbg(ddata->dev, "disconnect\n");
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.atv->disconnect(in, dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
}
static int tvc_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
dev_dbg(ddata->dev, "enable\n");
@@ -99,9 +49,7 @@ static int tvc_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.atv->set_timings(in, &ddata->vm);
-
- r = in->ops.atv->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
@@ -113,83 +61,30 @@ static int tvc_enable(struct omap_dss_device *dssdev)
static void tvc_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
dev_dbg(ddata->dev, "disable\n");
if (!omapdss_device_is_enabled(dssdev))
return;
- in->ops.atv->disable(in);
+ src->ops->disable(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void tvc_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.atv->set_timings(in, vm);
-}
-
-static void tvc_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- *vm = ddata->vm;
-}
-
-static int tvc_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.atv->check_timings(in, vm);
-}
-
-static u32 tvc_get_wss(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.atv->get_wss(in);
-}
-
-static int tvc_set_wss(struct omap_dss_device *dssdev, u32 wss)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.atv->set_wss(in, wss);
-}
-
-static struct omap_dss_driver tvc_driver = {
+static const struct omap_dss_device_ops tvc_ops = {
.connect = tvc_connect,
.disconnect = tvc_disconnect,
.enable = tvc_enable,
.disable = tvc_disable,
-
- .set_timings = tvc_set_timings,
- .get_timings = tvc_get_timings,
- .check_timings = tvc_check_timings,
-
- .get_wss = tvc_get_wss,
- .set_wss = tvc_set_wss,
};
static int tvc_probe(struct platform_device *pdev)
{
struct panel_drv_data *ddata;
struct omap_dss_device *dssdev;
- int r;
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
@@ -198,20 +93,15 @@ static int tvc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
ddata->dev = &pdev->dev;
- ddata->vm = tvc_pal_vm;
-
dssdev = &ddata->dssdev;
- dssdev->driver = &tvc_driver;
+ dssdev->ops = &tvc_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_VENC;
dssdev->owner = THIS_MODULE;
- dssdev->panel.vm = tvc_pal_vm;
+ dssdev->of_ports = BIT(0);
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(&pdev->dev, "Failed to register panel\n");
- return r;
- }
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
return 0;
}
@@ -221,10 +111,9 @@ static int __exit tvc_remove(struct platform_device *pdev)
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- omapdss_unregister_display(&ddata->dssdev);
+ omapdss_device_unregister(&ddata->dssdev);
tvc_disable(dssdev);
- tvc_disconnect(dssdev);
return 0;
}
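The connector conversions in this and the following files all reduce to the same shape: the omapdss core now owns the source link (dssdev->src) and the timing propagation, so .connect/.disconnect become stubs and .enable/.disable simply forward to the source. A condensed sketch with hypothetical my_conn_* names (the is-connected/is-enabled guards kept by the real drivers are omitted here):

/* Requires the driver-local "../dss/omapdss.h" definitions shown above. */
static int my_conn_connect(struct omap_dss_device *src,
			   struct omap_dss_device *dst)
{
	/* Nothing to do: the core wires up dssdev->src for us. */
	return 0;
}

static void my_conn_disconnect(struct omap_dss_device *src,
			       struct omap_dss_device *dst)
{
}

static int my_conn_enable(struct omap_dss_device *dssdev)
{
	struct omap_dss_device *src = dssdev->src;
	int r;

	/* Timings are no longer pushed here; just enable the source. */
	r = src->ops->enable(src);
	if (r)
		return r;

	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
	return 0;
}

static void my_conn_disable(struct omap_dss_device *dssdev)
{
	struct omap_dss_device *src = dssdev->src;

	src->ops->disable(src);
	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}

static const struct omap_dss_device_ops my_conn_ops = {
	.connect	= my_conn_connect,
	.disconnect	= my_conn_disconnect,
	.enable		= my_conn_enable,
	.disable	= my_conn_disable,
};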
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
index 6d8cbd9e2110..24b14f44248e 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
@@ -19,30 +19,8 @@
#include "../dss/omapdss.h"
-static const struct videomode dvic_default_vm = {
- .hactive = 640,
- .vactive = 480,
-
- .pixelclock = 23500000,
-
- .hfront_porch = 48,
- .hsync_len = 32,
- .hback_porch = 80,
-
- .vfront_porch = 3,
- .vsync_len = 4,
- .vback_porch = 7,
-
- .flags = DISPLAY_FLAGS_HSYNC_HIGH | DISPLAY_FLAGS_VSYNC_HIGH |
- DISPLAY_FLAGS_SYNC_NEGEDGE | DISPLAY_FLAGS_DE_HIGH |
- DISPLAY_FLAGS_PIXDATA_POSEDGE,
-};
-
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
-
- struct videomode vm;
struct i2c_adapter *i2c_adapter;
@@ -57,49 +35,20 @@ struct panel_drv_data {
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-static int dvic_connect(struct omap_dss_device *dssdev)
+static int dvic_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.dvi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- ddata->in = in;
return 0;
}
-static void dvic_disconnect(struct omap_dss_device *dssdev)
+static void dvic_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.dvi->disconnect(in, dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
}
static int dvic_enable(struct omap_dss_device *dssdev)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
if (!omapdss_device_is_connected(dssdev))
@@ -108,9 +57,7 @@ static int dvic_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dvi->set_timings(in, &ddata->vm);
-
- r = in->ops.dvi->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
@@ -121,46 +68,16 @@ static int dvic_enable(struct omap_dss_device *dssdev)
static void dvic_disable(struct omap_dss_device *dssdev)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
if (!omapdss_device_is_enabled(dssdev))
return;
- in->ops.dvi->disable(in);
+ src->ops->disable(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void dvic_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.dvi->set_timings(in, vm);
-}
-
-static void dvic_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- *vm = ddata->vm;
-}
-
-static int dvic_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.dvi->check_timings(in, vm);
-}
-
static int dvic_ddc_read(struct i2c_adapter *adapter,
unsigned char *buf, u16 count, u8 offset)
{
@@ -198,12 +115,6 @@ static int dvic_read_edid(struct omap_dss_device *dssdev,
struct panel_drv_data *ddata = to_panel_data(dssdev);
int r, l, bytes_read;
- if (ddata->hpd_gpio && !gpiod_get_value_cansleep(ddata->hpd_gpio))
- return -ENODEV;
-
- if (!ddata->i2c_adapter)
- return -ENODEV;
-
l = min(EDID_LENGTH, len);
r = dvic_ddc_read(ddata->i2c_adapter, edid, l, 0);
if (r)
@@ -243,78 +154,41 @@ static bool dvic_detect(struct omap_dss_device *dssdev)
return r == 0;
}
-static int dvic_register_hpd_cb(struct omap_dss_device *dssdev,
+static void dvic_register_hpd_cb(struct omap_dss_device *dssdev,
void (*cb)(void *cb_data,
enum drm_connector_status status),
void *cb_data)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- if (!ddata->hpd_gpio)
- return -ENOTSUPP;
-
mutex_lock(&ddata->hpd_lock);
ddata->hpd_cb = cb;
ddata->hpd_cb_data = cb_data;
mutex_unlock(&ddata->hpd_lock);
- return 0;
}
static void dvic_unregister_hpd_cb(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- if (!ddata->hpd_gpio)
- return;
-
mutex_lock(&ddata->hpd_lock);
ddata->hpd_cb = NULL;
ddata->hpd_cb_data = NULL;
mutex_unlock(&ddata->hpd_lock);
}
-static void dvic_enable_hpd(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (!ddata->hpd_gpio)
- return;
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_enabled = true;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static void dvic_disable_hpd(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (!ddata->hpd_gpio)
- return;
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_enabled = false;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static struct omap_dss_driver dvic_driver = {
+static const struct omap_dss_device_ops dvic_ops = {
.connect = dvic_connect,
.disconnect = dvic_disconnect,
.enable = dvic_enable,
.disable = dvic_disable,
- .set_timings = dvic_set_timings,
- .get_timings = dvic_get_timings,
- .check_timings = dvic_check_timings,
-
.read_edid = dvic_read_edid,
.detect = dvic_detect,
.register_hpd_cb = dvic_register_hpd_cb,
.unregister_hpd_cb = dvic_unregister_hpd_cb,
- .enable_hpd = dvic_enable_hpd,
- .disable_hpd = dvic_disable_hpd,
};
static irqreturn_t dvic_hpd_isr(int irq, void *data)
@@ -396,28 +270,24 @@ static int dvic_probe(struct platform_device *pdev)
if (r)
return r;
- ddata->vm = dvic_default_vm;
-
dssdev = &ddata->dssdev;
- dssdev->driver = &dvic_driver;
+ dssdev->ops = &dvic_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_DVI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.vm = dvic_default_vm;
+ dssdev->of_ports = BIT(0);
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(&pdev->dev, "Failed to register panel\n");
- goto err_reg;
- }
-
- return 0;
+ if (ddata->hpd_gpio)
+ dssdev->ops_flags |= OMAP_DSS_DEVICE_OP_DETECT
+ | OMAP_DSS_DEVICE_OP_HPD;
+ if (ddata->i2c_adapter)
+ dssdev->ops_flags |= OMAP_DSS_DEVICE_OP_DETECT
+ | OMAP_DSS_DEVICE_OP_EDID;
-err_reg:
- i2c_put_adapter(ddata->i2c_adapter);
- mutex_destroy(&ddata->hpd_lock);
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
- return r;
+ return 0;
}
static int __exit dvic_remove(struct platform_device *pdev)
@@ -425,10 +295,9 @@ static int __exit dvic_remove(struct platform_device *pdev)
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- omapdss_unregister_display(&ddata->dssdev);
+ omapdss_device_unregister(&ddata->dssdev);
dvic_disable(dssdev);
- dvic_disconnect(dssdev);
i2c_put_adapter(ddata->i2c_adapter);
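Note how the per-call resource checks removed from dvic_read_edid() and the HPD callbacks are replaced by advertising capabilities once at probe: the assumption is that the omapdss core only invokes .detect/.read_edid/.register_hpd_cb when the matching ops_flags bit is set. Condensed from the dvic_probe() hunk above, wrapped in a hypothetical helper:

/* Advertise capabilities once instead of testing resources per call. */
static void my_dvic_set_ops_flags(struct panel_drv_data *ddata)
{
	struct omap_dss_device *dssdev = &ddata->dssdev;

	if (ddata->hpd_gpio)		/* hot-plug GPIO wired up */
		dssdev->ops_flags |= OMAP_DSS_DEVICE_OP_DETECT
				  |  OMAP_DSS_DEVICE_OP_HPD;
	if (ddata->i2c_adapter)		/* DDC bus available */
		dssdev->ops_flags |= OMAP_DSS_DEVICE_OP_DETECT
				  |  OMAP_DSS_DEVICE_OP_EDID;
}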
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index ca30ed9da7eb..e602fa4a50a4 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -10,95 +10,41 @@
*/
#include <linux/gpio/consumer.h>
-#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/mutex.h>
-
-#include <drm/drm_edid.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
#include "../dss/omapdss.h"
-static const struct videomode hdmic_default_vm = {
- .hactive = 640,
- .vactive = 480,
- .pixelclock = 25175000,
- .hsync_len = 96,
- .hfront_porch = 16,
- .hback_porch = 48,
- .vsync_len = 2,
- .vfront_porch = 11,
- .vback_porch = 31,
-
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
-};
-
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
void *hpd_cb_data;
- bool hpd_enabled;
struct mutex hpd_lock;
struct device *dev;
- struct videomode vm;
-
- int hpd_gpio;
+ struct gpio_desc *hpd_gpio;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-static int hdmic_connect(struct omap_dss_device *dssdev)
+static int hdmic_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- dev_dbg(ddata->dev, "connect\n");
-
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(ddata->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(ddata->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.hdmi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- ddata->in = in;
return 0;
}
-static void hdmic_disconnect(struct omap_dss_device *dssdev)
+static void hdmic_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- dev_dbg(ddata->dev, "disconnect\n");
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.hdmi->disconnect(in, dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
}
static int hdmic_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
dev_dbg(ddata->dev, "enable\n");
@@ -109,9 +55,7 @@ static int hdmic_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.hdmi->set_timings(in, &ddata->vm);
-
- r = in->ops.hdmi->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
@@ -123,171 +67,58 @@ static int hdmic_enable(struct omap_dss_device *dssdev)
static void hdmic_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
dev_dbg(ddata->dev, "disable\n");
if (!omapdss_device_is_enabled(dssdev))
return;
- in->ops.hdmi->disable(in);
+ src->ops->disable(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void hdmic_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.hdmi->set_timings(in, vm);
-}
-
-static void hdmic_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- *vm = ddata->vm;
-}
-
-static int hdmic_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.hdmi->check_timings(in, vm);
-}
-
-static int hdmic_read_edid(struct omap_dss_device *dssdev,
- u8 *edid, int len)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.hdmi->read_edid(in, edid, len);
-}
-
static bool hdmic_detect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
- bool connected;
-
- if (gpio_is_valid(ddata->hpd_gpio))
- connected = gpio_get_value_cansleep(ddata->hpd_gpio);
- else
- connected = in->ops.hdmi->detect(in);
- if (!connected && in->ops.hdmi->lost_hotplug)
- in->ops.hdmi->lost_hotplug(in);
- return connected;
+
+ return gpiod_get_value_cansleep(ddata->hpd_gpio);
}
-static int hdmic_register_hpd_cb(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
+static void hdmic_register_hpd_cb(struct omap_dss_device *dssdev,
+ void (*cb)(void *cb_data,
enum drm_connector_status status),
- void *cb_data)
+ void *cb_data)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (gpio_is_valid(ddata->hpd_gpio)) {
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = cb;
- ddata->hpd_cb_data = cb_data;
- mutex_unlock(&ddata->hpd_lock);
- return 0;
- } else if (in->ops.hdmi->register_hpd_cb) {
- return in->ops.hdmi->register_hpd_cb(in, cb, cb_data);
- }
- return -ENOTSUPP;
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_cb = cb;
+ ddata->hpd_cb_data = cb_data;
+ mutex_unlock(&ddata->hpd_lock);
}
static void hdmic_unregister_hpd_cb(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (gpio_is_valid(ddata->hpd_gpio)) {
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = NULL;
- ddata->hpd_cb_data = NULL;
- mutex_unlock(&ddata->hpd_lock);
- } else if (in->ops.hdmi->unregister_hpd_cb) {
- in->ops.hdmi->unregister_hpd_cb(in);
- }
-}
-
-static void hdmic_enable_hpd(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (gpio_is_valid(ddata->hpd_gpio)) {
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_enabled = true;
- mutex_unlock(&ddata->hpd_lock);
- } else if (in->ops.hdmi->enable_hpd) {
- in->ops.hdmi->enable_hpd(in);
- }
-}
-
-static void hdmic_disable_hpd(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (gpio_is_valid(ddata->hpd_gpio)) {
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_enabled = false;
- mutex_unlock(&ddata->hpd_lock);
- } else if (in->ops.hdmi->disable_hpd) {
- in->ops.hdmi->disable_hpd(in);
- }
-}
-
-static int hdmic_set_hdmi_mode(struct omap_dss_device *dssdev, bool hdmi_mode)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.hdmi->set_hdmi_mode(in, hdmi_mode);
-}
-
-static int hdmic_set_infoframe(struct omap_dss_device *dssdev,
- const struct hdmi_avi_infoframe *avi)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
- return in->ops.hdmi->set_infoframe(in, avi);
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_cb = NULL;
+ ddata->hpd_cb_data = NULL;
+ mutex_unlock(&ddata->hpd_lock);
}
-static struct omap_dss_driver hdmic_driver = {
+static const struct omap_dss_device_ops hdmic_ops = {
.connect = hdmic_connect,
.disconnect = hdmic_disconnect,
.enable = hdmic_enable,
.disable = hdmic_disable,
- .set_timings = hdmic_set_timings,
- .get_timings = hdmic_get_timings,
- .check_timings = hdmic_check_timings,
-
- .read_edid = hdmic_read_edid,
.detect = hdmic_detect,
.register_hpd_cb = hdmic_register_hpd_cb,
.unregister_hpd_cb = hdmic_unregister_hpd_cb,
- .enable_hpd = hdmic_enable_hpd,
- .disable_hpd = hdmic_disable_hpd,
- .set_hdmi_mode = hdmic_set_hdmi_mode,
- .set_hdmi_infoframe = hdmic_set_infoframe,
};
static irqreturn_t hdmic_hpd_isr(int irq, void *data)
@@ -295,7 +126,7 @@ static irqreturn_t hdmic_hpd_isr(int irq, void *data)
struct panel_drv_data *ddata = data;
mutex_lock(&ddata->hpd_lock);
- if (ddata->hpd_enabled && ddata->hpd_cb) {
+ if (ddata->hpd_cb) {
enum drm_connector_status status;
if (hdmic_detect(&ddata->dssdev))
@@ -310,26 +141,11 @@ static irqreturn_t hdmic_hpd_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static int hdmic_probe_of(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct device_node *node = pdev->dev.of_node;
- int gpio;
-
- /* HPD GPIO */
- gpio = of_get_named_gpio(node, "hpd-gpios", 0);
- if (gpio_is_valid(gpio))
- ddata->hpd_gpio = gpio;
- else
- ddata->hpd_gpio = -ENODEV;
-
- return 0;
-}
-
static int hdmic_probe(struct platform_device *pdev)
{
struct panel_drv_data *ddata;
struct omap_dss_device *dssdev;
+ struct gpio_desc *gpio;
int r;
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
@@ -339,20 +155,20 @@ static int hdmic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
ddata->dev = &pdev->dev;
- r = hdmic_probe_of(pdev);
- if (r)
- return r;
-
mutex_init(&ddata->hpd_lock);
- if (gpio_is_valid(ddata->hpd_gpio)) {
- r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio,
- GPIOF_DIR_IN, "hdmi_hpd");
- if (r)
- return r;
+ /* HPD GPIO */
+ gpio = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to parse HPD gpio\n");
+ return PTR_ERR(gpio);
+ }
+
+ ddata->hpd_gpio = gpio;
+ if (ddata->hpd_gpio) {
r = devm_request_threaded_irq(&pdev->dev,
- gpio_to_irq(ddata->hpd_gpio),
+ gpiod_to_irq(ddata->hpd_gpio),
NULL, hdmic_hpd_isr,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
IRQF_ONESHOT,
@@ -361,20 +177,18 @@ static int hdmic_probe(struct platform_device *pdev)
return r;
}
- ddata->vm = hdmic_default_vm;
-
dssdev = &ddata->dssdev;
- dssdev->driver = &hdmic_driver;
+ dssdev->ops = &hdmic_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.vm = hdmic_default_vm;
+ dssdev->of_ports = BIT(0);
+ dssdev->ops_flags = ddata->hpd_gpio
+ ? OMAP_DSS_DEVICE_OP_DETECT | OMAP_DSS_DEVICE_OP_HPD
+ : 0;
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(&pdev->dev, "Failed to register panel\n");
- return r;
- }
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
return 0;
}
@@ -384,10 +198,9 @@ static int __exit hdmic_remove(struct platform_device *pdev)
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- omapdss_unregister_display(&ddata->dssdev);
+ omapdss_device_unregister(&ddata->dssdev);
hdmic_disable(dssdev);
- hdmic_disconnect(dssdev);
return 0;
}
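The HPD handling above is the stock gpiod pattern: devm_gpiod_get_optional() maps the "hpd" con_id onto the hpd-gpios property and returns NULL rather than an error when the property is absent, and the threaded handler may sleep, which is why gpiod_get_value_cansleep() is safe in hdmic_detect(). Condensed sketch with hypothetical names:

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

/* Request an optional HPD GPIO and hook a threaded IRQ to it (sketch). */
static int my_hpd_init(struct device *dev, struct gpio_desc **out,
		       irq_handler_t thread_fn, void *data)
{
	struct gpio_desc *gpio;
	int irq;

	/* "hpd" matches the hpd-gpios DT property; NULL means not wired. */
	gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);

	*out = gpio;
	if (!gpio)
		return 0;

	irq = gpiod_to_irq(gpio);
	if (irq < 0)
		return irq;

	/* Threaded handler only: it may sleep while reading the GPIO. */
	return devm_request_threaded_irq(dev, irq, NULL, thread_fn,
					 IRQF_TRIGGER_RISING |
					 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					 "hpd", data);
}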
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index afee1b8b457a..4fefd80f53bb 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -23,75 +23,28 @@
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
struct gpio_desc *enable_gpio;
-
- struct videomode vm;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-static int opa362_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static int opa362_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- dev_dbg(dssdev->dev, "connect\n");
-
- if (omapdss_device_is_connected(dssdev))
- return -EBUSY;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.atv->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- dst->src = dssdev;
- dssdev->dst = dst;
-
- ddata->in = in;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
-static void opa362_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static void opa362_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- dev_dbg(dssdev->dev, "disconnect\n");
-
- WARN_ON(!omapdss_device_is_connected(dssdev));
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- WARN_ON(dst != dssdev->dst);
- if (dst != dssdev->dst)
- return;
-
- dst->src = NULL;
- dssdev->dst = NULL;
-
- in->ops.atv->disconnect(in, &ddata->dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
+ omapdss_device_disconnect(dst, dst->next);
}
static int opa362_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
dev_dbg(dssdev->dev, "enable\n");
@@ -102,9 +55,7 @@ static int opa362_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.atv->set_timings(in, &ddata->vm);
-
- r = in->ops.atv->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
@@ -119,7 +70,7 @@ static int opa362_enable(struct omap_dss_device *dssdev)
static void opa362_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
dev_dbg(dssdev->dev, "disable\n");
@@ -129,56 +80,16 @@ static void opa362_disable(struct omap_dss_device *dssdev)
if (ddata->enable_gpio)
gpiod_set_value_cansleep(ddata->enable_gpio, 0);
- in->ops.atv->disable(in);
+ src->ops->disable(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void opa362_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- dev_dbg(dssdev->dev, "set_timings\n");
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.atv->set_timings(in, vm);
-}
-
-static void opa362_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- dev_dbg(dssdev->dev, "get_timings\n");
-
- *vm = ddata->vm;
-}
-
-static int opa362_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- dev_dbg(dssdev->dev, "check_timings\n");
-
- return in->ops.atv->check_timings(in, vm);
-}
-
-static const struct omapdss_atv_ops opa362_atv_ops = {
+static const struct omap_dss_device_ops opa362_ops = {
.connect = opa362_connect,
.disconnect = opa362_disconnect,
-
.enable = opa362_enable,
.disable = opa362_disable,
-
- .check_timings = opa362_check_timings,
- .set_timings = opa362_set_timings,
- .get_timings = opa362_get_timings,
};
static int opa362_probe(struct platform_device *pdev)
@@ -186,7 +97,6 @@ static int opa362_probe(struct platform_device *pdev)
struct panel_drv_data *ddata;
struct omap_dss_device *dssdev;
struct gpio_desc *gpio;
- int r;
dev_dbg(&pdev->dev, "probe\n");
@@ -203,18 +113,22 @@ static int opa362_probe(struct platform_device *pdev)
ddata->enable_gpio = gpio;
dssdev = &ddata->dssdev;
- dssdev->ops.atv = &opa362_atv_ops;
+ dssdev->ops = &opa362_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_VENC;
dssdev->output_type = OMAP_DISPLAY_TYPE_VENC;
dssdev->owner = THIS_MODULE;
+ dssdev->of_ports = BIT(1) | BIT(0);
- r = omapdss_register_output(dssdev);
- if (r) {
- dev_err(&pdev->dev, "Failed to register output\n");
- return r;
+ dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1);
+ if (IS_ERR(dssdev->next)) {
+ if (PTR_ERR(dssdev->next) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to find video sink\n");
+ return PTR_ERR(dssdev->next);
}
+ omapdss_device_register(dssdev);
+
return 0;
}
@@ -223,7 +137,9 @@ static int __exit opa362_remove(struct platform_device *pdev)
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- omapdss_unregister_output(&ddata->dssdev);
+ if (dssdev->next)
+ omapdss_device_put(dssdev->next);
+ omapdss_device_unregister(&ddata->dssdev);
WARN_ON(omapdss_device_is_enabled(dssdev));
if (omapdss_device_is_enabled(dssdev))
@@ -231,7 +147,7 @@ static int __exit opa362_remove(struct platform_device *pdev)
WARN_ON(omapdss_device_is_connected(dssdev));
if (omapdss_device_is_connected(dssdev))
- opa362_disconnect(dssdev, dssdev->dst);
+ omapdss_device_disconnect(NULL, dssdev);
return 0;
}
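The encoder conversions (opa362, tfp410, tpd12s015) share the chaining scheme visible above: the sink on output port 1 is resolved once at probe, .connect forwards the link to that next device, and .disconnect undoes it. Sketch with hypothetical my_enc_* names, using only the omapdss calls that appear in the hunks:

static int my_enc_connect(struct omap_dss_device *src,
			  struct omap_dss_device *dst)
{
	/* Chain the connection on to the next device in the pipeline. */
	return omapdss_device_connect(dst->dss, dst, dst->next);
}

static void my_enc_disconnect(struct omap_dss_device *src,
			      struct omap_dss_device *dst)
{
	omapdss_device_disconnect(dst, dst->next);
}

static int my_enc_probe_chain(struct platform_device *pdev,
			      struct omap_dss_device *dssdev)
{
	/* Input on port 0, output (the chained sink) on port 1. */
	dssdev->of_ports = BIT(1) | BIT(0);

	/* Takes a reference, dropped with omapdss_device_put() in remove. */
	dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1);
	if (IS_ERR(dssdev->next))
		return PTR_ERR(dssdev->next);

	omapdss_device_register(dssdev);
	return 0;
}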
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
index ed7ae384c3ed..f1a748353279 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
@@ -13,77 +13,33 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/of_gpio.h>
#include "../dss/omapdss.h"
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
- int pd_gpio;
-
- struct videomode vm;
+ struct gpio_desc *pd_gpio;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-static int tfp410_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static int tfp410_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- if (omapdss_device_is_connected(dssdev))
- return -EBUSY;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.dpi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- dst->src = dssdev;
- dssdev->dst = dst;
-
- ddata->in = in;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
-static void tfp410_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static void tfp410_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- WARN_ON(!omapdss_device_is_connected(dssdev));
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- WARN_ON(dst != dssdev->dst);
- if (dst != dssdev->dst)
- return;
-
- dst->src = NULL;
- dssdev->dst = NULL;
-
- in->ops.dpi->disconnect(in, &ddata->dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
+ omapdss_device_disconnect(dst, dst->next);
}
static int tfp410_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
if (!omapdss_device_is_connected(dssdev))
@@ -92,14 +48,12 @@ static int tfp410_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dpi->set_timings(in, &ddata->vm);
-
- r = in->ops.dpi->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
- if (gpio_is_valid(ddata->pd_gpio))
- gpio_set_value_cansleep(ddata->pd_gpio, 1);
+ if (ddata->pd_gpio)
+ gpiod_set_value_cansleep(ddata->pd_gpio, 0);
dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
@@ -109,94 +63,31 @@ static int tfp410_enable(struct omap_dss_device *dssdev)
static void tfp410_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
if (!omapdss_device_is_enabled(dssdev))
return;
- if (gpio_is_valid(ddata->pd_gpio))
- gpio_set_value_cansleep(ddata->pd_gpio, 0);
+ if (ddata->pd_gpio)
+ gpiod_set_value_cansleep(ddata->pd_gpio, 1);
- in->ops.dpi->disable(in);
+ src->ops->disable(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void tfp410_fix_timings(struct videomode *vm)
-{
- vm->flags |= DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
- DISPLAY_FLAGS_SYNC_POSEDGE;
-}
-
-static void tfp410_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- tfp410_fix_timings(vm);
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.dpi->set_timings(in, vm);
-}
-
-static void tfp410_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- *vm = ddata->vm;
-}
-
-static int tfp410_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- tfp410_fix_timings(vm);
-
- return in->ops.dpi->check_timings(in, vm);
-}
-
-static const struct omapdss_dvi_ops tfp410_dvi_ops = {
+static const struct omap_dss_device_ops tfp410_ops = {
.connect = tfp410_connect,
.disconnect = tfp410_disconnect,
-
.enable = tfp410_enable,
.disable = tfp410_disable,
-
- .check_timings = tfp410_check_timings,
- .set_timings = tfp410_set_timings,
- .get_timings = tfp410_get_timings,
};
-static int tfp410_probe_of(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct device_node *node = pdev->dev.of_node;
- int gpio;
-
- gpio = of_get_named_gpio(node, "powerdown-gpios", 0);
-
- if (gpio_is_valid(gpio) || gpio == -ENOENT) {
- ddata->pd_gpio = gpio;
- } else {
- if (gpio != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to parse PD gpio\n");
- return gpio;
- }
-
- return 0;
-}
-
static int tfp410_probe(struct platform_device *pdev)
{
struct panel_drv_data *ddata;
struct omap_dss_device *dssdev;
- int r;
+ struct gpio_desc *gpio;
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
@@ -204,34 +95,34 @@ static int tfp410_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
- r = tfp410_probe_of(pdev);
- if (r)
- return r;
-
- if (gpio_is_valid(ddata->pd_gpio)) {
- r = devm_gpio_request_one(&pdev->dev, ddata->pd_gpio,
- GPIOF_OUT_INIT_LOW, "tfp410 PD");
- if (r) {
- dev_err(&pdev->dev, "Failed to request PD GPIO %d\n",
- ddata->pd_gpio);
- return r;
- }
+ /* Powerdown GPIO */
+ gpio = devm_gpiod_get_optional(&pdev->dev, "powerdown", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to parse powerdown gpio\n");
+ return PTR_ERR(gpio);
}
+ ddata->pd_gpio = gpio;
+
dssdev = &ddata->dssdev;
- dssdev->ops.dvi = &tfp410_dvi_ops;
+ dssdev->ops = &tfp410_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->output_type = OMAP_DISPLAY_TYPE_DVI;
dssdev->owner = THIS_MODULE;
- dssdev->port_num = 1;
-
- r = omapdss_register_output(dssdev);
- if (r) {
- dev_err(&pdev->dev, "Failed to register output\n");
- return r;
+ dssdev->of_ports = BIT(1) | BIT(0);
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_POSEDGE;
+
+ dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1);
+ if (IS_ERR(dssdev->next)) {
+ if (PTR_ERR(dssdev->next) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to find video sink\n");
+ return PTR_ERR(dssdev->next);
}
+ omapdss_device_register(dssdev);
+
return 0;
}
@@ -240,7 +131,9 @@ static int __exit tfp410_remove(struct platform_device *pdev)
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- omapdss_unregister_output(&ddata->dssdev);
+ if (dssdev->next)
+ omapdss_device_put(dssdev->next);
+ omapdss_device_unregister(&ddata->dssdev);
WARN_ON(omapdss_device_is_enabled(dssdev));
if (omapdss_device_is_enabled(dssdev))
@@ -248,7 +141,7 @@ static int __exit tfp410_remove(struct platform_device *pdev)
WARN_ON(omapdss_device_is_connected(dssdev));
if (omapdss_device_is_connected(dssdev))
- tfp410_disconnect(dssdev, dssdev->dst);
+ omapdss_device_disconnect(NULL, dssdev);
return 0;
}
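One detail worth calling out in the tfp410 hunks: tfp410_fix_timings(), which forced the DE/sync/pixel-data polarities into every videomode, is gone, and the same fixed polarities are now declared once as DRM bus flags at probe. The mapping is roughly:

/* Rough mapping from the removed videomode fixup to the new declaration:
 *	DISPLAY_FLAGS_DE_HIGH         -> DRM_BUS_FLAG_DE_HIGH
 *	DISPLAY_FLAGS_SYNC_POSEDGE    -> DRM_BUS_FLAG_SYNC_POSEDGE
 *	DISPLAY_FLAGS_PIXDATA_POSEDGE -> DRM_BUS_FLAG_PIXDATA_POSEDGE
 */
dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
		  | DRM_BUS_FLAG_SYNC_POSEDGE
		  | DRM_BUS_FLAG_PIXDATA_POSEDGE;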
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index d275bf152da5..94de55fd8884 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -21,42 +21,26 @@
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
void *hpd_cb_data;
- bool hpd_enabled;
struct mutex hpd_lock;
struct gpio_desc *ct_cp_hpd_gpio;
struct gpio_desc *ls_oe_gpio;
struct gpio_desc *hpd_gpio;
-
- struct videomode vm;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-static int tpd_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static int tpd_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
+ struct panel_drv_data *ddata = to_panel_data(dst);
int r;
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.hdmi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
+ r = omapdss_device_connect(dst->dss, dst, dst->next);
+ if (r)
return r;
- }
-
- dst->src = dssdev;
- dssdev->dst = dst;
gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1);
gpiod_set_value_cansleep(ddata->ls_oe_gpio, 1);
@@ -64,45 +48,29 @@ static int tpd_connect(struct omap_dss_device *dssdev,
/* DC-DC converter needs at max 300us to get to 90% of 5V */
udelay(300);
- ddata->in = in;
return 0;
}
-static void tpd_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static void tpd_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- WARN_ON(dst != dssdev->dst);
-
- if (dst != dssdev->dst)
- return;
+ struct panel_drv_data *ddata = to_panel_data(dst);
gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0);
gpiod_set_value_cansleep(ddata->ls_oe_gpio, 0);
- dst->src = NULL;
- dssdev->dst = NULL;
-
- in->ops.hdmi->disconnect(in, &ddata->dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
+ omapdss_device_disconnect(dst, dst->next);
}
static int tpd_enable(struct omap_dss_device *dssdev)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
return 0;
- in->ops.hdmi->set_timings(in, &ddata->vm);
-
- r = in->ops.hdmi->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
@@ -113,76 +81,27 @@ static int tpd_enable(struct omap_dss_device *dssdev)
static void tpd_disable(struct omap_dss_device *dssdev)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return;
- in->ops.hdmi->disable(in);
+ src->ops->disable(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void tpd_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.hdmi->set_timings(in, vm);
-}
-
-static void tpd_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- *vm = ddata->vm;
-}
-
-static int tpd_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
- int r;
-
- r = in->ops.hdmi->check_timings(in, vm);
-
- return r;
-}
-
-static int tpd_read_edid(struct omap_dss_device *dssdev,
- u8 *edid, int len)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!gpiod_get_value_cansleep(ddata->hpd_gpio))
- return -ENODEV;
-
- return in->ops.hdmi->read_edid(in, edid, len);
-}
-
static bool tpd_detect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
- bool connected = gpiod_get_value_cansleep(ddata->hpd_gpio);
- if (!connected && in->ops.hdmi->lost_hotplug)
- in->ops.hdmi->lost_hotplug(in);
- return connected;
+ return gpiod_get_value_cansleep(ddata->hpd_gpio);
}
-static int tpd_register_hpd_cb(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
+static void tpd_register_hpd_cb(struct omap_dss_device *dssdev,
+ void (*cb)(void *cb_data,
enum drm_connector_status status),
- void *cb_data)
+ void *cb_data)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
@@ -190,8 +109,6 @@ static int tpd_register_hpd_cb(struct omap_dss_device *dssdev,
ddata->hpd_cb = cb;
ddata->hpd_cb_data = cb_data;
mutex_unlock(&ddata->hpd_lock);
-
- return 0;
}
static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev)
@@ -204,61 +121,14 @@ static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev)
mutex_unlock(&ddata->hpd_lock);
}
-static void tpd_enable_hpd(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_enabled = true;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static void tpd_disable_hpd(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_enabled = false;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static int tpd_set_infoframe(struct omap_dss_device *dssdev,
- const struct hdmi_avi_infoframe *avi)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.hdmi->set_infoframe(in, avi);
-}
-
-static int tpd_set_hdmi_mode(struct omap_dss_device *dssdev,
- bool hdmi_mode)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.hdmi->set_hdmi_mode(in, hdmi_mode);
-}
-
-static const struct omapdss_hdmi_ops tpd_hdmi_ops = {
+static const struct omap_dss_device_ops tpd_ops = {
.connect = tpd_connect,
.disconnect = tpd_disconnect,
-
.enable = tpd_enable,
.disable = tpd_disable,
-
- .check_timings = tpd_check_timings,
- .set_timings = tpd_set_timings,
- .get_timings = tpd_get_timings,
-
- .read_edid = tpd_read_edid,
.detect = tpd_detect,
.register_hpd_cb = tpd_register_hpd_cb,
.unregister_hpd_cb = tpd_unregister_hpd_cb,
- .enable_hpd = tpd_enable_hpd,
- .disable_hpd = tpd_disable_hpd,
- .set_infoframe = tpd_set_infoframe,
- .set_hdmi_mode = tpd_set_hdmi_mode,
};
static irqreturn_t tpd_hpd_isr(int irq, void *data)
@@ -266,7 +136,7 @@ static irqreturn_t tpd_hpd_isr(int irq, void *data)
struct panel_drv_data *ddata = data;
mutex_lock(&ddata->hpd_lock);
- if (ddata->hpd_enabled && ddata->hpd_cb) {
+ if (ddata->hpd_cb) {
enum drm_connector_status status;
if (tpd_detect(&ddata->dssdev))
@@ -283,7 +153,7 @@ static irqreturn_t tpd_hpd_isr(int irq, void *data)
static int tpd_probe(struct platform_device *pdev)
{
- struct omap_dss_device *in, *dssdev;
+ struct omap_dss_device *dssdev;
struct panel_drv_data *ddata;
int r;
struct gpio_desc *gpio;
@@ -325,21 +195,24 @@ static int tpd_probe(struct platform_device *pdev)
return r;
dssdev = &ddata->dssdev;
- dssdev->ops.hdmi = &tpd_hdmi_ops;
+ dssdev->ops = &tpd_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
dssdev->output_type = OMAP_DISPLAY_TYPE_HDMI;
dssdev->owner = THIS_MODULE;
- dssdev->port_num = 1;
-
- in = ddata->in;
-
- r = omapdss_register_output(dssdev);
- if (r) {
- dev_err(&pdev->dev, "Failed to register output\n");
- return r;
+ dssdev->of_ports = BIT(1) | BIT(0);
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT
+ | OMAP_DSS_DEVICE_OP_HPD;
+
+ dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1);
+ if (IS_ERR(dssdev->next)) {
+ if (PTR_ERR(dssdev->next) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to find video sink\n");
+ return PTR_ERR(dssdev->next);
}
+ omapdss_device_register(dssdev);
+
return 0;
}
@@ -348,7 +221,9 @@ static int __exit tpd_remove(struct platform_device *pdev)
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- omapdss_unregister_output(&ddata->dssdev);
+ if (dssdev->next)
+ omapdss_device_put(dssdev->next);
+ omapdss_device_unregister(&ddata->dssdev);
WARN_ON(omapdss_device_is_enabled(dssdev));
if (omapdss_device_is_enabled(dssdev))
@@ -356,7 +231,7 @@ static int __exit tpd_remove(struct platform_device *pdev)
WARN_ON(omapdss_device_is_connected(dssdev));
if (omapdss_device_is_connected(dssdev))
- tpd_disconnect(dssdev, dssdev->dst);
+ omapdss_device_disconnect(NULL, dssdev);
return 0;
}
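The remove path is the mirror image for all three encoder drivers: drop the reference taken on dssdev->next at probe, unregister, and if the device is somehow still connected let the core tear the link down (note the NULL source argument, matching the new two-argument disconnect). Sketch with a hypothetical my_enc_disable():

static int my_enc_remove(struct platform_device *pdev)
{
	struct panel_drv_data *ddata = platform_get_drvdata(pdev);
	struct omap_dss_device *dssdev = &ddata->dssdev;

	if (dssdev->next)
		omapdss_device_put(dssdev->next);
	omapdss_device_unregister(dssdev);

	WARN_ON(omapdss_device_is_enabled(dssdev));
	if (omapdss_device_is_enabled(dssdev))
		my_enc_disable(dssdev);	/* hypothetical; see tpd_disable above */

	WARN_ON(omapdss_device_is_connected(dssdev));
	if (omapdss_device_is_connected(dssdev))
		omapdss_device_disconnect(NULL, dssdev);

	return 0;
}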
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index 6cbf570d6727..1f8161b041be 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -23,7 +23,6 @@
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
struct videomode vm;
@@ -35,49 +34,21 @@ struct panel_drv_data {
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-static int panel_dpi_connect(struct omap_dss_device *dssdev)
+static int panel_dpi_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.dpi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- ddata->in = in;
return 0;
}
-static void panel_dpi_disconnect(struct omap_dss_device *dssdev)
+static void panel_dpi_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.dpi->disconnect(in, dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
}
static int panel_dpi_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
if (!omapdss_device_is_connected(dssdev))
@@ -86,15 +57,13 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dpi->set_timings(in, &ddata->vm);
-
- r = in->ops.dpi->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
r = regulator_enable(ddata->vcc_supply);
if (r) {
- in->ops.dpi->disable(in);
+ src->ops->disable(src);
return r;
}
@@ -109,7 +78,7 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev)
static void panel_dpi_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
if (!omapdss_device_is_enabled(dssdev))
return;
@@ -119,23 +88,11 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev)
gpiod_set_value_cansleep(ddata->enable_gpio, 0);
regulator_disable(ddata->vcc_supply);
- in->ops.dpi->disable(in);
+ src->ops->disable(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void panel_dpi_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.dpi->set_timings(in, vm);
-}
-
static void panel_dpi_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
@@ -144,25 +101,14 @@ static void panel_dpi_get_timings(struct omap_dss_device *dssdev,
*vm = ddata->vm;
}
-static int panel_dpi_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.dpi->check_timings(in, vm);
-}
-
-static struct omap_dss_driver panel_dpi_ops = {
+static const struct omap_dss_device_ops panel_dpi_ops = {
.connect = panel_dpi_connect,
.disconnect = panel_dpi_disconnect,
.enable = panel_dpi_enable,
.disable = panel_dpi_disable,
- .set_timings = panel_dpi_set_timings,
.get_timings = panel_dpi_get_timings,
- .check_timings = panel_dpi_check_timings,
};
static int panel_dpi_probe_of(struct platform_device *pdev)
@@ -227,16 +173,13 @@ static int panel_dpi_probe(struct platform_device *pdev)
dssdev = &ddata->dssdev;
dssdev->dev = &pdev->dev;
- dssdev->driver = &panel_dpi_ops;
+ dssdev->ops = &panel_dpi_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.vm = ddata->vm;
+ dssdev->of_ports = BIT(0);
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(&pdev->dev, "Failed to register panel\n");
- return r;
- }
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
return 0;
}
@@ -246,10 +189,9 @@ static int __exit panel_dpi_remove(struct platform_device *pdev)
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- omapdss_unregister_display(dssdev);
+ omapdss_device_unregister(dssdev);
panel_dpi_disable(dssdev);
- panel_dpi_disconnect(dssdev);
return 0;
}
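For the display-side devices (connectors and panels), the registration tail is now uniform and, as used above, has no failure path, which is what lets the old error unwinding disappear from the probe functions. Condensed sketch of that tail (the ops pointer and display type vary per driver):

/* End of a typical display-device probe in this series (sketch). */
dssdev->ops = &my_panel_ops;		/* const struct omap_dss_device_ops */
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;	/* per device */
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);		/* single input port */

omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
return 0;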
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 428de90fced1..29692a5217c5 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -41,7 +41,6 @@
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
struct videomode vm;
@@ -142,11 +141,11 @@ static void hw_guard_wait(struct panel_drv_data *ddata)
static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
{
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
int r;
u8 buf[1];
- r = in->ops.dsi->dcs_read(in, ddata->channel, dcs_cmd, buf, 1);
+ r = src->ops->dsi.dcs_read(src, ddata->channel, dcs_cmd, buf, 1);
if (r < 0)
return r;
@@ -158,29 +157,30 @@ static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
static int dsicm_dcs_write_0(struct panel_drv_data *ddata, u8 dcs_cmd)
{
- struct omap_dss_device *in = ddata->in;
- return in->ops.dsi->dcs_write(in, ddata->channel, &dcs_cmd, 1);
+ struct omap_dss_device *src = ddata->dssdev.src;
+
+ return src->ops->dsi.dcs_write(src, ddata->channel, &dcs_cmd, 1);
}
static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param)
{
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
u8 buf[2] = { dcs_cmd, param };
- return in->ops.dsi->dcs_write(in, ddata->channel, buf, 2);
+ return src->ops->dsi.dcs_write(src, ddata->channel, buf, 2);
}
static int dsicm_sleep_in(struct panel_drv_data *ddata)
{
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
u8 cmd;
int r;
hw_guard_wait(ddata);
cmd = MIPI_DCS_ENTER_SLEEP_MODE;
- r = in->ops.dsi->dcs_write_nosync(in, ddata->channel, &cmd, 1);
+ r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, &cmd, 1);
if (r)
return r;
@@ -228,7 +228,7 @@ static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3)
static int dsicm_set_update_window(struct panel_drv_data *ddata,
u16 x, u16 y, u16 w, u16 h)
{
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
int r;
u16 x1 = x;
u16 x2 = x + w - 1;
@@ -242,7 +242,7 @@ static int dsicm_set_update_window(struct panel_drv_data *ddata,
buf[3] = (x2 >> 8) & 0xff;
buf[4] = (x2 >> 0) & 0xff;
- r = in->ops.dsi->dcs_write_nosync(in, ddata->channel, buf, sizeof(buf));
+ r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, buf, sizeof(buf));
if (r)
return r;
@@ -252,11 +252,11 @@ static int dsicm_set_update_window(struct panel_drv_data *ddata,
buf[3] = (y2 >> 8) & 0xff;
buf[4] = (y2 >> 0) & 0xff;
- r = in->ops.dsi->dcs_write_nosync(in, ddata->channel, buf, sizeof(buf));
+ r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, buf, sizeof(buf));
if (r)
return r;
- in->ops.dsi->bta_sync(in, ddata->channel);
+ src->ops->dsi.bta_sync(src, ddata->channel);
return r;
}
@@ -275,7 +275,7 @@ static void dsicm_cancel_ulps_work(struct panel_drv_data *ddata)
static int dsicm_enter_ulps(struct panel_drv_data *ddata)
{
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
int r;
if (ddata->ulps_enabled)
@@ -290,7 +290,7 @@ static int dsicm_enter_ulps(struct panel_drv_data *ddata)
if (ddata->ext_te_gpio)
disable_irq(gpiod_to_irq(ddata->ext_te_gpio));
- in->ops.dsi->disable(in, false, true);
+ src->ops->dsi.disable(src, false, true);
ddata->ulps_enabled = true;
@@ -309,19 +309,19 @@ err:
static int dsicm_exit_ulps(struct panel_drv_data *ddata)
{
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
int r;
if (!ddata->ulps_enabled)
return 0;
- r = in->ops.dsi->enable(in);
+ r = src->ops->enable(src);
if (r) {
dev_err(&ddata->pdev->dev, "failed to enable DSI\n");
goto err1;
}
- in->ops.dsi->enable_hs(in, ddata->channel, true);
+ src->ops->dsi.enable_hs(src, ddata->channel, true);
r = _dsicm_enable_te(ddata, true);
if (r) {
@@ -366,7 +366,7 @@ static int dsicm_wake_up(struct panel_drv_data *ddata)
static int dsicm_bl_update_status(struct backlight_device *dev)
{
struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
int r = 0;
int level;
@@ -381,13 +381,13 @@ static int dsicm_bl_update_status(struct backlight_device *dev)
mutex_lock(&ddata->lock);
if (ddata->enabled) {
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
r = dsicm_wake_up(ddata);
if (!r)
r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, level);
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
}
mutex_unlock(&ddata->lock);
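Every backlight and sysfs path converted in panel-dsi-cm follows the same bracket: take the DSI bus lock through the source's dsi op group, wake the panel out of ULPS, perform the DCS access, release the lock. Condensed sketch (the helper name is hypothetical; dsicm_wake_up() and dsicm_dcs_write_1() are the driver's own functions shown elsewhere in this diff):

static int my_dsicm_locked_dcs_write_1(struct panel_drv_data *ddata,
					u8 dcs_cmd, u8 param)
{
	struct omap_dss_device *src = ddata->dssdev.src;
	int r;

	src->ops->dsi.bus_lock(src);

	r = dsicm_wake_up(ddata);	/* leave ULPS if needed */
	if (!r)
		r = dsicm_dcs_write_1(ddata, dcs_cmd, param);

	src->ops->dsi.bus_unlock(src);

	return r;
}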
@@ -414,21 +414,21 @@ static ssize_t dsicm_num_errors_show(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
u8 errors = 0;
int r;
mutex_lock(&ddata->lock);
if (ddata->enabled) {
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
r = dsicm_wake_up(ddata);
if (!r)
r = dsicm_dcs_read_1(ddata, DCS_READ_NUM_ERRORS,
&errors);
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
} else {
r = -ENODEV;
}
@@ -446,20 +446,20 @@ static ssize_t dsicm_hw_revision_show(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
u8 id1, id2, id3;
int r;
mutex_lock(&ddata->lock);
if (ddata->enabled) {
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
r = dsicm_wake_up(ddata);
if (!r)
r = dsicm_get_id(ddata, &id1, &id2, &id3);
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
} else {
r = -ENODEV;
}
@@ -478,7 +478,7 @@ static ssize_t dsicm_store_ulps(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
unsigned long t;
int r;
@@ -489,14 +489,14 @@ static ssize_t dsicm_store_ulps(struct device *dev,
mutex_lock(&ddata->lock);
if (ddata->enabled) {
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
if (t)
r = dsicm_enter_ulps(ddata);
else
r = dsicm_wake_up(ddata);
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
}
mutex_unlock(&ddata->lock);
@@ -528,7 +528,7 @@ static ssize_t dsicm_store_ulps_timeout(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
unsigned long t;
int r;
@@ -541,9 +541,9 @@ static ssize_t dsicm_store_ulps_timeout(struct device *dev,
if (ddata->enabled) {
/* dsicm_wake_up will restart the timer */
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
r = dsicm_wake_up(ddata);
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
}
mutex_unlock(&ddata->lock);
@@ -603,7 +603,7 @@ static void dsicm_hw_reset(struct panel_drv_data *ddata)
static int dsicm_power_on(struct panel_drv_data *ddata)
{
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
u8 id1, id2, id3;
int r;
struct omap_dss_dsi_config dsi_config = {
@@ -635,7 +635,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
}
if (ddata->pin_config.num_pins > 0) {
- r = in->ops.dsi->configure_pins(in, &ddata->pin_config);
+ r = src->ops->dsi.configure_pins(src, &ddata->pin_config);
if (r) {
dev_err(&ddata->pdev->dev,
"failed to configure DSI pins\n");
@@ -643,13 +643,13 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
}
}
- r = in->ops.dsi->set_config(in, &dsi_config);
+ r = src->ops->dsi.set_config(src, &dsi_config);
if (r) {
dev_err(&ddata->pdev->dev, "failed to configure DSI\n");
goto err_vddi;
}
- r = in->ops.dsi->enable(in);
+ r = src->ops->enable(src);
if (r) {
dev_err(&ddata->pdev->dev, "failed to enable DSI\n");
goto err_vddi;
@@ -657,7 +657,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
dsicm_hw_reset(ddata);
- in->ops.dsi->enable_hs(in, ddata->channel, false);
+ src->ops->dsi.enable_hs(src, ddata->channel, false);
r = dsicm_sleep_out(ddata);
if (r)
@@ -689,7 +689,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
if (r)
goto err;
- r = in->ops.dsi->enable_video_output(in, ddata->channel);
+ r = src->ops->dsi.enable_video_output(src, ddata->channel);
if (r)
goto err;
@@ -701,7 +701,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
ddata->intro_printed = true;
}
- in->ops.dsi->enable_hs(in, ddata->channel, true);
+ src->ops->dsi.enable_hs(src, ddata->channel, true);
return 0;
err:
@@ -709,7 +709,7 @@ err:
dsicm_hw_reset(ddata);
- in->ops.dsi->disable(in, true, false);
+ src->ops->dsi.disable(src, true, false);
err_vddi:
if (ddata->vddi)
regulator_disable(ddata->vddi);
@@ -722,10 +722,10 @@ err_vpnl:
static void dsicm_power_off(struct panel_drv_data *ddata)
{
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
int r;
- in->ops.dsi->disable_video_output(in, ddata->channel);
+ src->ops->dsi.disable_video_output(src, ddata->channel);
r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_OFF);
if (!r)
@@ -737,7 +737,7 @@ static void dsicm_power_off(struct panel_drv_data *ddata)
dsicm_hw_reset(ddata);
}
- in->ops.dsi->disable(in, true, false);
+ src->ops->dsi.disable(src, true, false);
if (ddata->vddi)
regulator_disable(ddata->vddi);
@@ -756,71 +756,41 @@ static int dsicm_panel_reset(struct panel_drv_data *ddata)
return dsicm_power_on(ddata);
}
-static int dsicm_connect(struct omap_dss_device *dssdev)
+static int dsicm_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct panel_drv_data *ddata = to_panel_data(dst);
struct device *dev = &ddata->pdev->dev;
- struct omap_dss_device *in;
int r;
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.dsi->connect(in, dssdev);
- if (r) {
- dev_err(dev, "Failed to connect to video source\n");
- goto err_connect;
- }
-
- r = in->ops.dsi->request_vc(in, &ddata->channel);
+ r = src->ops->dsi.request_vc(src, &ddata->channel);
if (r) {
dev_err(dev, "failed to get virtual channel\n");
- goto err_req_vc;
+ return r;
}
- r = in->ops.dsi->set_vc_id(in, ddata->channel, TCH);
+ r = src->ops->dsi.set_vc_id(src, ddata->channel, TCH);
if (r) {
dev_err(dev, "failed to set VC_ID\n");
- goto err_vc_id;
+ src->ops->dsi.release_vc(src, ddata->channel);
+ return r;
}
- ddata->in = in;
return 0;
-
-err_vc_id:
- in->ops.dsi->release_vc(in, ddata->channel);
-err_req_vc:
- in->ops.dsi->disconnect(in, dssdev);
-err_connect:
- omap_dss_put_device(in);
- return r;
}
-static void dsicm_disconnect(struct omap_dss_device *dssdev)
+static void dsicm_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.dsi->release_vc(in, ddata->channel);
- in->ops.dsi->disconnect(in, dssdev);
+ struct panel_drv_data *ddata = to_panel_data(dst);
- omap_dss_put_device(in);
- ddata->in = NULL;
+ src->ops->dsi.release_vc(src, ddata->channel);
}
static int dsicm_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
dev_dbg(&ddata->pdev->dev, "enable\n");
@@ -837,11 +807,11 @@ static int dsicm_enable(struct omap_dss_device *dssdev)
goto err;
}
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
r = dsicm_power_on(ddata);
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
if (r)
goto err;
@@ -862,7 +832,7 @@ err:
static void dsicm_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
dev_dbg(&ddata->pdev->dev, "disable\n");
@@ -873,7 +843,7 @@ static void dsicm_disable(struct omap_dss_device *dssdev)
dsicm_cancel_ulps_work(ddata);
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
if (omapdss_device_is_enabled(dssdev)) {
r = dsicm_wake_up(ddata);
@@ -881,7 +851,7 @@ static void dsicm_disable(struct omap_dss_device *dssdev)
dsicm_power_off(ddata);
}
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
@@ -891,16 +861,16 @@ static void dsicm_disable(struct omap_dss_device *dssdev)
static void dsicm_framedone_cb(int err, void *data)
{
struct panel_drv_data *ddata = data;
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
dev_dbg(&ddata->pdev->dev, "framedone, err %d\n", err);
- in->ops.dsi->bus_unlock(ddata->in);
+ src->ops->dsi.bus_unlock(src);
}
static irqreturn_t dsicm_te_isr(int irq, void *data)
{
struct panel_drv_data *ddata = data;
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
int old;
int r;
@@ -909,7 +879,7 @@ static irqreturn_t dsicm_te_isr(int irq, void *data)
if (old) {
cancel_delayed_work(&ddata->te_timeout_work);
- r = in->ops.dsi->update(in, ddata->channel, dsicm_framedone_cb,
+ r = src->ops->dsi.update(src, ddata->channel, dsicm_framedone_cb,
ddata);
if (r)
goto err;
@@ -918,7 +888,7 @@ static irqreturn_t dsicm_te_isr(int irq, void *data)
return IRQ_HANDLED;
err:
dev_err(&ddata->pdev->dev, "start update failed\n");
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
return IRQ_HANDLED;
}
@@ -926,25 +896,25 @@ static void dsicm_te_timeout_work_callback(struct work_struct *work)
{
struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
te_timeout_work.work);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
dev_err(&ddata->pdev->dev, "TE not received for 250ms!\n");
atomic_set(&ddata->do_update, 0);
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
}
static int dsicm_update(struct omap_dss_device *dssdev,
u16 x, u16 y, u16 w, u16 h)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
dev_dbg(&ddata->pdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
mutex_lock(&ddata->lock);
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
r = dsicm_wake_up(ddata);
if (r)
@@ -956,9 +926,8 @@ static int dsicm_update(struct omap_dss_device *dssdev,
}
/* XXX no need to send this every frame, but dsi break if not done */
- r = dsicm_set_update_window(ddata, 0, 0,
- dssdev->panel.vm.hactive,
- dssdev->panel.vm.vactive);
+ r = dsicm_set_update_window(ddata, 0, 0, ddata->vm.hactive,
+ ddata->vm.vactive);
if (r)
goto err;
@@ -967,17 +936,17 @@ static int dsicm_update(struct omap_dss_device *dssdev,
msecs_to_jiffies(250));
atomic_set(&ddata->do_update, 1);
} else {
- r = in->ops.dsi->update(in, ddata->channel, dsicm_framedone_cb,
+ r = src->ops->dsi.update(src, ddata->channel, dsicm_framedone_cb,
ddata);
if (r)
goto err;
}
- /* note: no bus_unlock here. unlock is in framedone_cb */
+ /* note: no bus_unlock here. unlock is done in framedone_cb */
mutex_unlock(&ddata->lock);
return 0;
err:
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
mutex_unlock(&ddata->lock);
return r;
}
@@ -985,13 +954,13 @@ err:
static int dsicm_sync(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
dev_dbg(&ddata->pdev->dev, "sync\n");
mutex_lock(&ddata->lock);
- in->ops.dsi->bus_lock(in);
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_lock(src);
+ src->ops->dsi.bus_unlock(src);
mutex_unlock(&ddata->lock);
dev_dbg(&ddata->pdev->dev, "sync done\n");
@@ -1001,7 +970,7 @@ static int dsicm_sync(struct omap_dss_device *dssdev)
static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable)
{
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
int r;
if (enable)
@@ -1010,7 +979,7 @@ static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable)
r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_TEAR_OFF);
if (!ddata->ext_te_gpio)
- in->ops.dsi->enable_te(in, enable);
+ src->ops->dsi.enable_te(src, enable);
/* possible panel bug */
msleep(100);
@@ -1021,7 +990,7 @@ static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable)
static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
mutex_lock(&ddata->lock);
@@ -1029,7 +998,7 @@ static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable)
if (ddata->te_enabled == enable)
goto end;
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
if (ddata->enabled) {
r = dsicm_wake_up(ddata);
@@ -1043,13 +1012,13 @@ static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable)
ddata->te_enabled = enable;
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
end:
mutex_unlock(&ddata->lock);
return 0;
err:
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
mutex_unlock(&ddata->lock);
return r;
@@ -1072,7 +1041,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev,
u16 x, u16 y, u16 w, u16 h)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
int first = 1;
int plen;
@@ -1089,9 +1058,9 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev,
}
size = min((u32)w * h * 3,
- dssdev->panel.vm.hactive * dssdev->panel.vm.vactive * 3);
+ ddata->vm.hactive * ddata->vm.vactive * 3);
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
r = dsicm_wake_up(ddata);
if (r)
@@ -1107,7 +1076,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev,
dsicm_set_update_window(ddata, x, y, w, h);
- r = in->ops.dsi->set_max_rx_packet_size(in, ddata->channel, plen);
+ r = src->ops->dsi.set_max_rx_packet_size(src, ddata->channel, plen);
if (r)
goto err2;
@@ -1115,7 +1084,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev,
u8 dcs_cmd = first ? 0x2e : 0x3e;
first = 0;
- r = in->ops.dsi->dcs_read(in, ddata->channel, dcs_cmd,
+ r = src->ops->dsi.dcs_read(src, ddata->channel, dcs_cmd,
buf + buf_used, size - buf_used);
if (r < 0) {
@@ -1141,9 +1110,9 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev,
r = buf_used;
err3:
- in->ops.dsi->set_max_rx_packet_size(in, ddata->channel, 1);
+ src->ops->dsi.set_max_rx_packet_size(src, ddata->channel, 1);
err2:
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
err1:
mutex_unlock(&ddata->lock);
return r;
@@ -1154,7 +1123,7 @@ static void dsicm_ulps_work(struct work_struct *work)
struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
ulps_work.work);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
mutex_lock(&ddata->lock);
@@ -1163,11 +1132,11 @@ static void dsicm_ulps_work(struct work_struct *work)
return;
}
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
dsicm_enter_ulps(ddata);
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
mutex_unlock(&ddata->lock);
}
@@ -1210,18 +1179,21 @@ static void dsicm_get_size(struct omap_dss_device *dssdev,
*height = ddata->height_mm;
}
-static struct omap_dss_driver dsicm_ops = {
+static const struct omap_dss_device_ops dsicm_ops = {
.connect = dsicm_connect,
.disconnect = dsicm_disconnect,
.enable = dsicm_enable,
.disable = dsicm_disable,
+ .get_timings = dsicm_get_timings,
+ .check_timings = dsicm_check_timings,
+};
+
+static const struct omap_dss_driver dsicm_dss_driver = {
.update = dsicm_update,
.sync = dsicm_sync,
- .get_timings = dsicm_get_timings,
- .check_timings = dsicm_check_timings,
.get_size = dsicm_get_size,
.enable_te = dsicm_enable_te,
@@ -1330,20 +1302,17 @@ static int dsicm_probe(struct platform_device *pdev)
dssdev = &ddata->dssdev;
dssdev->dev = dev;
- dssdev->driver = &dsicm_ops;
- dssdev->panel.vm = ddata->vm;
+ dssdev->ops = &dsicm_ops;
+ dssdev->driver = &dsicm_dss_driver;
dssdev->type = OMAP_DISPLAY_TYPE_DSI;
dssdev->owner = THIS_MODULE;
+ dssdev->of_ports = BIT(0);
- dssdev->panel.dsi_pix_fmt = OMAP_DSS_DSI_FMT_RGB888;
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(dev, "Failed to register panel\n");
- goto err_reg;
- }
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
mutex_init(&ddata->lock);
@@ -1414,10 +1383,10 @@ static int __exit dsicm_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "remove\n");
- omapdss_unregister_display(dssdev);
+ omapdss_device_unregister(dssdev);
dsicm_disable(dssdev);
- dsicm_disconnect(dssdev);
+ omapdss_device_disconnect(dssdev->src, dssdev);
sysfs_remove_group(&pdev->dev.kobj, &dsicm_attr_group);
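The hunks above convert the DSI command-mode panel from the old driver model, where the panel looked up its video source and cached it in ddata->in, to the new omap_dss_device_ops model, where the source is handed to .connect() and later reached through dssdev->src. A minimal sketch of the resulting shape, using hypothetical mypanel_* names and the structures shown in the hunks, is:

#include "../dss/omapdss.h"

/* Illustrative only: the source device is a parameter of .connect() and
 * .disconnect(), and is otherwise reached through dssdev->src. */
static int mypanel_connect(struct omap_dss_device *src,
			   struct omap_dss_device *dst)
{
	struct panel_drv_data *ddata = to_panel_data(dst);

	/* Bus-specific setup, e.g. requesting a DSI virtual channel. */
	return src->ops->dsi.request_vc(src, &ddata->channel);
}

static void mypanel_disconnect(struct omap_dss_device *src,
			       struct omap_dss_device *dst)
{
	struct panel_drv_data *ddata = to_panel_data(dst);

	src->ops->dsi.release_vc(src, ddata->channel);
}

static int mypanel_enable(struct omap_dss_device *dssdev)
{
	struct omap_dss_device *src = dssdev->src;

	/* The generic enable op replaces the bus-specific ops.dsi->enable(). */
	return src->ops->enable(src);
}

The same src-based convention is applied to every panel driver converted below; only the amount of bus-specific setup differs, and for the DPI and SDI panels the new .connect() and .disconnect() handlers become empty.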
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index 754197099440..f6ef8ff964dd 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -33,19 +33,11 @@ static const struct videomode lb035q02_vm = {
.vfront_porch = 4,
.vback_porch = 18,
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
- DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
- DISPLAY_FLAGS_PIXDATA_POSEDGE,
- /*
- * Note: According to the panel documentation:
- * DE is active LOW
- * DATA needs to be driven on the FALLING edge
- */
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
struct spi_device *spi;
@@ -116,51 +108,25 @@ static void init_lb035q02_panel(struct spi_device *spi)
lb035q02_write_reg(spi, 0x3b, 0x0806);
}
-static int lb035q02_connect(struct omap_dss_device *dssdev)
+static int lb035q02_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.dpi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
+ struct panel_drv_data *ddata = to_panel_data(dst);
init_lb035q02_panel(ddata->spi);
- ddata->in = in;
return 0;
}
-static void lb035q02_disconnect(struct omap_dss_device *dssdev)
+static void lb035q02_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.dpi->disconnect(in, dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
}
static int lb035q02_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
if (!omapdss_device_is_connected(dssdev))
@@ -169,9 +135,7 @@ static int lb035q02_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dpi->set_timings(in, &ddata->vm);
-
- r = in->ops.dpi->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
@@ -186,7 +150,7 @@ static int lb035q02_enable(struct omap_dss_device *dssdev)
static void lb035q02_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
if (!omapdss_device_is_enabled(dssdev))
return;
@@ -194,23 +158,11 @@ static void lb035q02_disable(struct omap_dss_device *dssdev)
if (ddata->enable_gpio)
gpiod_set_value_cansleep(ddata->enable_gpio, 0);
- in->ops.dpi->disable(in);
+ src->ops->disable(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void lb035q02_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.dpi->set_timings(in, vm);
-}
-
static void lb035q02_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
@@ -219,25 +171,14 @@ static void lb035q02_get_timings(struct omap_dss_device *dssdev,
*vm = ddata->vm;
}
-static int lb035q02_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.dpi->check_timings(in, vm);
-}
-
-static struct omap_dss_driver lb035q02_ops = {
+static const struct omap_dss_device_ops lb035q02_ops = {
.connect = lb035q02_connect,
.disconnect = lb035q02_disconnect,
.enable = lb035q02_enable,
.disable = lb035q02_disable,
- .set_timings = lb035q02_set_timings,
.get_timings = lb035q02_get_timings,
- .check_timings = lb035q02_check_timings,
};
static int lb035q02_probe_of(struct spi_device *spi)
@@ -278,16 +219,21 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
- dssdev->driver = &lb035q02_ops;
+ dssdev->ops = &lb035q02_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.vm = ddata->vm;
+ dssdev->of_ports = BIT(0);
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(&spi->dev, "Failed to register panel\n");
- return r;
- }
+ /*
+ * Note: According to the panel documentation:
+ * DE is active LOW
+ * DATA needs to be driven on the FALLING edge
+ */
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_POSEDGE;
+
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
return 0;
}
@@ -297,10 +243,9 @@ static int lb035q02_panel_spi_remove(struct spi_device *spi)
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- omapdss_unregister_display(dssdev);
+ omapdss_device_unregister(dssdev);
lb035q02_disable(dssdev);
- lb035q02_disconnect(dssdev);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index 9a3b27fa5cb5..f445de6369f7 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -11,22 +11,19 @@
* (at your option) any later version.
*/
-#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/spi/spi.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_gpio.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
#include "../dss/omapdss.h"
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
struct videomode vm;
- int res_gpio;
- int qvga_gpio;
+ struct gpio_desc *res_gpio;
struct spi_device *spi;
};
@@ -74,9 +71,7 @@ static const struct videomode nec_8048_panel_vm = {
.vsync_len = 1,
.vback_porch = 4,
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
- DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
- DISPLAY_FLAGS_PIXDATA_POSEDGE,
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -112,49 +107,21 @@ static int init_nec_8048_wvga_lcd(struct spi_device *spi)
return 0;
}
-static int nec_8048_connect(struct omap_dss_device *dssdev)
+static int nec_8048_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.dpi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- ddata->in = in;
return 0;
}
-static void nec_8048_disconnect(struct omap_dss_device *dssdev)
+static void nec_8048_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.dpi->disconnect(in, dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
}
static int nec_8048_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
if (!omapdss_device_is_connected(dssdev))
@@ -163,14 +130,11 @@ static int nec_8048_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dpi->set_timings(in, &ddata->vm);
-
- r = in->ops.dpi->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
- if (gpio_is_valid(ddata->res_gpio))
- gpio_set_value_cansleep(ddata->res_gpio, 1);
+ gpiod_set_value_cansleep(ddata->res_gpio, 1);
dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
@@ -180,31 +144,18 @@ static int nec_8048_enable(struct omap_dss_device *dssdev)
static void nec_8048_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
if (!omapdss_device_is_enabled(dssdev))
return;
- if (gpio_is_valid(ddata->res_gpio))
- gpio_set_value_cansleep(ddata->res_gpio, 0);
+ gpiod_set_value_cansleep(ddata->res_gpio, 0);
- in->ops.dpi->disable(in);
+ src->ops->disable(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void nec_8048_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.dpi->set_timings(in, vm);
-}
-
static void nec_8048_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
@@ -213,50 +164,21 @@ static void nec_8048_get_timings(struct omap_dss_device *dssdev,
*vm = ddata->vm;
}
-static int nec_8048_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.dpi->check_timings(in, vm);
-}
-
-static struct omap_dss_driver nec_8048_ops = {
+static const struct omap_dss_device_ops nec_8048_ops = {
.connect = nec_8048_connect,
.disconnect = nec_8048_disconnect,
.enable = nec_8048_enable,
.disable = nec_8048_disable,
- .set_timings = nec_8048_set_timings,
.get_timings = nec_8048_get_timings,
- .check_timings = nec_8048_check_timings,
};
-static int nec_8048_probe_of(struct spi_device *spi)
-{
- struct device_node *node = spi->dev.of_node;
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- int gpio;
-
- gpio = of_get_named_gpio(node, "reset-gpios", 0);
- if (!gpio_is_valid(gpio)) {
- dev_err(&spi->dev, "failed to parse enable gpio\n");
- return gpio;
- }
- ddata->res_gpio = gpio;
-
- /* XXX the panel spec doesn't mention any QVGA pin?? */
- ddata->qvga_gpio = -ENOENT;
-
- return 0;
-}
-
static int nec_8048_probe(struct spi_device *spi)
{
struct panel_drv_data *ddata;
struct omap_dss_device *dssdev;
+ struct gpio_desc *gpio;
int r;
dev_dbg(&spi->dev, "%s\n", __func__);
@@ -280,38 +202,27 @@ static int nec_8048_probe(struct spi_device *spi)
ddata->spi = spi;
- r = nec_8048_probe_of(spi);
- if (r)
- return r;
-
- if (gpio_is_valid(ddata->qvga_gpio)) {
- r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio,
- GPIOF_OUT_INIT_HIGH, "lcd QVGA");
- if (r)
- return r;
+ gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio)) {
+ dev_err(&spi->dev, "failed to get reset gpio\n");
+ return PTR_ERR(gpio);
}
- if (gpio_is_valid(ddata->res_gpio)) {
- r = devm_gpio_request_one(&spi->dev, ddata->res_gpio,
- GPIOF_OUT_INIT_LOW, "lcd RES");
- if (r)
- return r;
- }
+ ddata->res_gpio = gpio;
ddata->vm = nec_8048_panel_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
- dssdev->driver = &nec_8048_ops;
+ dssdev->ops = &nec_8048_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.vm = ddata->vm;
+ dssdev->of_ports = BIT(0);
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_POSEDGE;
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(&spi->dev, "Failed to register panel\n");
- return r;
- }
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
return 0;
}
@@ -323,10 +234,9 @@ static int nec_8048_remove(struct spi_device *spi)
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
- omapdss_unregister_display(dssdev);
+ omapdss_device_unregister(dssdev);
nec_8048_disable(dssdev);
- nec_8048_disconnect(dssdev);
return 0;
}
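The NEC panel conversion above also replaces the legacy integer GPIO API (of_get_named_gpio() plus devm_gpio_request_one() and gpio_set_value_cansleep()) with GPIO descriptors. A hedged sketch of the pattern, with hypothetical mypanel_* names; devm_gpiod_get() both looks up and requests the line, so no validity check is needed before toggling it:

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>

struct mypanel_data {
	struct gpio_desc *res_gpio;
};

static int mypanel_request_reset(struct spi_device *spi,
				 struct mypanel_data *ddata)
{
	/* One call parses "reset-gpios", requests the line and drives it low. */
	ddata->res_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(ddata->res_gpio)) {
		dev_err(&spi->dev, "failed to get reset gpio\n");
		return PTR_ERR(ddata->res_gpio);
	}

	/* Descriptors returned here are always usable, so the old
	 * gpio_is_valid() checks around the toggles go away. */
	gpiod_set_value_cansleep(ddata->res_gpio, 1);
	return 0;
}

The Sony panel further below uses devm_gpiod_get_optional() instead, since its reset line may be absent, and guards the toggles with a simple NULL check on the descriptor.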
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index bb5b680cabfe..64b1369cb274 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -21,7 +21,6 @@
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
struct regulator *vcc;
struct videomode vm;
@@ -47,60 +46,26 @@ static const struct videomode sharp_ls_vm = {
.vfront_porch = 1,
.vback_porch = 1,
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
- DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
- DISPLAY_FLAGS_PIXDATA_POSEDGE,
- /*
- * Note: According to the panel documentation:
- * DATA needs to be driven on the FALLING edge
- */
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-static int sharp_ls_connect(struct omap_dss_device *dssdev)
+static int sharp_ls_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.dpi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- ddata->in = in;
return 0;
}
-static void sharp_ls_disconnect(struct omap_dss_device *dssdev)
+static void sharp_ls_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.dpi->disconnect(in, dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
}
static int sharp_ls_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
if (!omapdss_device_is_connected(dssdev))
@@ -109,15 +74,13 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dpi->set_timings(in, &ddata->vm);
-
if (ddata->vcc) {
r = regulator_enable(ddata->vcc);
if (r != 0)
return r;
}
- r = in->ops.dpi->enable(in);
+ r = src->ops->enable(src);
if (r) {
regulator_disable(ddata->vcc);
return r;
@@ -140,7 +103,7 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev)
static void sharp_ls_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
if (!omapdss_device_is_enabled(dssdev))
return;
@@ -155,7 +118,7 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev)
msleep(100);
- in->ops.dpi->disable(in);
+ src->ops->disable(src);
if (ddata->vcc)
regulator_disable(ddata->vcc);
@@ -163,18 +126,6 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void sharp_ls_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.dpi->set_timings(in, vm);
-}
-
static void sharp_ls_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
@@ -183,25 +134,14 @@ static void sharp_ls_get_timings(struct omap_dss_device *dssdev,
*vm = ddata->vm;
}
-static int sharp_ls_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.dpi->check_timings(in, vm);
-}
-
-static struct omap_dss_driver sharp_ls_ops = {
+static const struct omap_dss_device_ops sharp_ls_ops = {
.connect = sharp_ls_connect,
.disconnect = sharp_ls_disconnect,
.enable = sharp_ls_enable,
.disable = sharp_ls_disable,
- .set_timings = sharp_ls_set_timings,
.get_timings = sharp_ls_get_timings,
- .check_timings = sharp_ls_check_timings,
};
static int sharp_ls_get_gpio_of(struct device *dev, int index, int val,
@@ -278,16 +218,20 @@ static int sharp_ls_probe(struct platform_device *pdev)
dssdev = &ddata->dssdev;
dssdev->dev = &pdev->dev;
- dssdev->driver = &sharp_ls_ops;
+ dssdev->ops = &sharp_ls_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.vm = ddata->vm;
+ dssdev->of_ports = BIT(0);
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(&pdev->dev, "Failed to register panel\n");
- return r;
- }
+ /*
+ * Note: According to the panel documentation:
+ * DATA needs to be driven on the FALLING edge
+ */
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_POSEDGE;
+
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
return 0;
}
@@ -297,10 +241,9 @@ static int __exit sharp_ls_remove(struct platform_device *pdev)
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- omapdss_unregister_display(dssdev);
+ omapdss_device_unregister(dssdev);
sharp_ls_disable(dssdev);
- sharp_ls_disconnect(dssdev);
return 0;
}
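A second change repeated across these panels: the data-enable level and the sync/pixel-data edge flags move out of the struct videomode and are expressed as DRM bus flags on the display device at probe time, before omapdss_display_init() and omapdss_device_register(). A condensed before/after sketch with a hypothetical mypanel prefix (the exact flag combination varies per panel, timings omitted):

#include "../dss/omapdss.h"

/* Before: DE level and clock edges encoded in the panel's video mode. */
static const struct videomode mypanel_vm_before = {
	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
		 DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
		 DISPLAY_FLAGS_PIXDATA_POSEDGE,
};

/* After: only the sync polarities stay in the video mode... */
static const struct videomode mypanel_vm_after = {
	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};

/* ...and the rest becomes bus flags, set once at probe time. */
static void mypanel_set_bus_flags(struct omap_dss_device *dssdev)
{
	dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE
			  | DRM_BUS_FLAG_PIXDATA_POSEDGE;
}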
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index f34c06bb5bd7..e04663856b31 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -20,17 +20,15 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/spi/spi.h>
-#include <linux/jiffies.h>
#include <linux/sched.h>
-#include <linux/backlight.h>
-#include <linux/gpio/consumer.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/spi/spi.h>
#include "../dss/omapdss.h"
@@ -64,9 +62,8 @@
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
- int reset_gpio;
+ struct gpio_desc *reset_gpio;
struct videomode vm;
@@ -100,9 +97,7 @@ static const struct videomode acx565akm_panel_vm = {
.vsync_len = 3,
.vback_porch = 4,
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
- DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
- DISPLAY_FLAGS_PIXDATA_POSEDGE,
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -507,56 +502,26 @@ static const struct attribute_group bldev_attr_group = {
.attrs = bldev_attrs,
};
-static int acx565akm_connect(struct omap_dss_device *dssdev)
+static int acx565akm_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.sdi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- ddata->in = in;
return 0;
}
-static void acx565akm_disconnect(struct omap_dss_device *dssdev)
+static void acx565akm_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.sdi->disconnect(in, dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
}
static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
- in->ops.sdi->set_timings(in, &ddata->vm);
-
- r = in->ops.sdi->enable(in);
+ r = src->ops->enable(src);
if (r) {
pr_err("%s sdi enable failed\n", __func__);
return r;
@@ -565,8 +530,8 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
/*FIXME tweak me */
msleep(50);
- if (gpio_is_valid(ddata->reset_gpio))
- gpio_set_value(ddata->reset_gpio, 1);
+ if (ddata->reset_gpio)
+ gpiod_set_value(ddata->reset_gpio, 1);
if (ddata->enabled) {
dev_dbg(&ddata->spi->dev, "panel already enabled\n");
@@ -597,7 +562,7 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
static void acx565akm_panel_power_off(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
dev_dbg(dssdev->dev, "%s\n", __func__);
@@ -615,13 +580,13 @@ static void acx565akm_panel_power_off(struct omap_dss_device *dssdev)
*/
msleep(50);
- if (gpio_is_valid(ddata->reset_gpio))
- gpio_set_value(ddata->reset_gpio, 0);
+ if (ddata->reset_gpio)
+ gpiod_set_value(ddata->reset_gpio, 0);
/* FIXME need to tweak this delay */
msleep(100);
- in->ops.sdi->disable(in);
+ src->ops->disable(src);
}
static int acx565akm_enable(struct omap_dss_device *dssdev)
@@ -664,18 +629,6 @@ static void acx565akm_disable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void acx565akm_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.sdi->set_timings(in, vm);
-}
-
static void acx565akm_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
@@ -684,37 +637,16 @@ static void acx565akm_get_timings(struct omap_dss_device *dssdev,
*vm = ddata->vm;
}
-static int acx565akm_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.sdi->check_timings(in, vm);
-}
-
-static struct omap_dss_driver acx565akm_ops = {
+static const struct omap_dss_device_ops acx565akm_ops = {
.connect = acx565akm_connect,
.disconnect = acx565akm_disconnect,
.enable = acx565akm_enable,
.disable = acx565akm_disable,
- .set_timings = acx565akm_set_timings,
.get_timings = acx565akm_get_timings,
- .check_timings = acx565akm_check_timings,
};
-static int acx565akm_probe_of(struct spi_device *spi)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct device_node *np = spi->dev.of_node;
-
- ddata->reset_gpio = of_get_named_gpio(np, "reset-gpios", 0);
-
- return 0;
-}
-
static int acx565akm_probe(struct spi_device *spi)
{
struct panel_drv_data *ddata;
@@ -722,6 +654,7 @@ static int acx565akm_probe(struct spi_device *spi)
struct backlight_device *bldev;
int max_brightness, brightness;
struct backlight_properties props;
+ struct gpio_desc *gpio;
int r;
dev_dbg(&spi->dev, "%s\n", __func__);
@@ -738,19 +671,16 @@ static int acx565akm_probe(struct spi_device *spi)
mutex_init(&ddata->mutex);
- r = acx565akm_probe_of(spi);
- if (r)
- return r;
-
- if (gpio_is_valid(ddata->reset_gpio)) {
- r = devm_gpio_request_one(&spi->dev, ddata->reset_gpio,
- GPIOF_OUT_INIT_LOW, "lcd reset");
- if (r)
- goto err_gpio;
+ gpio = devm_gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio)) {
+ dev_err(&spi->dev, "failed to parse reset gpio\n");
+ return PTR_ERR(gpio);
}
- if (gpio_is_valid(ddata->reset_gpio))
- gpio_set_value(ddata->reset_gpio, 1);
+ ddata->reset_gpio = gpio;
+
+ if (ddata->reset_gpio)
+ gpiod_set_value(ddata->reset_gpio, 1);
/*
* After reset we have to wait 5 msec before the first
@@ -762,12 +692,12 @@ static int acx565akm_probe(struct spi_device *spi)
r = panel_detect(ddata);
- if (!ddata->enabled && gpio_is_valid(ddata->reset_gpio))
- gpio_set_value(ddata->reset_gpio, 0);
+ if (!ddata->enabled && ddata->reset_gpio)
+ gpiod_set_value(ddata->reset_gpio, 0);
if (r) {
dev_err(&spi->dev, "%s panel detect error\n", __func__);
- goto err_detect;
+ return r;
}
memset(&props, 0, sizeof(props));
@@ -777,17 +707,15 @@ static int acx565akm_probe(struct spi_device *spi)
bldev = backlight_device_register("acx565akm", &ddata->spi->dev,
ddata, &acx565akm_bl_ops, &props);
- if (IS_ERR(bldev)) {
- r = PTR_ERR(bldev);
- goto err_reg_bl;
- }
+ if (IS_ERR(bldev))
+ return PTR_ERR(bldev);
ddata->bl_dev = bldev;
if (ddata->has_cabc) {
r = sysfs_create_group(&bldev->dev.kobj, &bldev_attr_group);
if (r) {
dev_err(&bldev->dev,
"%s failed to create sysfs files\n", __func__);
- goto err_sysfs;
+ goto err_backlight_unregister;
}
ddata->cabc_mode = get_hw_cabc_mode(ddata);
}
@@ -809,26 +737,20 @@ static int acx565akm_probe(struct spi_device *spi)
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
- dssdev->driver = &acx565akm_ops;
+ dssdev->ops = &acx565akm_ops;
dssdev->type = OMAP_DISPLAY_TYPE_SDI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.vm = ddata->vm;
+ dssdev->of_ports = BIT(0);
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_POSEDGE;
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(&spi->dev, "Failed to register panel\n");
- goto err_reg;
- }
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
return 0;
-err_reg:
- sysfs_remove_group(&bldev->dev.kobj, &bldev_attr_group);
-err_sysfs:
+err_backlight_unregister:
backlight_device_unregister(bldev);
-err_reg_bl:
-err_detect:
-err_gpio:
return r;
}
@@ -842,10 +764,9 @@ static int acx565akm_remove(struct spi_device *spi)
sysfs_remove_group(&ddata->bl_dev->dev.kobj, &bldev_attr_group);
backlight_device_unregister(ddata->bl_dev);
- omapdss_unregister_display(dssdev);
+ omapdss_device_unregister(dssdev);
acx565akm_disable(dssdev);
- acx565akm_disconnect(dssdev);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index a1f1dc18407a..7ddc8c574a61 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -27,13 +27,11 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
#include "../dss/omapdss.h"
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
struct videomode vm;
@@ -51,13 +49,7 @@ static const struct videomode td028ttec1_panel_vm = {
.vsync_len = 2,
.vback_porch = 2,
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
- DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
- DISPLAY_FLAGS_PIXDATA_NEGEDGE,
- /*
- * Note: According to the panel documentation:
- * SYNC needs to be driven on the FALLING edge
- */
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
#define JBT_COMMAND 0x000
@@ -166,49 +158,21 @@ enum jbt_register {
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-static int td028ttec1_panel_connect(struct omap_dss_device *dssdev)
+static int td028ttec1_panel_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.dpi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- ddata->in = in;
return 0;
}
-static void td028ttec1_panel_disconnect(struct omap_dss_device *dssdev)
+static void td028ttec1_panel_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.dpi->disconnect(in, dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
}
static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
if (!omapdss_device_is_connected(dssdev))
@@ -217,9 +181,7 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dpi->set_timings(in, &ddata->vm);
-
- r = in->ops.dpi->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
@@ -316,7 +278,7 @@ transfer_err:
static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
if (!omapdss_device_is_enabled(dssdev))
return;
@@ -328,23 +290,11 @@ static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
jbt_ret_write_0(ddata, JBT_REG_SLEEP_IN);
jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x00);
- in->ops.dpi->disable(in);
+ src->ops->disable(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void td028ttec1_panel_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.dpi->set_timings(in, vm);
-}
-
static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
@@ -353,25 +303,14 @@ static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev,
*vm = ddata->vm;
}
-static int td028ttec1_panel_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.dpi->check_timings(in, vm);
-}
-
-static struct omap_dss_driver td028ttec1_ops = {
+static const struct omap_dss_device_ops td028ttec1_ops = {
.connect = td028ttec1_panel_connect,
.disconnect = td028ttec1_panel_disconnect,
.enable = td028ttec1_panel_enable,
.disable = td028ttec1_panel_disable,
- .set_timings = td028ttec1_panel_set_timings,
.get_timings = td028ttec1_panel_get_timings,
- .check_timings = td028ttec1_panel_check_timings,
};
static int td028ttec1_panel_probe(struct spi_device *spi)
@@ -403,16 +342,20 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
- dssdev->driver = &td028ttec1_ops;
+ dssdev->ops = &td028ttec1_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.vm = ddata->vm;
+ dssdev->of_ports = BIT(0);
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(&spi->dev, "Failed to register panel\n");
- return r;
- }
+ /*
+ * Note: According to the panel documentation:
+ * SYNC needs to be driven on the FALLING edge
+ */
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
return 0;
}
@@ -424,10 +367,9 @@ static int td028ttec1_panel_remove(struct spi_device *spi)
dev_dbg(&ddata->spi_dev->dev, "%s\n", __func__);
- omapdss_unregister_display(dssdev);
+ omapdss_device_unregister(dssdev);
td028ttec1_panel_disable(dssdev);
- td028ttec1_panel_disconnect(dssdev);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index c08e22b43447..8440fcb744d9 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -10,14 +10,13 @@
* (at your option) any later version.
*/
-#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/spi/spi.h>
-#include <linux/regulator/consumer.h>
-#include <linux/gpio/consumer.h>
#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
-#include <linux/of_gpio.h>
+#include <linux/spi/spi.h>
#include "../dss/omapdss.h"
@@ -54,16 +53,14 @@ static const u16 tpo_td043_def_gamma[12] = {
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
struct videomode vm;
struct spi_device *spi;
struct regulator *vcc_reg;
- int nreset_gpio;
+ struct gpio_desc *reset_gpio;
u16 gamma[12];
u32 mode;
- u32 hmirror:1;
u32 vmirror:1;
u32 powered_on:1;
u32 spi_suspended:1;
@@ -84,13 +81,7 @@ static const struct videomode tpo_td043_vm = {
.vfront_porch = 39,
.vback_porch = 34,
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
- DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
- DISPLAY_FLAGS_PIXDATA_NEGEDGE,
- /*
- * Note: According to the panel documentation:
- * SYNC needs to be driven on the FALLING edge
- */
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -152,22 +143,6 @@ static int tpo_td043_write_mirror(struct spi_device *spi, bool h, bool v)
return tpo_td043_write(spi, 4, reg4);
}
-static int tpo_td043_set_hmirror(struct omap_dss_device *dssdev, bool enable)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
-
- ddata->hmirror = enable;
- return tpo_td043_write_mirror(ddata->spi, ddata->hmirror,
- ddata->vmirror);
-}
-
-static bool tpo_td043_get_hmirror(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
-
- return ddata->hmirror;
-}
-
static ssize_t tpo_td043_vmirror_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -189,7 +164,7 @@ static ssize_t tpo_td043_vmirror_store(struct device *dev,
val = !!val;
- ret = tpo_td043_write_mirror(ddata->spi, ddata->hmirror, val);
+ ret = tpo_td043_write_mirror(ddata->spi, false, val);
if (ret < 0)
return ret;
@@ -300,16 +275,14 @@ static int tpo_td043_power_on(struct panel_drv_data *ddata)
/* wait for panel to stabilize */
msleep(160);
- if (gpio_is_valid(ddata->nreset_gpio))
- gpio_set_value(ddata->nreset_gpio, 1);
+ gpiod_set_value(ddata->reset_gpio, 0);
tpo_td043_write(ddata->spi, 2,
TPO_R02_MODE(ddata->mode) | TPO_R02_NCLK_RISING);
tpo_td043_write(ddata->spi, 3, TPO_R03_VAL_NORMAL);
tpo_td043_write(ddata->spi, 0x20, 0xf0);
tpo_td043_write(ddata->spi, 0x21, 0xf0);
- tpo_td043_write_mirror(ddata->spi, ddata->hmirror,
- ddata->vmirror);
+ tpo_td043_write_mirror(ddata->spi, false, ddata->vmirror);
tpo_td043_write_gamma(ddata->spi, ddata->gamma);
ddata->powered_on = 1;
@@ -324,8 +297,7 @@ static void tpo_td043_power_off(struct panel_drv_data *ddata)
tpo_td043_write(ddata->spi, 3,
TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM);
- if (gpio_is_valid(ddata->nreset_gpio))
- gpio_set_value(ddata->nreset_gpio, 0);
+ gpiod_set_value(ddata->reset_gpio, 1);
/* wait for at least 2 vsyncs before cutting off power */
msleep(50);
@@ -337,49 +309,21 @@ static void tpo_td043_power_off(struct panel_drv_data *ddata)
ddata->powered_on = 0;
}
-static int tpo_td043_connect(struct omap_dss_device *dssdev)
+static int tpo_td043_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.dpi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- ddata->in = in;
return 0;
}
-static void tpo_td043_disconnect(struct omap_dss_device *dssdev)
+static void tpo_td043_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.dpi->disconnect(in, dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
}
static int tpo_td043_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
if (!omapdss_device_is_connected(dssdev))
@@ -388,9 +332,7 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dpi->set_timings(in, &ddata->vm);
-
- r = in->ops.dpi->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
@@ -401,7 +343,7 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev)
if (!ddata->spi_suspended) {
r = tpo_td043_power_on(ddata);
if (r) {
- in->ops.dpi->disable(in);
+ src->ops->disable(src);
return r;
}
}
@@ -414,12 +356,12 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev)
static void tpo_td043_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
if (!omapdss_device_is_enabled(dssdev))
return;
- in->ops.dpi->disable(in);
+ src->ops->disable(src);
if (!ddata->spi_suspended)
tpo_td043_power_off(ddata);
@@ -427,18 +369,6 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.dpi->set_timings(in, vm);
-}
-
static void tpo_td043_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
@@ -447,50 +377,21 @@ static void tpo_td043_get_timings(struct omap_dss_device *dssdev,
*vm = ddata->vm;
}
-static int tpo_td043_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.dpi->check_timings(in, vm);
-}
-
-static struct omap_dss_driver tpo_td043_ops = {
+static const struct omap_dss_device_ops tpo_td043_ops = {
.connect = tpo_td043_connect,
.disconnect = tpo_td043_disconnect,
.enable = tpo_td043_enable,
.disable = tpo_td043_disable,
- .set_timings = tpo_td043_set_timings,
.get_timings = tpo_td043_get_timings,
- .check_timings = tpo_td043_check_timings,
-
- .set_mirror = tpo_td043_set_hmirror,
- .get_mirror = tpo_td043_get_hmirror,
};
-static int tpo_td043_probe_of(struct spi_device *spi)
-{
- struct device_node *node = spi->dev.of_node;
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- int gpio;
-
- gpio = of_get_named_gpio(node, "reset-gpios", 0);
- if (!gpio_is_valid(gpio)) {
- dev_err(&spi->dev, "failed to parse enable gpio\n");
- return gpio;
- }
- ddata->nreset_gpio = gpio;
-
- return 0;
-}
-
static int tpo_td043_probe(struct spi_device *spi)
{
struct panel_drv_data *ddata;
struct omap_dss_device *dssdev;
+ struct gpio_desc *gpio;
int r;
dev_dbg(&spi->dev, "%s\n", __func__);
@@ -512,59 +413,49 @@ static int tpo_td043_probe(struct spi_device *spi)
ddata->spi = spi;
- r = tpo_td043_probe_of(spi);
- if (r)
- return r;
-
ddata->mode = TPO_R02_MODE_800x480;
memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma));
ddata->vcc_reg = devm_regulator_get(&spi->dev, "vcc");
if (IS_ERR(ddata->vcc_reg)) {
dev_err(&spi->dev, "failed to get LCD VCC regulator\n");
- r = PTR_ERR(ddata->vcc_reg);
- goto err_regulator;
+ return PTR_ERR(ddata->vcc_reg);
}
- if (gpio_is_valid(ddata->nreset_gpio)) {
- r = devm_gpio_request_one(&spi->dev,
- ddata->nreset_gpio, GPIOF_OUT_INIT_LOW,
- "lcd reset");
- if (r < 0) {
- dev_err(&spi->dev, "couldn't request reset GPIO\n");
- goto err_gpio_req;
- }
+ gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio)) {
+ dev_err(&spi->dev, "failed to get reset gpio\n");
+ return PTR_ERR(gpio);
}
+ ddata->reset_gpio = gpio;
+
r = sysfs_create_group(&spi->dev.kobj, &tpo_td043_attr_group);
if (r) {
dev_err(&spi->dev, "failed to create sysfs files\n");
- goto err_sysfs;
+ return r;
}
ddata->vm = tpo_td043_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
- dssdev->driver = &tpo_td043_ops;
+ dssdev->ops = &tpo_td043_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.vm = ddata->vm;
+ dssdev->of_ports = BIT(0);
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(&spi->dev, "Failed to register panel\n");
- goto err_reg;
- }
+ /*
+ * Note: According to the panel documentation:
+ * SYNC needs to be driven on the FALLING edge
+ */
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_NEGEDGE;
- return 0;
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
-err_reg:
- sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group);
-err_sysfs:
-err_gpio_req:
-err_regulator:
- return r;
+ return 0;
}
static int tpo_td043_remove(struct spi_device *spi)
@@ -574,10 +465,9 @@ static int tpo_td043_remove(struct spi_device *spi)
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
- omapdss_unregister_display(dssdev);
+ omapdss_device_unregister(dssdev);
tpo_td043_disable(dssdev);
- tpo_td043_disconnect(dssdev);
sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group);
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index 99e8cb8dc65b..472f56e3de70 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -14,24 +14,17 @@
*/
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_graph.h>
-#include <linux/list.h>
#include "dss.h"
#include "omapdss.h"
static struct dss_device *dss_device;
-static struct list_head omapdss_comp_list;
-
-struct omapdss_comp_node {
- struct list_head list;
- struct device_node *node;
- bool dss_core_component;
-};
-
struct dss_device *omapdss_get_dss(void)
{
return dss_device;
@@ -56,6 +49,208 @@ const struct dispc_ops *dispc_get_ops(struct dss_device *dss)
}
EXPORT_SYMBOL(dispc_get_ops);
+
+/* -----------------------------------------------------------------------------
+ * OMAP DSS Devices Handling
+ */
+
+static LIST_HEAD(omapdss_devices_list);
+static DEFINE_MUTEX(omapdss_devices_lock);
+
+void omapdss_device_register(struct omap_dss_device *dssdev)
+{
+ mutex_lock(&omapdss_devices_lock);
+ list_add_tail(&dssdev->list, &omapdss_devices_list);
+ mutex_unlock(&omapdss_devices_lock);
+}
+EXPORT_SYMBOL_GPL(omapdss_device_register);
+
+void omapdss_device_unregister(struct omap_dss_device *dssdev)
+{
+ mutex_lock(&omapdss_devices_lock);
+ list_del(&dssdev->list);
+ mutex_unlock(&omapdss_devices_lock);
+}
+EXPORT_SYMBOL_GPL(omapdss_device_unregister);
+
+static bool omapdss_device_is_registered(struct device_node *node)
+{
+ struct omap_dss_device *dssdev;
+ bool found = false;
+
+ mutex_lock(&omapdss_devices_lock);
+
+ list_for_each_entry(dssdev, &omapdss_devices_list, list) {
+ if (dssdev->dev->of_node == node) {
+ found = true;
+ break;
+ }
+ }
+
+ mutex_unlock(&omapdss_devices_lock);
+ return found;
+}
+
+struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev)
+{
+ if (!try_module_get(dssdev->owner))
+ return NULL;
+
+ if (get_device(dssdev->dev) == NULL) {
+ module_put(dssdev->owner);
+ return NULL;
+ }
+
+ return dssdev;
+}
+EXPORT_SYMBOL(omapdss_device_get);
+
+void omapdss_device_put(struct omap_dss_device *dssdev)
+{
+ put_device(dssdev->dev);
+ module_put(dssdev->owner);
+}
+EXPORT_SYMBOL(omapdss_device_put);
+
+struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src,
+ unsigned int port)
+{
+ struct omap_dss_device *dssdev;
+
+ list_for_each_entry(dssdev, &omapdss_devices_list, list) {
+ if (dssdev->dev->of_node == src && dssdev->of_ports & BIT(port))
+ return omapdss_device_get(dssdev);
+ }
+
+ return NULL;
+}
+
+/*
+ * Search for the next device starting at @from. The type argument specifies
+ * which device types to consider when searching. Searching for multiple types
+ * is supported by and'ing their type flags. Release the reference to the @from
+ * device, and acquire a reference to the returned device if found.
+ */
+struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from,
+ enum omap_dss_device_type type)
+{
+ struct omap_dss_device *dssdev;
+ struct list_head *list;
+
+ mutex_lock(&omapdss_devices_lock);
+
+ if (list_empty(&omapdss_devices_list)) {
+ dssdev = NULL;
+ goto done;
+ }
+
+ /*
+ * Start from the from entry if given or from omapdss_devices_list
+ * otherwise.
+ */
+ list = from ? &from->list : &omapdss_devices_list;
+
+ list_for_each_entry(dssdev, list, list) {
+ /*
+ * Stop if we reach the omapdss_devices_list; that's the end of
+ * the list.
+ */
+ if (&dssdev->list == &omapdss_devices_list) {
+ dssdev = NULL;
+ goto done;
+ }
+
+ /*
+ * Accept display entities if the display type is requested,
+ * and output entities if the output type is requested.
+ */
+ if ((type & OMAP_DSS_DEVICE_TYPE_DISPLAY) &&
+ !dssdev->output_type)
+ goto done;
+ if ((type & OMAP_DSS_DEVICE_TYPE_OUTPUT) && dssdev->id &&
+ dssdev->next)
+ goto done;
+ }
+
+ dssdev = NULL;
+
+done:
+ if (from)
+ omapdss_device_put(from);
+ if (dssdev)
+ omapdss_device_get(dssdev);
+
+ mutex_unlock(&omapdss_devices_lock);
+ return dssdev;
+}
+EXPORT_SYMBOL(omapdss_device_get_next);
+
+int omapdss_device_connect(struct dss_device *dss,
+ struct omap_dss_device *src,
+ struct omap_dss_device *dst)
+{
+ int ret;
+
+ dev_dbg(dst->dev, "connect\n");
+
+ if (omapdss_device_is_connected(dst))
+ return -EBUSY;
+
+ dst->dss = dss;
+
+ ret = dst->ops->connect(src, dst);
+ if (ret < 0) {
+ dst->dss = NULL;
+ return ret;
+ }
+
+ if (src) {
+ WARN_ON(src->dst);
+ dst->src = src;
+ src->dst = dst;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omapdss_device_connect);
+
+void omapdss_device_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
+{
+ dev_dbg(dst->dev, "disconnect\n");
+
+ if (!dst->id && !omapdss_device_is_connected(dst)) {
+ WARN_ON(dst->output_type);
+ return;
+ }
+
+ if (src) {
+ if (WARN_ON(dst != src->dst))
+ return;
+
+ dst->src = NULL;
+ src->dst = NULL;
+ }
+
+ WARN_ON(dst->state != OMAP_DSS_DISPLAY_DISABLED);
+
+ dst->ops->disconnect(src, dst);
+ dst->dss = NULL;
+}
+EXPORT_SYMBOL_GPL(omapdss_device_disconnect);
+
+/* -----------------------------------------------------------------------------
+ * Components Handling
+ */
+
+static struct list_head omapdss_comp_list;
+
+struct omapdss_comp_node {
+ struct list_head list;
+ struct device_node *node;
+ bool dss_core_component;
+};
+
static bool omapdss_list_contains(const struct device_node *node)
{
struct omapdss_comp_node *comp;
@@ -130,9 +325,7 @@ static bool omapdss_component_is_loaded(struct omapdss_comp_node *comp)
{
if (comp->dss_core_component)
return true;
- if (omapdss_component_is_display(comp->node))
- return true;
- if (omapdss_component_is_output(comp->node))
+ if (omapdss_device_is_registered(comp->node))
return true;
return false;
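
For context, a minimal usage sketch (illustration only, not part of the patch above): a hypothetical example_list_displays() caller walking every registered display with the new iterator. It relies only on the reference-counting rule documented above omapdss_device_get_next(), which drops the reference on the device passed in and takes one on the device it returns, so a plain loop stays balanced; the for_each_dss_display() loop used later in dss_shutdown() is presumably a wrapper around the same call.

/* Sketch only; assumes the omapdss headers from this series are available. */
static void example_list_displays(void)
{
	struct omap_dss_device *dssdev = NULL;

	/* Start from NULL; each call releases @from and acquires the next device. */
	while ((dssdev = omapdss_device_get_next(dssdev,
					OMAP_DSS_DEVICE_TYPE_DISPLAY)))
		pr_info("display: %s\n", dssdev->name ? dssdev->name : "(unnamed)");
}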
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
index 07d00a186f15..a2edabc9f6b3 100644
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ b/drivers/gpu/drm/omapdrm/dss/core.c
@@ -45,36 +45,14 @@ static struct platform_driver * const omap_dss_drivers[] = {
#endif
};
-static struct platform_device *omap_drm_device;
-
static int __init omap_dss_init(void)
{
- int r;
-
- r = platform_register_drivers(omap_dss_drivers,
- ARRAY_SIZE(omap_dss_drivers));
- if (r)
- goto err_reg;
-
- omap_drm_device = platform_device_register_simple("omapdrm", 0, NULL, 0);
- if (IS_ERR(omap_drm_device)) {
- r = PTR_ERR(omap_drm_device);
- goto err_reg;
- }
-
- return 0;
-
-err_reg:
- platform_unregister_drivers(omap_dss_drivers,
- ARRAY_SIZE(omap_dss_drivers));
-
- return r;
+ return platform_register_drivers(omap_dss_drivers,
+ ARRAY_SIZE(omap_dss_drivers));
}
static void __exit omap_dss_exit(void)
{
- platform_device_unregister(omap_drm_device);
-
platform_unregister_drivers(omap_dss_drivers,
ARRAY_SIZE(omap_dss_drivers));
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 84f274c4a4cb..e61a9592a650 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -2904,13 +2904,6 @@ static int dispc_ovl_enable(struct dispc_device *dispc,
return 0;
}
-static enum omap_dss_output_id
-dispc_mgr_get_supported_outputs(struct dispc_device *dispc,
- enum omap_channel channel)
-{
- return dss_get_supported_outputs(dispc->dss, channel);
-}
-
static void dispc_lcd_enable_signal_polarity(struct dispc_device *dispc,
bool act_high)
{
@@ -3120,28 +3113,29 @@ static bool _dispc_mgr_pclk_ok(struct dispc_device *dispc,
return pclk <= dispc->feat->max_tv_pclk;
}
-bool dispc_mgr_timings_ok(struct dispc_device *dispc, enum omap_channel channel,
- const struct videomode *vm)
+static int dispc_mgr_check_timings(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct videomode *vm)
{
if (!_dispc_mgr_size_ok(dispc, vm->hactive, vm->vactive))
- return false;
+ return MODE_BAD;
if (!_dispc_mgr_pclk_ok(dispc, channel, vm->pixelclock))
- return false;
+ return MODE_BAD;
if (dss_mgr_is_lcd(channel)) {
/* TODO: OMAP4+ supports interlace for LCD outputs */
if (vm->flags & DISPLAY_FLAGS_INTERLACED)
- return false;
+ return MODE_BAD;
if (!_dispc_lcd_timings_ok(dispc, vm->hsync_len,
vm->hfront_porch, vm->hback_porch,
vm->vsync_len, vm->vfront_porch,
vm->vback_porch))
- return false;
+ return MODE_BAD;
}
- return true;
+ return MODE_OK;
}
static void _dispc_mgr_set_lcd_timings(struct dispc_device *dispc,
@@ -3243,7 +3237,7 @@ static void dispc_mgr_set_timings(struct dispc_device *dispc,
DSSDBG("channel %d xres %u yres %u\n", channel, t.hactive, t.vactive);
- if (!dispc_mgr_timings_ok(dispc, channel, &t)) {
+ if (dispc_mgr_check_timings(dispc, channel, &t)) {
BUG();
return;
}
@@ -4740,9 +4734,9 @@ static const struct dispc_ops dispc_ops = {
.mgr_go_busy = dispc_mgr_go_busy,
.mgr_go = dispc_mgr_go,
.mgr_set_lcd_config = dispc_mgr_set_lcd_config,
+ .mgr_check_timings = dispc_mgr_check_timings,
.mgr_set_timings = dispc_mgr_set_timings,
.mgr_setup = dispc_mgr_setup,
- .mgr_get_supported_outputs = dispc_mgr_get_supported_outputs,
.mgr_gamma_size = dispc_mgr_gamma_size,
.mgr_set_gamma = dispc_mgr_set_gamma,
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 9e7fcbd57e52..34b2a4ef63a4 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -21,27 +21,14 @@
#define DSS_SUBSYS_NAME "DISPLAY"
#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/jiffies.h>
-#include <linux/platform_device.h>
#include <linux/of.h>
#include "omapdss.h"
-static void omapdss_default_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- *vm = dssdev->panel.vm;
-}
-
-static LIST_HEAD(panel_list);
-static DEFINE_MUTEX(panel_list_mutex);
static int disp_num_counter;
-int omapdss_register_display(struct omap_dss_device *dssdev)
+void omapdss_display_init(struct omap_dss_device *dssdev)
{
- struct omap_dss_driver *drv = dssdev->driver;
- struct list_head *cur;
int id;
/*
@@ -52,123 +39,22 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
if (id < 0)
id = disp_num_counter++;
- snprintf(dssdev->alias, sizeof(dssdev->alias), "display%d", id);
+ dssdev->alias_id = id;
/* Use 'label' property for name, if it exists */
of_property_read_string(dssdev->dev->of_node, "label", &dssdev->name);
if (dssdev->name == NULL)
- dssdev->name = dssdev->alias;
-
- if (drv && drv->get_timings == NULL)
- drv->get_timings = omapdss_default_get_timings;
-
- mutex_lock(&panel_list_mutex);
- list_for_each(cur, &panel_list) {
- struct omap_dss_device *ldev = list_entry(cur,
- struct omap_dss_device,
- panel_list);
- if (strcmp(ldev->alias, dssdev->alias) > 0)
- break;
- }
- list_add_tail(&dssdev->panel_list, cur);
- mutex_unlock(&panel_list_mutex);
- return 0;
-}
-EXPORT_SYMBOL(omapdss_register_display);
-
-void omapdss_unregister_display(struct omap_dss_device *dssdev)
-{
- mutex_lock(&panel_list_mutex);
- list_del(&dssdev->panel_list);
- mutex_unlock(&panel_list_mutex);
+ dssdev->name = devm_kasprintf(dssdev->dev, GFP_KERNEL,
+ "display%u", id);
}
-EXPORT_SYMBOL(omapdss_unregister_display);
+EXPORT_SYMBOL_GPL(omapdss_display_init);
-bool omapdss_component_is_display(struct device_node *node)
+struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output)
{
- struct omap_dss_device *dssdev;
- bool found = false;
-
- mutex_lock(&panel_list_mutex);
- list_for_each_entry(dssdev, &panel_list, panel_list) {
- if (dssdev->dev->of_node == node) {
- found = true;
- goto out;
- }
- }
-out:
- mutex_unlock(&panel_list_mutex);
- return found;
-}
-EXPORT_SYMBOL(omapdss_component_is_display);
-
-struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev)
-{
- if (!try_module_get(dssdev->owner))
- return NULL;
-
- if (get_device(dssdev->dev) == NULL) {
- module_put(dssdev->owner);
- return NULL;
- }
-
- return dssdev;
-}
-EXPORT_SYMBOL(omap_dss_get_device);
-
-void omap_dss_put_device(struct omap_dss_device *dssdev)
-{
- put_device(dssdev->dev);
- module_put(dssdev->owner);
-}
-EXPORT_SYMBOL(omap_dss_put_device);
-
-/*
- * ref count of the found device is incremented.
- * ref count of from-device is decremented.
- */
-struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from)
-{
- struct list_head *l;
- struct omap_dss_device *dssdev;
-
- mutex_lock(&panel_list_mutex);
-
- if (list_empty(&panel_list)) {
- dssdev = NULL;
- goto out;
- }
-
- if (from == NULL) {
- dssdev = list_first_entry(&panel_list, struct omap_dss_device,
- panel_list);
- omap_dss_get_device(dssdev);
- goto out;
- }
-
- omap_dss_put_device(from);
-
- list_for_each(l, &panel_list) {
- dssdev = list_entry(l, struct omap_dss_device, panel_list);
- if (dssdev == from) {
- if (list_is_last(l, &panel_list)) {
- dssdev = NULL;
- goto out;
- }
-
- dssdev = list_entry(l->next, struct omap_dss_device,
- panel_list);
- omap_dss_get_device(dssdev);
- goto out;
- }
- }
-
- WARN(1, "'from' dssdev not found\n");
+ while (output->next)
+ output = output->next;
- dssdev = NULL;
-out:
- mutex_unlock(&panel_list_mutex);
- return dssdev;
+ return omapdss_device_get(output);
}
-EXPORT_SYMBOL(omap_dss_get_next_device);
+EXPORT_SYMBOL_GPL(omapdss_display_get);
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 9fcc50217133..ca4f3c4c6318 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -39,6 +39,7 @@ struct dpi_data {
struct platform_device *pdev;
enum dss_model dss_model;
struct dss_device *dss;
+ unsigned int id;
struct regulator *vdds_dsi_reg;
enum dss_clk_source clk_src;
@@ -346,10 +347,9 @@ static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
static int dpi_set_mode(struct dpi_data *dpi)
{
- struct videomode *vm = &dpi->vm;
+ const struct videomode *vm = &dpi->vm;
int lck_div = 0, pck_div = 0;
unsigned long fck = 0;
- unsigned long pck;
int r = 0;
if (dpi->pll)
@@ -361,17 +361,6 @@ static int dpi_set_mode(struct dpi_data *dpi)
if (r)
return r;
- pck = fck / lck_div / pck_div;
-
- if (pck != vm->pixelclock) {
- DSSWARN("Could not find exact pixel clock. Requested %lu Hz, got %lu Hz\n",
- vm->pixelclock, pck);
-
- vm->pixelclock = pck;
- }
-
- dss_mgr_set_timings(&dpi->output, vm);
-
return 0;
}
@@ -413,7 +402,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_get_dispc;
- r = dss_dpi_select_source(dpi->dss, out->port_num, out->dispc_channel);
+ r = dss_dpi_select_source(dpi->dss, dpi->id, out->dispc_channel);
if (r)
goto err_src_sel;
@@ -478,7 +467,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
}
static void dpi_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+ const struct videomode *vm)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
@@ -491,23 +480,10 @@ static void dpi_set_timings(struct omap_dss_device *dssdev,
mutex_unlock(&dpi->lock);
}
-static void dpi_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
-
- mutex_lock(&dpi->lock);
-
- *vm = dpi->vm;
-
- mutex_unlock(&dpi->lock);
-}
-
static int dpi_check_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
- enum omap_channel channel = dpi->output.dispc_channel;
int lck_div, pck_div;
unsigned long fck;
unsigned long pck;
@@ -517,9 +493,6 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
if (vm->hactive % 8 != 0)
return -EINVAL;
- if (!dispc_mgr_timings_ok(dpi->dss->dispc, channel, vm))
- return -EINVAL;
-
if (vm->pixelclock == 0)
return -EINVAL;
@@ -562,38 +535,6 @@ static int dpi_verify_pll(struct dss_pll *pll)
return 0;
}
-static const struct soc_device_attribute dpi_soc_devices[] = {
- { .machine = "OMAP3[456]*" },
- { .machine = "[AD]M37*" },
- { /* sentinel */ }
-};
-
-static int dpi_init_regulator(struct dpi_data *dpi)
-{
- struct regulator *vdds_dsi;
-
- /*
- * The DPI uses the DSI VDDS on OMAP34xx, OMAP35xx, OMAP36xx, AM37xx and
- * DM37xx only.
- */
- if (!soc_device_match(dpi_soc_devices))
- return 0;
-
- if (dpi->vdds_dsi_reg)
- return 0;
-
- vdds_dsi = devm_regulator_get(&dpi->pdev->dev, "vdds_dsi");
- if (IS_ERR(vdds_dsi)) {
- if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER)
- DSSERR("can't get VDDS_DSI regulator\n");
- return PTR_ERR(vdds_dsi);
- }
-
- dpi->vdds_dsi_reg = vdds_dsi;
-
- return 0;
-}
-
static void dpi_init_pll(struct dpi_data *dpi)
{
struct dss_pll *pll;
@@ -621,7 +562,7 @@ static void dpi_init_pll(struct dpi_data *dpi)
* the channel in some more dynamic manner, or get the channel as a user
* parameter.
*/
-static enum omap_channel dpi_get_channel(struct dpi_data *dpi, int port_num)
+static enum omap_channel dpi_get_channel(struct dpi_data *dpi)
{
switch (dpi->dss_model) {
case DSS_MODEL_OMAP2:
@@ -629,7 +570,7 @@ static enum omap_channel dpi_get_channel(struct dpi_data *dpi, int port_num)
return OMAP_DSS_CHANNEL_LCD;
case DSS_MODEL_DRA7:
- switch (port_num) {
+ switch (dpi->id) {
case 2:
return OMAP_DSS_CHANNEL_LCD3;
case 1:
@@ -651,49 +592,31 @@ static enum omap_channel dpi_get_channel(struct dpi_data *dpi, int port_num)
}
}
-static int dpi_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static int dpi_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
+ struct dpi_data *dpi = dpi_get_data_from_dssdev(dst);
int r;
- r = dpi_init_regulator(dpi);
- if (r)
- return r;
-
dpi_init_pll(dpi);
- r = dss_mgr_connect(&dpi->output, dssdev);
+ r = omapdss_device_connect(dst->dss, dst, dst->next);
if (r)
return r;
- r = omapdss_output_set_device(dssdev, dst);
- if (r) {
- DSSERR("failed to connect output to new device: %s\n",
- dst->name);
- dss_mgr_disconnect(&dpi->output, dssdev);
- return r;
- }
-
+ dst->dispc_channel_connected = true;
return 0;
}
-static void dpi_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static void dpi_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
-
- WARN_ON(dst != dssdev->dst);
+ dst->dispc_channel_connected = false;
- if (dst != dssdev->dst)
- return;
-
- omapdss_output_unset_device(dssdev);
-
- dss_mgr_disconnect(&dpi->output, dssdev);
+ omapdss_device_disconnect(dst, dst->next);
}
-static const struct omapdss_dpi_ops dpi_ops = {
+static const struct omap_dss_device_ops dpi_ops = {
.connect = dpi_connect,
.disconnect = dpi_disconnect,
@@ -702,18 +625,16 @@ static const struct omapdss_dpi_ops dpi_ops = {
.check_timings = dpi_check_timings,
.set_timings = dpi_set_timings,
- .get_timings = dpi_get_timings,
};
-static void dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
+static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
{
struct omap_dss_device *out = &dpi->output;
+ u32 port_num = 0;
int r;
- u32 port_num;
- r = of_property_read_u32(port, "reg", &port_num);
- if (r)
- port_num = 0;
+ of_property_read_u32(port, "reg", &port_num);
+ dpi->id = port_num <= 2 ? port_num : 0;
switch (port_num) {
case 2:
@@ -731,12 +652,28 @@ static void dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
out->dev = &dpi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_DPI;
out->output_type = OMAP_DISPLAY_TYPE_DPI;
- out->dispc_channel = dpi_get_channel(dpi, port_num);
- out->port_num = port_num;
- out->ops.dpi = &dpi_ops;
+ out->dispc_channel = dpi_get_channel(dpi);
+ out->of_ports = BIT(port_num);
+ out->ops = &dpi_ops;
out->owner = THIS_MODULE;
- omapdss_register_output(out);
+ out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
+ if (IS_ERR(out->next)) {
+ if (PTR_ERR(out->next) != -EPROBE_DEFER)
+ dev_err(out->dev, "failed to find video sink\n");
+ return PTR_ERR(out->next);
+ }
+
+ r = omapdss_output_validate(out);
+ if (r) {
+ omapdss_device_put(out->next);
+ out->next = NULL;
+ return r;
+ }
+
+ omapdss_device_register(out);
+
+ return 0;
}
static void dpi_uninit_output_port(struct device_node *port)
@@ -744,7 +681,38 @@ static void dpi_uninit_output_port(struct device_node *port)
struct dpi_data *dpi = port->data;
struct omap_dss_device *out = &dpi->output;
- omapdss_unregister_output(out);
+ if (out->next)
+ omapdss_device_put(out->next);
+ omapdss_device_unregister(out);
+}
+
+static const struct soc_device_attribute dpi_soc_devices[] = {
+ { .machine = "OMAP3[456]*" },
+ { .machine = "[AD]M37*" },
+ { /* sentinel */ }
+};
+
+static int dpi_init_regulator(struct dpi_data *dpi)
+{
+ struct regulator *vdds_dsi;
+
+ /*
+ * The DPI uses the DSI VDDS on OMAP34xx, OMAP35xx, OMAP36xx, AM37xx and
+ * DM37xx only.
+ */
+ if (!soc_device_match(dpi_soc_devices))
+ return 0;
+
+ vdds_dsi = devm_regulator_get(&dpi->pdev->dev, "vdds_dsi");
+ if (IS_ERR(vdds_dsi)) {
+ if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER)
+ DSSERR("can't get VDDS_DSI regulator\n");
+ return PTR_ERR(vdds_dsi);
+ }
+
+ dpi->vdds_dsi_reg = vdds_dsi;
+
+ return 0;
}
int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
@@ -764,15 +732,14 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
return 0;
r = of_property_read_u32(ep, "data-lines", &datalines);
+ of_node_put(ep);
if (r) {
DSSERR("failed to parse datalines\n");
- goto err_datalines;
+ return r;
}
dpi->data_lines = datalines;
- of_node_put(ep);
-
dpi->pdev = pdev;
dpi->dss_model = dss_model;
dpi->dss = dss;
@@ -780,14 +747,11 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
mutex_init(&dpi->lock);
- dpi_init_output_port(dpi, port);
-
- return 0;
-
-err_datalines:
- of_node_put(ep);
+ r = dpi_init_regulator(dpi);
+ if (r)
+ return r;
- return r;
+ return dpi_init_output_port(dpi, port);
}
void dpi_uninit_port(struct device_node *port)
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 74467b308721..394c129cfb3b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -403,6 +403,7 @@ struct dsi_data {
struct {
struct dss_debugfs_entry *irqs;
struct dss_debugfs_entry *regs;
+ struct dss_debugfs_entry *clks;
} debugfs;
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
@@ -442,27 +443,6 @@ static inline struct dsi_data *to_dsi_data(struct omap_dss_device *dssdev)
return dev_get_drvdata(dssdev->dev);
}
-static struct dsi_data *dsi_get_dsi_from_id(int module)
-{
- struct omap_dss_device *out;
- enum omap_dss_output_id id;
-
- switch (module) {
- case 0:
- id = OMAP_DSS_OUTPUT_DSI1;
- break;
- case 1:
- id = OMAP_DSS_OUTPUT_DSI2;
- break;
- default:
- return NULL;
- }
-
- out = omap_dss_get_output(id);
-
- return out ? to_dsi_data(out) : NULL;
-}
-
static inline void dsi_write_reg(struct dsi_data *dsi,
const struct dsi_reg idx, u32 val)
{
@@ -1157,26 +1137,6 @@ static void dsi_runtime_put(struct dsi_data *dsi)
WARN_ON(r < 0 && r != -ENOSYS);
}
-static int dsi_regulator_init(struct dsi_data *dsi)
-{
- struct regulator *vdds_dsi;
-
- if (dsi->vdds_dsi_reg != NULL)
- return 0;
-
- vdds_dsi = devm_regulator_get(dsi->dev, "vdd");
-
- if (IS_ERR(vdds_dsi)) {
- if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER)
- DSSERR("can't get DSI VDD regulator\n");
- return PTR_ERR(vdds_dsi);
- }
-
- dsi->vdds_dsi_reg = vdds_dsi;
-
- return 0;
-}
-
static void _dsi_print_reset_status(struct dsi_data *dsi)
{
u32 l;
@@ -1373,10 +1333,6 @@ static int dsi_pll_enable(struct dss_pll *pll)
DSSDBG("PLL init\n");
- r = dsi_regulator_init(dsi);
- if (r)
- return r;
-
r = dsi_runtime_get(dsi);
if (r)
return r;
@@ -1448,8 +1404,9 @@ static void dsi_pll_disable(struct dss_pll *pll)
dsi_pll_uninit(dsi, true);
}
-static void dsi_dump_dsi_clocks(struct dsi_data *dsi, struct seq_file *s)
+static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
{
+ struct dsi_data *dsi = p;
struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
enum dss_clk_source dispc_clk_src, dsi_clk_src;
int dsi_module = dsi->module_id;
@@ -1459,7 +1416,7 @@ static void dsi_dump_dsi_clocks(struct dsi_data *dsi, struct seq_file *s)
dsi_clk_src = dss_get_dsi_clk_source(dsi->dss, dsi_module);
if (dsi_runtime_get(dsi))
- return;
+ return 0;
seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
@@ -1503,23 +1460,14 @@ static void dsi_dump_dsi_clocks(struct dsi_data *dsi, struct seq_file *s)
seq_printf(s, "LP_CLK\t\t%lu\n", dsi->current_lp_cinfo.lp_clk);
dsi_runtime_put(dsi);
-}
-
-void dsi_dump_clocks(struct seq_file *s)
-{
- struct dsi_data *dsi;
- int i;
- for (i = 0; i < MAX_NUM_DSI; i++) {
- dsi = dsi_get_dsi_from_id(i);
- if (dsi)
- dsi_dump_dsi_clocks(dsi, s);
- }
+ return 0;
}
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-static void dsi_dump_dsi_irqs(struct dsi_data *dsi, struct seq_file *s)
+static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
{
+ struct dsi_data *dsi = p;
unsigned long flags;
struct dsi_irq_stats stats;
@@ -1603,33 +1551,20 @@ static void dsi_dump_dsi_irqs(struct dsi_data *dsi, struct seq_file *s)
PIS(ULPSACTIVENOT_ALL0);
PIS(ULPSACTIVENOT_ALL1);
#undef PIS
-}
-static int dsi1_dump_irqs(struct seq_file *s, void *p)
-{
- struct dsi_data *dsi = dsi_get_dsi_from_id(0);
-
- dsi_dump_dsi_irqs(dsi, s);
- return 0;
-}
-
-static int dsi2_dump_irqs(struct seq_file *s, void *p)
-{
- struct dsi_data *dsi = dsi_get_dsi_from_id(1);
-
- dsi_dump_dsi_irqs(dsi, s);
return 0;
}
#endif
-static void dsi_dump_dsi_regs(struct dsi_data *dsi, struct seq_file *s)
+static int dsi_dump_dsi_regs(struct seq_file *s, void *p)
{
-#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsi, r))
+ struct dsi_data *dsi = p;
if (dsi_runtime_get(dsi))
- return;
+ return 0;
dsi_enable_scp_clk(dsi);
+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsi, r))
DUMPREG(DSI_REVISION);
DUMPREG(DSI_SYSCONFIG);
DUMPREG(DSI_SYSSTATUS);
@@ -1699,25 +1634,11 @@ static void dsi_dump_dsi_regs(struct dsi_data *dsi, struct seq_file *s)
DUMPREG(DSI_PLL_GO);
DUMPREG(DSI_PLL_CONFIGURATION1);
DUMPREG(DSI_PLL_CONFIGURATION2);
+#undef DUMPREG
dsi_disable_scp_clk(dsi);
dsi_runtime_put(dsi);
-#undef DUMPREG
-}
-
-static int dsi1_dump_regs(struct seq_file *s, void *p)
-{
- struct dsi_data *dsi = dsi_get_dsi_from_id(0);
-
- dsi_dump_dsi_regs(dsi, s);
- return 0;
-}
-
-static int dsi2_dump_regs(struct seq_file *s, void *p)
-{
- struct dsi_data *dsi = dsi_get_dsi_from_id(1);
- dsi_dump_dsi_regs(dsi, s);
return 0;
}
@@ -3344,7 +3265,7 @@ static void dsi_config_vp_num_line_buffers(struct dsi_data *dsi)
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
- struct videomode *vm = &dsi->vm;
+ const struct videomode *vm = &dsi->vm;
/*
* Don't use line buffers if width is greater than the video
* port's line buffer size
@@ -3473,7 +3394,7 @@ static void dsi_config_cmd_mode_interleaving(struct dsi_data *dsi)
int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
int tclk_trail, ths_exit, exiths_clk;
bool ddr_alwon;
- struct videomode *vm = &dsi->vm;
+ const struct videomode *vm = &dsi->vm;
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
int ndl = dsi->num_lanes_used - 1;
int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1;
@@ -3723,7 +3644,7 @@ static void dsi_proto_timings(struct dsi_data *dsi)
int vbp = dsi->vm_timings.vbp;
int window_sync = dsi->vm_timings.window_sync;
bool hsync_end;
- struct videomode *vm = &dsi->vm;
+ const struct videomode *vm = &dsi->vm;
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
int tl, t_he, width_bytes;
@@ -3980,8 +3901,6 @@ static void dsi_update_screen_dispc(struct dsi_data *dsi)
msecs_to_jiffies(250));
BUG_ON(r == 0);
- dss_mgr_set_timings(&dsi->output, &dsi->vm);
-
dss_mgr_start_update(&dsi->output);
if (dsi->te_enabled) {
@@ -4123,24 +4042,6 @@ static int dsi_display_init_dispc(struct dsi_data *dsi)
dsi->mgr_config.fifohandcheck = false;
}
- /*
- * override interlace, logic level and edge related parameters in
- * videomode with default values
- */
- dsi->vm.flags &= ~DISPLAY_FLAGS_INTERLACED;
- dsi->vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW;
- dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
- dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
- dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
- dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
- dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
- dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
- dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH;
- dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
- dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
-
- dss_mgr_set_timings(&dsi->output, &dsi->vm);
-
r = dsi_configure_dispc_clocks(dsi);
if (r)
goto err1;
@@ -4840,6 +4741,19 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
dsi->user_dispc_cinfo = ctx.dispc_cinfo;
dsi->vm = ctx.vm;
+
+ /*
+ * override interlace, logic level and edge related parameters in
+ * videomode with default values
+ */
+ dsi->vm.flags &= ~DISPLAY_FLAGS_INTERLACED;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
+
+ dss_mgr_set_timings(&dsi->output, &dsi->vm);
+
dsi->vm_timings = ctx.dsi_vm;
mutex_unlock(&dsi->lock);
@@ -4960,163 +4874,71 @@ static int dsi_get_clocks(struct dsi_data *dsi)
return 0;
}
-static int dsi_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static int dsi_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
- r = dsi_regulator_init(dsi);
- if (r)
- return r;
-
- r = dss_mgr_connect(&dsi->output, dssdev);
+ r = omapdss_device_connect(dst->dss, dst, dst->next);
if (r)
return r;
- r = omapdss_output_set_device(dssdev, dst);
- if (r) {
- DSSERR("failed to connect output to new device: %s\n",
- dssdev->name);
- dss_mgr_disconnect(&dsi->output, dssdev);
- return r;
- }
-
+ dst->dispc_channel_connected = true;
return 0;
}
-static void dsi_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static void dsi_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
+ dst->dispc_channel_connected = false;
- WARN_ON(dst != dssdev->dst);
-
- if (dst != dssdev->dst)
- return;
-
- omapdss_output_unset_device(dssdev);
-
- dss_mgr_disconnect(&dsi->output, dssdev);
+ omapdss_device_disconnect(dst, dst->next);
}
-static const struct omapdss_dsi_ops dsi_ops = {
+static const struct omap_dss_device_ops dsi_ops = {
.connect = dsi_connect,
.disconnect = dsi_disconnect,
-
- .bus_lock = dsi_bus_lock,
- .bus_unlock = dsi_bus_unlock,
-
.enable = dsi_display_enable,
- .disable = dsi_display_disable,
-
- .enable_hs = dsi_vc_enable_hs,
-
- .configure_pins = dsi_configure_pins,
- .set_config = dsi_set_config,
-
- .enable_video_output = dsi_enable_video_output,
- .disable_video_output = dsi_disable_video_output,
-
- .update = dsi_update,
-
- .enable_te = dsi_enable_te,
-
- .request_vc = dsi_request_vc,
- .set_vc_id = dsi_set_vc_id,
- .release_vc = dsi_release_vc,
-
- .dcs_write = dsi_vc_dcs_write,
- .dcs_write_nosync = dsi_vc_dcs_write_nosync,
- .dcs_read = dsi_vc_dcs_read,
-
- .gen_write = dsi_vc_generic_write,
- .gen_write_nosync = dsi_vc_generic_write_nosync,
- .gen_read = dsi_vc_generic_read,
-
- .bta_sync = dsi_vc_send_bta_sync,
-
- .set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size,
-};
-
-static void dsi_init_output(struct dsi_data *dsi)
-{
- struct omap_dss_device *out = &dsi->output;
-
- out->dev = dsi->dev;
- out->id = dsi->module_id == 0 ?
- OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
-
- out->output_type = OMAP_DISPLAY_TYPE_DSI;
- out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
- out->dispc_channel = dsi_get_channel(dsi);
- out->ops.dsi = &dsi_ops;
- out->owner = THIS_MODULE;
- omapdss_register_output(out);
-}
+ .dsi = {
+ .bus_lock = dsi_bus_lock,
+ .bus_unlock = dsi_bus_unlock,
-static void dsi_uninit_output(struct dsi_data *dsi)
-{
- struct omap_dss_device *out = &dsi->output;
+ .disable = dsi_display_disable,
- omapdss_unregister_output(out);
-}
+ .enable_hs = dsi_vc_enable_hs,
-static int dsi_probe_of(struct dsi_data *dsi)
-{
- struct device_node *node = dsi->dev->of_node;
- struct property *prop;
- u32 lane_arr[10];
- int len, num_pins;
- int r, i;
- struct device_node *ep;
- struct omap_dsi_pin_config pin_cfg;
-
- ep = of_graph_get_endpoint_by_regs(node, 0, 0);
- if (!ep)
- return 0;
+ .configure_pins = dsi_configure_pins,
+ .set_config = dsi_set_config,
- prop = of_find_property(ep, "lanes", &len);
- if (prop == NULL) {
- dev_err(dsi->dev, "failed to find lane data\n");
- r = -EINVAL;
- goto err;
- }
+ .enable_video_output = dsi_enable_video_output,
+ .disable_video_output = dsi_disable_video_output,
- num_pins = len / sizeof(u32);
+ .update = dsi_update,
- if (num_pins < 4 || num_pins % 2 != 0 ||
- num_pins > dsi->num_lanes_supported * 2) {
- dev_err(dsi->dev, "bad number of lanes\n");
- r = -EINVAL;
- goto err;
- }
+ .enable_te = dsi_enable_te,
- r = of_property_read_u32_array(ep, "lanes", lane_arr, num_pins);
- if (r) {
- dev_err(dsi->dev, "failed to read lane data\n");
- goto err;
- }
+ .request_vc = dsi_request_vc,
+ .set_vc_id = dsi_set_vc_id,
+ .release_vc = dsi_release_vc,
- pin_cfg.num_pins = num_pins;
- for (i = 0; i < num_pins; ++i)
- pin_cfg.pins[i] = (int)lane_arr[i];
+ .dcs_write = dsi_vc_dcs_write,
+ .dcs_write_nosync = dsi_vc_dcs_write_nosync,
+ .dcs_read = dsi_vc_dcs_read,
- r = dsi_configure_pins(&dsi->output, &pin_cfg);
- if (r) {
- dev_err(dsi->dev, "failed to configure pins");
- goto err;
- }
+ .gen_write = dsi_vc_generic_write,
+ .gen_write_nosync = dsi_vc_generic_write_nosync,
+ .gen_read = dsi_vc_generic_read,
- of_node_put(ep);
+ .bta_sync = dsi_vc_send_bta_sync,
- return 0;
+ .set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size,
+ },
+};
-err:
- of_node_put(ep);
- return r;
-}
+/* -----------------------------------------------------------------------------
+ * PLL
+ */
static const struct dss_pll_ops dsi_pll_ops = {
.enable = dsi_pll_enable,
@@ -5231,7 +5053,175 @@ static int dsi_init_pll_data(struct dss_device *dss, struct dsi_data *dsi)
return 0;
}
-/* DSI1 HW IP initialisation */
+/* -----------------------------------------------------------------------------
+ * Component Bind & Unbind
+ */
+
+static int dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct dss_device *dss = dss_get_device(master);
+ struct dsi_data *dsi = dev_get_drvdata(dev);
+ char name[10];
+ u32 rev;
+ int r;
+
+ dsi->dss = dss;
+
+ dsi_init_pll_data(dss, dsi);
+
+ r = dsi_runtime_get(dsi);
+ if (r)
+ return r;
+
+ rev = dsi_read_reg(dsi, DSI_REVISION);
+ dev_dbg(dev, "OMAP DSI rev %d.%d\n",
+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+ dsi->line_buffer_size = dsi_get_line_buf_size(dsi);
+
+ dsi_runtime_put(dsi);
+
+ snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1);
+ dsi->debugfs.regs = dss_debugfs_create_file(dss, name,
+ dsi_dump_dsi_regs, &dsi);
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1);
+ dsi->debugfs.irqs = dss_debugfs_create_file(dss, name,
+ dsi_dump_dsi_irqs, &dsi);
+#endif
+ snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1);
+ dsi->debugfs.clks = dss_debugfs_create_file(dss, name,
+ dsi_dump_dsi_clocks, &dsi);
+
+ return 0;
+}
+
+static void dsi_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct dsi_data *dsi = dev_get_drvdata(dev);
+
+ dss_debugfs_remove_file(dsi->debugfs.clks);
+ dss_debugfs_remove_file(dsi->debugfs.irqs);
+ dss_debugfs_remove_file(dsi->debugfs.regs);
+
+ of_platform_depopulate(dev);
+
+ WARN_ON(dsi->scp_clk_refcount > 0);
+
+ dss_pll_unregister(&dsi->pll);
+}
+
+static const struct component_ops dsi_component_ops = {
+ .bind = dsi_bind,
+ .unbind = dsi_unbind,
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe & Remove, Suspend & Resume
+ */
+
+static int dsi_init_output(struct dsi_data *dsi)
+{
+ struct omap_dss_device *out = &dsi->output;
+ int r;
+
+ out->dev = dsi->dev;
+ out->id = dsi->module_id == 0 ?
+ OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
+
+ out->output_type = OMAP_DISPLAY_TYPE_DSI;
+ out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
+ out->dispc_channel = dsi_get_channel(dsi);
+ out->ops = &dsi_ops;
+ out->owner = THIS_MODULE;
+ out->of_ports = BIT(0);
+ out->bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE
+ | DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_NEGEDGE;
+
+ out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
+ if (IS_ERR(out->next)) {
+ if (PTR_ERR(out->next) != -EPROBE_DEFER)
+ dev_err(out->dev, "failed to find video sink\n");
+ return PTR_ERR(out->next);
+ }
+
+ r = omapdss_output_validate(out);
+ if (r) {
+ omapdss_device_put(out->next);
+ out->next = NULL;
+ return r;
+ }
+
+ omapdss_device_register(out);
+
+ return 0;
+}
+
+static void dsi_uninit_output(struct dsi_data *dsi)
+{
+ struct omap_dss_device *out = &dsi->output;
+
+ if (out->next)
+ omapdss_device_put(out->next);
+ omapdss_device_unregister(out);
+}
+
+static int dsi_probe_of(struct dsi_data *dsi)
+{
+ struct device_node *node = dsi->dev->of_node;
+ struct property *prop;
+ u32 lane_arr[10];
+ int len, num_pins;
+ int r, i;
+ struct device_node *ep;
+ struct omap_dsi_pin_config pin_cfg;
+
+ ep = of_graph_get_endpoint_by_regs(node, 0, 0);
+ if (!ep)
+ return 0;
+
+ prop = of_find_property(ep, "lanes", &len);
+ if (prop == NULL) {
+ dev_err(dsi->dev, "failed to find lane data\n");
+ r = -EINVAL;
+ goto err;
+ }
+
+ num_pins = len / sizeof(u32);
+
+ if (num_pins < 4 || num_pins % 2 != 0 ||
+ num_pins > dsi->num_lanes_supported * 2) {
+ dev_err(dsi->dev, "bad number of lanes\n");
+ r = -EINVAL;
+ goto err;
+ }
+
+ r = of_property_read_u32_array(ep, "lanes", lane_arr, num_pins);
+ if (r) {
+ dev_err(dsi->dev, "failed to read lane data\n");
+ goto err;
+ }
+
+ pin_cfg.num_pins = num_pins;
+ for (i = 0; i < num_pins; ++i)
+ pin_cfg.pins[i] = (int)lane_arr[i];
+
+ r = dsi_configure_pins(&dsi->output, &pin_cfg);
+ if (r) {
+ dev_err(dsi->dev, "failed to configure pins");
+ goto err;
+ }
+
+ of_node_put(ep);
+
+ return 0;
+
+err:
+ of_node_put(ep);
+ return r;
+}
+
static const struct dsi_of_data dsi_of_data_omap34xx = {
.model = DSI_MODEL_OMAP3,
.pll_hw = &dss_omap3_dsi_pll_hw,
@@ -5297,23 +5287,21 @@ static const struct soc_device_attribute dsi_soc_devices[] = {
{ /* sentinel */ }
};
-static int dsi_bind(struct device *dev, struct device *master, void *data)
+static int dsi_probe(struct platform_device *pdev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct dss_device *dss = dss_get_device(master);
const struct soc_device_attribute *soc;
const struct dsi_module_id_data *d;
- u32 rev;
- int r, i;
+ struct device *dev = &pdev->dev;
struct dsi_data *dsi;
struct resource *dsi_mem;
struct resource *res;
+ unsigned int i;
+ int r;
dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
return -ENOMEM;
- dsi->dss = dss;
dsi->dev = dev;
dev_set_drvdata(dev, dsi);
@@ -5364,6 +5352,13 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
return r;
}
+ dsi->vdds_dsi_reg = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(dsi->vdds_dsi_reg)) {
+ if (PTR_ERR(dsi->vdds_dsi_reg) != -EPROBE_DEFER)
+ DSSERR("can't get DSI VDD regulator\n");
+ return PTR_ERR(dsi->vdds_dsi_reg);
+ }
+
soc = soc_device_match(dsi_soc_devices);
if (soc)
dsi->data = soc->data;
@@ -5410,18 +5405,8 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
if (r)
return r;
- dsi_init_pll_data(dss, dsi);
-
pm_runtime_enable(dev);
- r = dsi_runtime_get(dsi);
- if (r)
- goto err_runtime_get;
-
- rev = dsi_read_reg(dsi, DSI_REVISION);
- dev_dbg(dev, "OMAP DSI rev %d.%d\n",
- FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
-
/* DSI on OMAP3 doesn't have register DSI_GNQ, set number
* of data lines to 3 by default */
if (dsi->data->quirks & DSI_QUIRK_GNQ)
@@ -5430,88 +5415,48 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
else
dsi->num_lanes_supported = 3;
- dsi->line_buffer_size = dsi_get_line_buf_size(dsi);
-
- dsi_init_output(dsi);
+ r = dsi_init_output(dsi);
+ if (r)
+ goto err_pm_disable;
r = dsi_probe_of(dsi);
if (r) {
DSSERR("Invalid DSI DT data\n");
- goto err_probe_of;
+ goto err_uninit_output;
}
r = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (r)
DSSERR("Failed to populate DSI child devices: %d\n", r);
- dsi_runtime_put(dsi);
-
- if (dsi->module_id == 0)
- dsi->debugfs.regs = dss_debugfs_create_file(dss, "dsi1_regs",
- dsi1_dump_regs,
- &dsi);
- else
- dsi->debugfs.regs = dss_debugfs_create_file(dss, "dsi2_regs",
- dsi2_dump_regs,
- &dsi);
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- if (dsi->module_id == 0)
- dsi->debugfs.irqs = dss_debugfs_create_file(dss, "dsi1_irqs",
- dsi1_dump_irqs,
- &dsi);
- else
- dsi->debugfs.irqs = dss_debugfs_create_file(dss, "dsi2_irqs",
- dsi2_dump_irqs,
- &dsi);
-#endif
+ r = component_add(&pdev->dev, &dsi_component_ops);
+ if (r)
+ goto err_uninit_output;
return 0;
-err_probe_of:
+err_uninit_output:
dsi_uninit_output(dsi);
- dsi_runtime_put(dsi);
-
-err_runtime_get:
+err_pm_disable:
pm_runtime_disable(dev);
return r;
}
-static void dsi_unbind(struct device *dev, struct device *master, void *data)
+static int dsi_remove(struct platform_device *pdev)
{
- struct dsi_data *dsi = dev_get_drvdata(dev);
+ struct dsi_data *dsi = platform_get_drvdata(pdev);
- dss_debugfs_remove_file(dsi->debugfs.irqs);
- dss_debugfs_remove_file(dsi->debugfs.regs);
-
- of_platform_depopulate(dev);
-
- WARN_ON(dsi->scp_clk_refcount > 0);
-
- dss_pll_unregister(&dsi->pll);
+ component_del(&pdev->dev, &dsi_component_ops);
dsi_uninit_output(dsi);
- pm_runtime_disable(dev);
+ pm_runtime_disable(&pdev->dev);
if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {
regulator_disable(dsi->vdds_dsi_reg);
dsi->vdds_dsi_enabled = false;
}
-}
-static const struct component_ops dsi_component_ops = {
- .bind = dsi_bind,
- .unbind = dsi_unbind,
-};
-
-static int dsi_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &dsi_component_ops);
-}
-
-static int dsi_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &dsi_component_ops);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
index 4602a79c6c44..0422597ac6b0 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c
@@ -21,7 +21,8 @@
#include "omapdss.h"
-struct device_node *dss_of_port_get_parent_device(struct device_node *port)
+static struct device_node *
+dss_of_port_get_parent_device(struct device_node *port)
{
struct device_node *np;
int i;
@@ -45,41 +46,37 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
return NULL;
}
-u32 dss_of_port_get_port_number(struct device_node *port)
-{
- int r;
- u32 reg;
-
- r = of_property_read_u32(port, "reg", &reg);
- if (r)
- reg = 0;
-
- return reg;
-}
-
struct omap_dss_device *
-omapdss_of_find_source_for_first_ep(struct device_node *node)
+omapdss_of_find_connected_device(struct device_node *node, unsigned int port)
{
- struct device_node *ep;
+ struct device_node *src_node;
struct device_node *src_port;
+ struct device_node *ep;
struct omap_dss_device *src;
+ u32 port_number = 0;
- ep = of_graph_get_endpoint_by_regs(node, 0, 0);
+ /* Get the endpoint... */
+ ep = of_graph_get_endpoint_by_regs(node, port, 0);
if (!ep)
- return ERR_PTR(-EINVAL);
+ return NULL;
+ /* ... and its remote port... */
src_port = of_graph_get_remote_port(ep);
- if (!src_port) {
- of_node_put(ep);
- return ERR_PTR(-EINVAL);
- }
-
of_node_put(ep);
+ if (!src_port)
+ return NULL;
- src = omap_dss_find_output_by_port_node(src_port);
-
+ /* ... and the remote port's number and parent... */
+ of_property_read_u32(src_port, "reg", &port_number);
+ src_node = dss_of_port_get_parent_device(src_port);
of_node_put(src_port);
+ if (!src_node)
+ return ERR_PTR(-EINVAL);
+
+ /* ... and finally the connected device. */
+ src = omapdss_find_device_by_port(src_node, port_number);
+ of_node_put(src_node);
return src ? src : ERR_PTR(-EPROBE_DEFER);
}
-EXPORT_SYMBOL_GPL(omapdss_of_find_source_for_first_ep);
+EXPORT_SYMBOL_GPL(omapdss_of_find_connected_device);
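
Purely as an illustration (not part of the patch): how a caller might consume the helper's return convention. A NULL return means the port has no remote endpoint described in DT, while an ERR_PTR (typically -EPROBE_DEFER) means the remote port exists but its omap_dss_device has not been registered yet. The example_resolve_sink() function and its -ENODEV policy for unconnected ports are hypothetical, not taken from this series.

static int example_resolve_sink(struct omap_dss_device *out)
{
	/* Resolve whatever is wired to port 0 of this output in the OF graph. */
	out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
	if (IS_ERR(out->next))
		return PTR_ERR(out->next);	/* usually -EPROBE_DEFER */
	if (!out->next)
		return -ENODEV;			/* port not connected in DT */

	return 0;
}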
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index cb80ddaa19d2..19fc4dfc429e 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -394,9 +394,6 @@ static int dss_debug_dump_clocks(struct seq_file *s, void *p)
dss_dump_clocks(dss, s);
dispc_dump_clocks(dss->dispc, s);
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_dump_clocks(s);
-#endif
return 0;
}
@@ -681,12 +678,6 @@ unsigned long dss_get_max_fck_rate(struct dss_device *dss)
return dss->feat->fck_freq_max;
}
-enum omap_dss_output_id dss_get_supported_outputs(struct dss_device *dss,
- enum omap_channel channel)
-{
- return dss->feat->outputs[channel];
-}
-
static int dss_setup_default_clock(struct dss_device *dss)
{
unsigned long max_dss_fck, prate;
@@ -1183,7 +1174,8 @@ static int dss_init_ports(struct dss_device *dss)
struct platform_device *pdev = dss->pdev;
struct device_node *parent = pdev->dev.of_node;
struct device_node *port;
- int i;
+ unsigned int i;
+ int r;
for (i = 0; i < dss->feat->num_ports; i++) {
port = of_graph_get_port_by_id(parent, i);
@@ -1192,11 +1184,17 @@ static int dss_init_ports(struct dss_device *dss)
switch (dss->feat->ports[i]) {
case OMAP_DISPLAY_TYPE_DPI:
- dpi_init_port(dss, pdev, port, dss->feat->model);
+ r = dpi_init_port(dss, pdev, port, dss->feat->model);
+ if (r)
+ return r;
break;
+
case OMAP_DISPLAY_TYPE_SDI:
- sdi_init_port(dss, pdev, port);
+ r = sdi_init_port(dss, pdev, port);
+ if (r)
+ return r;
break;
+
default:
break;
}
@@ -1315,6 +1313,7 @@ static const struct soc_device_attribute dss_soc_devices[] = {
static int dss_bind(struct device *dev)
{
struct dss_device *dss = dev_get_drvdata(dev);
+ struct platform_device *drm_pdev;
int r;
r = component_bind_all(dev, NULL);
@@ -1323,14 +1322,25 @@ static int dss_bind(struct device *dev)
pm_set_vt_switch(0);
- omapdss_gather_components(dev);
omapdss_set_dss(dss);
+ drm_pdev = platform_device_register_simple("omapdrm", 0, NULL, 0);
+ if (IS_ERR(drm_pdev)) {
+ component_unbind_all(dev, NULL);
+ return PTR_ERR(drm_pdev);
+ }
+
+ dss->drm_pdev = drm_pdev;
+
return 0;
}
static void dss_unbind(struct device *dev)
{
+ struct dss_device *dss = dev_get_drvdata(dev);
+
+ platform_device_unregister(dss->drm_pdev);
+
omapdss_set_dss(NULL);
component_unbind_all(dev, NULL);
@@ -1474,6 +1484,8 @@ static int dss_probe(struct platform_device *pdev)
dss);
/* Add all the child devices as components. */
+ omapdss_gather_components(&pdev->dev);
+
device_for_each_child(&pdev->dev, &match, dss_add_child_component);
r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
@@ -1539,12 +1551,9 @@ static void dss_shutdown(struct platform_device *pdev)
DSSDBG("shutdown\n");
- for_each_dss_dev(dssdev) {
- if (!dssdev->driver)
- continue;
-
+ for_each_dss_display(dssdev) {
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- dssdev->driver->disable(dssdev);
+ dssdev->ops->disable(dssdev);
}
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 38302631b64b..37790c363128 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -238,6 +238,8 @@ struct dss_device {
struct regmap *syscon_pll_ctrl;
u32 syscon_pll_ctrl_offset;
+ struct platform_device *drm_pdev;
+
struct clk *parent_clk;
struct clk *dss_clk;
unsigned long dss_clk_rate;
@@ -267,6 +269,8 @@ struct dss_device {
struct dispc_device *dispc;
const struct dispc_ops *dispc_ops;
+ const struct dss_mgr_ops *mgr_ops;
+ struct omap_drm_private *mgr_ops_priv;
};
/* core */
@@ -313,8 +317,6 @@ void dss_runtime_put(struct dss_device *dss);
unsigned long dss_get_dispc_clk_rate(struct dss_device *dss);
unsigned long dss_get_max_fck_rate(struct dss_device *dss);
-enum omap_dss_output_id dss_get_supported_outputs(struct dss_device *dss,
- enum omap_channel channel);
int dss_dpi_select_source(struct dss_device *dss, int port,
enum omap_channel channel);
void dss_select_hdmi_venc_clk_source(struct dss_device *dss,
@@ -374,8 +376,6 @@ static inline void sdi_uninit_port(struct device_node *port)
#ifdef CONFIG_OMAP2_DSS_DSI
-void dsi_dump_clocks(struct seq_file *s);
-
void dsi_irq_handler(void);
#endif
@@ -417,9 +417,6 @@ bool dispc_div_calc(struct dispc_device *dispc, unsigned long dispc_freq,
unsigned long pck_min, unsigned long pck_max,
dispc_div_calc_func func, void *data);
-bool dispc_mgr_timings_ok(struct dispc_device *dispc,
- enum omap_channel channel,
- const struct videomode *vm);
int dispc_calc_clock_rates(struct dispc_device *dispc,
unsigned long dispc_fclk_rate,
struct dispc_clock_info *cinfo);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index 3aeb4cabd59f..7f0dc490a31d 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -313,13 +313,13 @@ void hdmi_wp_clear_irqenable(struct hdmi_wp_data *wp, u32 mask);
int hdmi_wp_set_phy_pwr(struct hdmi_wp_data *wp, enum hdmi_phy_pwr val);
int hdmi_wp_set_pll_pwr(struct hdmi_wp_data *wp, enum hdmi_pll_pwr val);
void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
- struct hdmi_video_format *video_fmt);
+ const struct hdmi_video_format *video_fmt);
void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
- struct videomode *vm);
+ const struct videomode *vm);
void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
- struct videomode *vm);
+ const struct videomode *vm);
void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
- struct videomode *vm, struct hdmi_config *param);
+ struct videomode *vm, const struct hdmi_config *param);
int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp,
unsigned int version);
phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 5879f45f6fc9..cf6230eac31a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -108,26 +108,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int hdmi_init_regulator(struct omap_hdmi *hdmi)
-{
- struct regulator *reg;
-
- if (hdmi->vdda_reg != NULL)
- return 0;
-
- reg = devm_regulator_get(&hdmi->pdev->dev, "vdda");
-
- if (IS_ERR(reg)) {
- if (PTR_ERR(reg) != -EPROBE_DEFER)
- DSSERR("can't get VDDA regulator\n");
- return PTR_ERR(reg);
- }
-
- hdmi->vdda_reg = reg;
-
- return 0;
-}
-
static int hdmi_power_on_core(struct omap_hdmi *hdmi)
{
int r;
@@ -174,7 +154,7 @@ static void hdmi_power_off_core(struct omap_hdmi *hdmi)
static int hdmi_power_on_full(struct omap_hdmi *hdmi)
{
int r;
- struct videomode *vm;
+ const struct videomode *vm;
struct hdmi_wp_data *wp = &hdmi->wp;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
unsigned int pc;
@@ -227,9 +207,6 @@ static int hdmi_power_on_full(struct omap_hdmi *hdmi)
hdmi4_configure(&hdmi->core, &hdmi->wp, &hdmi->cfg);
- /* tv size */
- dss_mgr_set_timings(&hdmi->output, vm);
-
r = dss_mgr_enable(&hdmi->output);
if (r)
goto err_mgr_enable;
@@ -271,19 +248,8 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi)
hdmi_power_off_core(hdmi);
}
-static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- if (!dispc_mgr_timings_ok(hdmi->dss->dispc, dssdev->dispc_channel, vm))
- return -EINVAL;
-
- return 0;
-}
-
-static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static void hdmi_display_set_timings(struct omap_dss_device *dssdev,
+ const struct videomode *vm)
{
struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
@@ -296,14 +262,6 @@ static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
mutex_unlock(&hdmi->lock);
}
-static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- *vm = hdmi->cfg.vm;
-}
-
static int hdmi_dump_regs(struct seq_file *s, void *p)
{
struct omap_hdmi *hdmi = s->private;
@@ -456,44 +414,25 @@ void hdmi4_core_disable(struct hdmi_core_data *core)
mutex_unlock(&hdmi->lock);
}
-static int hdmi_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static int hdmi_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
int r;
- r = hdmi_init_regulator(hdmi);
+ r = omapdss_device_connect(dst->dss, dst, dst->next);
if (r)
return r;
- r = dss_mgr_connect(&hdmi->output, dssdev);
- if (r)
- return r;
-
- r = omapdss_output_set_device(dssdev, dst);
- if (r) {
- DSSERR("failed to connect output to new device: %s\n",
- dst->name);
- dss_mgr_disconnect(&hdmi->output, dssdev);
- return r;
- }
-
+ dst->dispc_channel_connected = true;
return 0;
}
-static void hdmi_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static void hdmi_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- WARN_ON(dst != dssdev->dst);
+ dst->dispc_channel_connected = false;
- if (dst != dssdev->dst)
- return;
-
- omapdss_output_unset_device(dssdev);
-
- dss_mgr_disconnect(&hdmi->output, dssdev);
+ omapdss_device_disconnect(dst, dst->next);
}
static int hdmi_read_edid(struct omap_dss_device *dssdev,
@@ -548,69 +487,28 @@ static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev,
return 0;
}
-static const struct omapdss_hdmi_ops hdmi_ops = {
+static const struct omap_dss_device_ops hdmi_ops = {
.connect = hdmi_connect,
.disconnect = hdmi_disconnect,
.enable = hdmi_display_enable,
.disable = hdmi_display_disable,
- .check_timings = hdmi_display_check_timing,
- .set_timings = hdmi_display_set_timing,
- .get_timings = hdmi_display_get_timings,
+ .set_timings = hdmi_display_set_timings,
.read_edid = hdmi_read_edid,
- .lost_hotplug = hdmi_lost_hotplug,
- .set_infoframe = hdmi_set_infoframe,
- .set_hdmi_mode = hdmi_set_hdmi_mode,
-};
-
-static void hdmi_init_output(struct omap_hdmi *hdmi)
-{
- struct omap_dss_device *out = &hdmi->output;
-
- out->dev = &hdmi->pdev->dev;
- out->id = OMAP_DSS_OUTPUT_HDMI;
- out->output_type = OMAP_DISPLAY_TYPE_HDMI;
- out->name = "hdmi.0";
- out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->ops.hdmi = &hdmi_ops;
- out->owner = THIS_MODULE;
-
- omapdss_register_output(out);
-}
-
-static void hdmi_uninit_output(struct omap_hdmi *hdmi)
-{
- struct omap_dss_device *out = &hdmi->output;
-
- omapdss_unregister_output(out);
-}
-
-static int hdmi_probe_of(struct omap_hdmi *hdmi)
-{
- struct platform_device *pdev = hdmi->pdev;
- struct device_node *node = pdev->dev.of_node;
- struct device_node *ep;
- int r;
-
- ep = of_graph_get_endpoint_by_regs(node, 0, 0);
- if (!ep)
- return 0;
-
- r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy);
- if (r)
- goto err;
- of_node_put(ep);
- return 0;
+ .hdmi = {
+ .lost_hotplug = hdmi_lost_hotplug,
+ .set_infoframe = hdmi_set_infoframe,
+ .set_hdmi_mode = hdmi_set_hdmi_mode,
+ },
+};
-err:
- of_node_put(ep);
- return r;
-}
+/* -----------------------------------------------------------------------------
+ * Audio Callbacks
+ */
-/* Audio callbacks */
static int hdmi_audio_startup(struct device *dev,
void (*abort_cb)(struct device *dev))
{
@@ -725,27 +623,143 @@ static int hdmi_audio_register(struct omap_hdmi *hdmi)
return 0;
}
-/* HDMI HW IP initialisation */
+/* -----------------------------------------------------------------------------
+ * Component Bind & Unbind
+ */
+
static int hdmi4_bind(struct device *dev, struct device *master, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
struct dss_device *dss = dss_get_device(master);
- struct omap_hdmi *hdmi;
+ struct omap_hdmi *hdmi = dev_get_drvdata(dev);
+ int r;
+
+ hdmi->dss = dss;
+
+ r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp);
+ if (r)
+ return r;
+
+ r = hdmi4_cec_init(hdmi->pdev, &hdmi->core, &hdmi->wp);
+ if (r)
+ goto err_pll_uninit;
+
+ r = hdmi_audio_register(hdmi);
+ if (r) {
+ DSSERR("Registering HDMI audio failed\n");
+ goto err_cec_uninit;
+ }
+
+ hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
+ hdmi);
+
+ return 0;
+
+err_cec_uninit:
+ hdmi4_cec_uninit(&hdmi->core);
+err_pll_uninit:
+ hdmi_pll_uninit(&hdmi->pll);
+ return r;
+}
+
+static void hdmi4_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct omap_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dss_debugfs_remove_file(hdmi->debugfs);
+
+ if (hdmi->audio_pdev)
+ platform_device_unregister(hdmi->audio_pdev);
+
+ hdmi4_cec_uninit(&hdmi->core);
+ hdmi_pll_uninit(&hdmi->pll);
+}
+
+static const struct component_ops hdmi4_component_ops = {
+ .bind = hdmi4_bind,
+ .unbind = hdmi4_unbind,
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe & Remove, Suspend & Resume
+ */
+
+static int hdmi4_init_output(struct omap_hdmi *hdmi)
+{
+ struct omap_dss_device *out = &hdmi->output;
int r;
+
+ out->dev = &hdmi->pdev->dev;
+ out->id = OMAP_DSS_OUTPUT_HDMI;
+ out->output_type = OMAP_DISPLAY_TYPE_HDMI;
+ out->name = "hdmi.0";
+ out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
+ out->ops = &hdmi_ops;
+ out->owner = THIS_MODULE;
+ out->of_ports = BIT(0);
+ out->ops_flags = OMAP_DSS_DEVICE_OP_EDID;
+
+ out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
+ if (IS_ERR(out->next)) {
+ if (PTR_ERR(out->next) != -EPROBE_DEFER)
+ dev_err(out->dev, "failed to find video sink\n");
+ return PTR_ERR(out->next);
+ }
+
+ r = omapdss_output_validate(out);
+ if (r) {
+ omapdss_device_put(out->next);
+ out->next = NULL;
+ return r;
+ }
+
+ omapdss_device_register(out);
+
+ return 0;
+}
+
+static void hdmi4_uninit_output(struct omap_hdmi *hdmi)
+{
+ struct omap_dss_device *out = &hdmi->output;
+
+ if (out->next)
+ omapdss_device_put(out->next);
+ omapdss_device_unregister(out);
+}
+
+static int hdmi4_probe_of(struct omap_hdmi *hdmi)
+{
+ struct platform_device *pdev = hdmi->pdev;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *ep;
+ int r;
+
+ ep = of_graph_get_endpoint_by_regs(node, 0, 0);
+ if (!ep)
+ return 0;
+
+ r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy);
+ of_node_put(ep);
+ return r;
+}
+
+static int hdmi4_probe(struct platform_device *pdev)
+{
+ struct omap_hdmi *hdmi;
int irq;
+ int r;
hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
hdmi->pdev = pdev;
- hdmi->dss = dss;
+
dev_set_drvdata(&pdev->dev, hdmi);
mutex_init(&hdmi->lock);
spin_lock_init(&hdmi->audio_playing_lock);
- r = hdmi_probe_of(hdmi);
+ r = hdmi4_probe_of(hdmi);
if (r)
goto err_free;
@@ -753,27 +767,19 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
if (r)
goto err_free;
- r = hdmi_pll_init(dss, pdev, &hdmi->pll, &hdmi->wp);
- if (r)
- goto err_free;
-
r = hdmi_phy_init(pdev, &hdmi->phy, 4);
if (r)
- goto err_pll;
+ goto err_free;
r = hdmi4_core_init(pdev, &hdmi->core);
if (r)
- goto err_pll;
-
- r = hdmi4_cec_init(pdev, &hdmi->core, &hdmi->wp);
- if (r)
- goto err_pll;
+ goto err_free;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
r = -ENODEV;
- goto err_pll;
+ goto err_free;
}
r = devm_request_threaded_irq(&pdev->dev, irq,
@@ -781,66 +787,49 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
IRQF_ONESHOT, "OMAP HDMI", hdmi);
if (r) {
DSSERR("HDMI IRQ request failed\n");
- goto err_pll;
+ goto err_free;
}
- pm_runtime_enable(&pdev->dev);
+ hdmi->vdda_reg = devm_regulator_get(&pdev->dev, "vdda");
+ if (IS_ERR(hdmi->vdda_reg)) {
+ r = PTR_ERR(hdmi->vdda_reg);
+ if (r != -EPROBE_DEFER)
+ DSSERR("can't get VDDA regulator\n");
+ goto err_free;
+ }
- hdmi_init_output(hdmi);
+ pm_runtime_enable(&pdev->dev);
- r = hdmi_audio_register(hdmi);
- if (r) {
- DSSERR("Registering HDMI audio failed\n");
- hdmi_uninit_output(hdmi);
- pm_runtime_disable(&pdev->dev);
- return r;
- }
+ r = hdmi4_init_output(hdmi);
+ if (r)
+ goto err_pm_disable;
- hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
- hdmi);
+ r = component_add(&pdev->dev, &hdmi4_component_ops);
+ if (r)
+ goto err_uninit_output;
return 0;
-err_pll:
- hdmi_pll_uninit(&hdmi->pll);
+err_uninit_output:
+ hdmi4_uninit_output(hdmi);
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
err_free:
kfree(hdmi);
return r;
}
-static void hdmi4_unbind(struct device *dev, struct device *master, void *data)
+static int hdmi4_remove(struct platform_device *pdev)
{
- struct omap_hdmi *hdmi = dev_get_drvdata(dev);
-
- dss_debugfs_remove_file(hdmi->debugfs);
-
- if (hdmi->audio_pdev)
- platform_device_unregister(hdmi->audio_pdev);
+ struct omap_hdmi *hdmi = platform_get_drvdata(pdev);
- hdmi_uninit_output(hdmi);
-
- hdmi4_cec_uninit(&hdmi->core);
+ component_del(&pdev->dev, &hdmi4_component_ops);
- hdmi_pll_uninit(&hdmi->pll);
+ hdmi4_uninit_output(hdmi);
- pm_runtime_disable(dev);
+ pm_runtime_disable(&pdev->dev);
kfree(hdmi);
-}
-
-static const struct component_ops hdmi4_component_ops = {
- .bind = hdmi4_bind,
- .unbind = hdmi4_unbind,
-};
-
-static int hdmi4_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &hdmi4_component_ops);
-}
-
-static int hdmi4_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &hdmi4_component_ops);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index ae1a001d1b83..b0e4a7463f8c 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -117,24 +117,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int hdmi_init_regulator(struct omap_hdmi *hdmi)
-{
- struct regulator *reg;
-
- if (hdmi->vdda_reg != NULL)
- return 0;
-
- reg = devm_regulator_get(&hdmi->pdev->dev, "vdda");
- if (IS_ERR(reg)) {
- DSSERR("can't get VDDA regulator\n");
- return PTR_ERR(reg);
- }
-
- hdmi->vdda_reg = reg;
-
- return 0;
-}
-
static int hdmi_power_on_core(struct omap_hdmi *hdmi)
{
int r;
@@ -171,7 +153,7 @@ static void hdmi_power_off_core(struct omap_hdmi *hdmi)
static int hdmi_power_on_full(struct omap_hdmi *hdmi)
{
int r;
- struct videomode *vm;
+ const struct videomode *vm;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
unsigned int pc;
@@ -224,9 +206,6 @@ static int hdmi_power_on_full(struct omap_hdmi *hdmi)
hdmi5_configure(&hdmi->core, &hdmi->wp, &hdmi->cfg);
- /* tv size */
- dss_mgr_set_timings(&hdmi->output, vm);
-
r = dss_mgr_enable(&hdmi->output);
if (r)
goto err_mgr_enable;
@@ -268,19 +247,8 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi)
hdmi_power_off_core(hdmi);
}
-static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- if (!dispc_mgr_timings_ok(hdmi->dss->dispc, dssdev->dispc_channel, vm))
- return -EINVAL;
-
- return 0;
-}
-
-static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static void hdmi_display_set_timings(struct omap_dss_device *dssdev,
+ const struct videomode *vm)
{
struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
@@ -293,14 +261,6 @@ static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
mutex_unlock(&hdmi->lock);
}
-static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- *vm = hdmi->cfg.vm;
-}
-
static int hdmi_dump_regs(struct seq_file *s, void *p)
{
struct omap_hdmi *hdmi = s->private;
@@ -459,44 +419,25 @@ static void hdmi_core_disable(struct omap_hdmi *hdmi)
mutex_unlock(&hdmi->lock);
}
-static int hdmi_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static int hdmi_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
int r;
- r = hdmi_init_regulator(hdmi);
- if (r)
- return r;
-
- r = dss_mgr_connect(&hdmi->output, dssdev);
+ r = omapdss_device_connect(dst->dss, dst, dst->next);
if (r)
return r;
- r = omapdss_output_set_device(dssdev, dst);
- if (r) {
- DSSERR("failed to connect output to new device: %s\n",
- dst->name);
- dss_mgr_disconnect(&hdmi->output, dssdev);
- return r;
- }
-
+ dst->dispc_channel_connected = true;
return 0;
}
-static void hdmi_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static void hdmi_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- WARN_ON(dst != dssdev->dst);
+ dst->dispc_channel_connected = false;
- if (dst != dssdev->dst)
- return;
-
- omapdss_output_unset_device(dssdev);
-
- dss_mgr_disconnect(&hdmi->output, dssdev);
+ omapdss_device_disconnect(dst, dst->next);
}
static int hdmi_read_edid(struct omap_dss_device *dssdev,
@@ -540,68 +481,27 @@ static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev,
return 0;
}
-static const struct omapdss_hdmi_ops hdmi_ops = {
+static const struct omap_dss_device_ops hdmi_ops = {
.connect = hdmi_connect,
.disconnect = hdmi_disconnect,
.enable = hdmi_display_enable,
.disable = hdmi_display_disable,
- .check_timings = hdmi_display_check_timing,
- .set_timings = hdmi_display_set_timing,
- .get_timings = hdmi_display_get_timings,
+ .set_timings = hdmi_display_set_timings,
.read_edid = hdmi_read_edid,
- .set_infoframe = hdmi_set_infoframe,
- .set_hdmi_mode = hdmi_set_hdmi_mode,
-};
-
-static void hdmi_init_output(struct omap_hdmi *hdmi)
-{
- struct omap_dss_device *out = &hdmi->output;
-
- out->dev = &hdmi->pdev->dev;
- out->id = OMAP_DSS_OUTPUT_HDMI;
- out->output_type = OMAP_DISPLAY_TYPE_HDMI;
- out->name = "hdmi.0";
- out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->ops.hdmi = &hdmi_ops;
- out->owner = THIS_MODULE;
-
- omapdss_register_output(out);
-}
-
-static void hdmi_uninit_output(struct omap_hdmi *hdmi)
-{
- struct omap_dss_device *out = &hdmi->output;
-
- omapdss_unregister_output(out);
-}
-
-static int hdmi_probe_of(struct omap_hdmi *hdmi)
-{
- struct platform_device *pdev = hdmi->pdev;
- struct device_node *node = pdev->dev.of_node;
- struct device_node *ep;
- int r;
-
- ep = of_graph_get_endpoint_by_regs(node, 0, 0);
- if (!ep)
- return 0;
-
- r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy);
- if (r)
- goto err;
- of_node_put(ep);
- return 0;
+ .hdmi = {
+ .set_infoframe = hdmi_set_infoframe,
+ .set_hdmi_mode = hdmi_set_hdmi_mode,
+ },
+};
-err:
- of_node_put(ep);
- return r;
-}
+/* -----------------------------------------------------------------------------
+ * Audio Callbacks
+ */
-/* Audio callbacks */
static int hdmi_audio_startup(struct device *dev,
void (*abort_cb)(struct device *dev))
{
@@ -722,27 +622,136 @@ static int hdmi_audio_register(struct omap_hdmi *hdmi)
return 0;
}
-/* HDMI HW IP initialisation */
+/* -----------------------------------------------------------------------------
+ * Component Bind & Unbind
+ */
+
static int hdmi5_bind(struct device *dev, struct device *master, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
struct dss_device *dss = dss_get_device(master);
- struct omap_hdmi *hdmi;
+ struct omap_hdmi *hdmi = dev_get_drvdata(dev);
+ int r;
+
+ hdmi->dss = dss;
+
+ r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp);
+ if (r)
+ return r;
+
+ r = hdmi_audio_register(hdmi);
+ if (r) {
+ DSSERR("Registering HDMI audio failed %d\n", r);
+ goto err_pll_uninit;
+ }
+
+ hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
+ hdmi);
+
+ return 0;
+
+err_pll_uninit:
+ hdmi_pll_uninit(&hdmi->pll);
+ return r;
+}
+
+static void hdmi5_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct omap_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dss_debugfs_remove_file(hdmi->debugfs);
+
+ if (hdmi->audio_pdev)
+ platform_device_unregister(hdmi->audio_pdev);
+
+ hdmi_pll_uninit(&hdmi->pll);
+}
+
+static const struct component_ops hdmi5_component_ops = {
+ .bind = hdmi5_bind,
+ .unbind = hdmi5_unbind,
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe & Remove, Suspend & Resume
+ */
+
+static int hdmi5_init_output(struct omap_hdmi *hdmi)
+{
+ struct omap_dss_device *out = &hdmi->output;
+ int r;
+
+ out->dev = &hdmi->pdev->dev;
+ out->id = OMAP_DSS_OUTPUT_HDMI;
+ out->output_type = OMAP_DISPLAY_TYPE_HDMI;
+ out->name = "hdmi.0";
+ out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
+ out->ops = &hdmi_ops;
+ out->owner = THIS_MODULE;
+ out->of_ports = BIT(0);
+ out->ops_flags = OMAP_DSS_DEVICE_OP_EDID;
+
+ out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
+ if (IS_ERR(out->next)) {
+ if (PTR_ERR(out->next) != -EPROBE_DEFER)
+ dev_err(out->dev, "failed to find video sink\n");
+ return PTR_ERR(out->next);
+ }
+
+ r = omapdss_output_validate(out);
+ if (r) {
+ omapdss_device_put(out->next);
+ out->next = NULL;
+ return r;
+ }
+
+ omapdss_device_register(out);
+
+ return 0;
+}
+
+static void hdmi5_uninit_output(struct omap_hdmi *hdmi)
+{
+ struct omap_dss_device *out = &hdmi->output;
+
+ if (out->next)
+ omapdss_device_put(out->next);
+ omapdss_device_unregister(out);
+}
+
+static int hdmi5_probe_of(struct omap_hdmi *hdmi)
+{
+ struct platform_device *pdev = hdmi->pdev;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *ep;
int r;
+
+ ep = of_graph_get_endpoint_by_regs(node, 0, 0);
+ if (!ep)
+ return 0;
+
+ r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy);
+ of_node_put(ep);
+ return r;
+}
+
+static int hdmi5_probe(struct platform_device *pdev)
+{
+ struct omap_hdmi *hdmi;
int irq;
+ int r;
hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
hdmi->pdev = pdev;
- hdmi->dss = dss;
+
dev_set_drvdata(&pdev->dev, hdmi);
mutex_init(&hdmi->lock);
spin_lock_init(&hdmi->audio_playing_lock);
- r = hdmi_probe_of(hdmi);
+ r = hdmi5_probe_of(hdmi);
if (r)
goto err_free;
@@ -750,23 +759,19 @@ static int hdmi5_bind(struct device *dev, struct device *master, void *data)
if (r)
goto err_free;
- r = hdmi_pll_init(dss, pdev, &hdmi->pll, &hdmi->wp);
- if (r)
- goto err_free;
-
r = hdmi_phy_init(pdev, &hdmi->phy, 5);
if (r)
- goto err_pll;
+ goto err_free;
r = hdmi5_core_init(pdev, &hdmi->core);
if (r)
- goto err_pll;
+ goto err_free;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
r = -ENODEV;
- goto err_pll;
+ goto err_free;
}
r = devm_request_threaded_irq(&pdev->dev, irq,
@@ -774,64 +779,49 @@ static int hdmi5_bind(struct device *dev, struct device *master, void *data)
IRQF_ONESHOT, "OMAP HDMI", hdmi);
if (r) {
DSSERR("HDMI IRQ request failed\n");
- goto err_pll;
+ goto err_free;
}
- pm_runtime_enable(&pdev->dev);
+ hdmi->vdda_reg = devm_regulator_get(&pdev->dev, "vdda");
+ if (IS_ERR(hdmi->vdda_reg)) {
+ r = PTR_ERR(hdmi->vdda_reg);
+ if (r != -EPROBE_DEFER)
+ DSSERR("can't get VDDA regulator\n");
+ goto err_free;
+ }
- hdmi_init_output(hdmi);
+ pm_runtime_enable(&pdev->dev);
- r = hdmi_audio_register(hdmi);
- if (r) {
- DSSERR("Registering HDMI audio failed %d\n", r);
- hdmi_uninit_output(hdmi);
- pm_runtime_disable(&pdev->dev);
- return r;
- }
+ r = hdmi5_init_output(hdmi);
+ if (r)
+ goto err_pm_disable;
- hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
- hdmi);
+ r = component_add(&pdev->dev, &hdmi5_component_ops);
+ if (r)
+ goto err_uninit_output;
return 0;
-err_pll:
- hdmi_pll_uninit(&hdmi->pll);
+err_uninit_output:
+ hdmi5_uninit_output(hdmi);
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
err_free:
kfree(hdmi);
return r;
}
-static void hdmi5_unbind(struct device *dev, struct device *master, void *data)
+static int hdmi5_remove(struct platform_device *pdev)
{
- struct omap_hdmi *hdmi = dev_get_drvdata(dev);
-
- dss_debugfs_remove_file(hdmi->debugfs);
+ struct omap_hdmi *hdmi = platform_get_drvdata(pdev);
- if (hdmi->audio_pdev)
- platform_device_unregister(hdmi->audio_pdev);
-
- hdmi_uninit_output(hdmi);
+ component_del(&pdev->dev, &hdmi5_component_ops);
- hdmi_pll_uninit(&hdmi->pll);
+ hdmi5_uninit_output(hdmi);
- pm_runtime_disable(dev);
+ pm_runtime_disable(&pdev->dev);
kfree(hdmi);
-}
-
-static const struct component_ops hdmi5_component_ops = {
- .bind = hdmi5_bind,
- .unbind = hdmi5_unbind,
-};
-
-static int hdmi5_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &hdmi5_component_ops);
-}
-
-static int hdmi5_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &hdmi5_component_ops);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 2282e48574c6..02efabc7ed76 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -287,7 +287,7 @@ void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s)
}
static void hdmi_core_init(struct hdmi_core_vid_config *video_cfg,
- struct hdmi_config *cfg)
+ const struct hdmi_config *cfg)
{
DSSDBG("hdmi_core_init\n");
@@ -325,10 +325,10 @@ static void hdmi_core_init(struct hdmi_core_vid_config *video_cfg,
/* DSS_HDMI_CORE_VIDEO_CONFIG */
static void hdmi_core_video_config(struct hdmi_core_data *core,
- struct hdmi_core_vid_config *cfg)
+ const struct hdmi_core_vid_config *cfg)
{
void __iomem *base = core->base;
- struct videomode *vm = &cfg->v_fc_config.vm;
+ const struct videomode *vm = &cfg->v_fc_config.vm;
unsigned char r = 0;
bool vsync_pol, hsync_pol;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index 53bc5f78050c..100efb9f08c6 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -131,7 +131,7 @@ void hdmi_wp_video_stop(struct hdmi_wp_data *wp)
}
void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
- struct hdmi_video_format *video_fmt)
+ const struct hdmi_video_format *video_fmt)
{
u32 l = 0;
@@ -144,7 +144,7 @@ void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
}
void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
- struct videomode *vm)
+ const struct videomode *vm)
{
u32 r;
bool vsync_inv, hsync_inv;
@@ -164,7 +164,7 @@ void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
}
void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
- struct videomode *vm)
+ const struct videomode *vm)
{
u32 timing_h = 0;
u32 timing_v = 0;
@@ -193,7 +193,7 @@ void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
}
void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
- struct videomode *vm, struct hdmi_config *param)
+ struct videomode *vm, const struct hdmi_config *param)
{
DSSDBG("Enter hdmi_wp_video_init_format\n");
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 14d74adb13fb..1f698a95a94a 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -296,117 +296,14 @@ struct omap_dss_writeback_info {
u8 pre_mult_alpha;
};
-struct omapdss_dpi_ops {
- int (*connect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
- void (*disconnect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
-
- int (*enable)(struct omap_dss_device *dssdev);
- void (*disable)(struct omap_dss_device *dssdev);
-
- int (*check_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*set_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*get_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
-};
-
-struct omapdss_sdi_ops {
- int (*connect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
- void (*disconnect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
-
- int (*enable)(struct omap_dss_device *dssdev);
- void (*disable)(struct omap_dss_device *dssdev);
-
- int (*check_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*set_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*get_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
-};
-
-struct omapdss_dvi_ops {
- int (*connect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
- void (*disconnect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
-
- int (*enable)(struct omap_dss_device *dssdev);
- void (*disable)(struct omap_dss_device *dssdev);
-
- int (*check_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*set_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*get_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
-};
-
-struct omapdss_atv_ops {
- int (*connect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
- void (*disconnect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
-
- int (*enable)(struct omap_dss_device *dssdev);
- void (*disable)(struct omap_dss_device *dssdev);
-
- int (*check_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*set_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*get_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
-
- int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
- u32 (*get_wss)(struct omap_dss_device *dssdev);
-};
-
struct omapdss_hdmi_ops {
- int (*connect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
- void (*disconnect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
-
- int (*enable)(struct omap_dss_device *dssdev);
- void (*disable)(struct omap_dss_device *dssdev);
-
- int (*check_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*set_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*get_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
-
- int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
void (*lost_hotplug)(struct omap_dss_device *dssdev);
- bool (*detect)(struct omap_dss_device *dssdev);
-
- int (*register_hpd_cb)(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
- enum drm_connector_status status),
- void *cb_data);
- void (*unregister_hpd_cb)(struct omap_dss_device *dssdev);
- void (*enable_hpd)(struct omap_dss_device *dssdev);
- void (*disable_hpd)(struct omap_dss_device *dssdev);
-
int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
int (*set_infoframe)(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi);
};
struct omapdss_dsi_ops {
- int (*connect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
- void (*disconnect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
-
- int (*enable)(struct omap_dss_device *dssdev);
void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes,
bool enter_ulps);
@@ -457,53 +354,95 @@ struct omapdss_dsi_ops {
int channel, u16 plen);
};
+struct omap_dss_device_ops {
+ int (*connect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev);
+
+ int (*check_timings)(struct omap_dss_device *dssdev,
+ struct videomode *vm);
+ void (*get_timings)(struct omap_dss_device *dssdev,
+ struct videomode *vm);
+ void (*set_timings)(struct omap_dss_device *dssdev,
+ const struct videomode *vm);
+
+ bool (*detect)(struct omap_dss_device *dssdev);
+
+ void (*register_hpd_cb)(struct omap_dss_device *dssdev,
+ void (*cb)(void *cb_data,
+ enum drm_connector_status status),
+ void *cb_data);
+ void (*unregister_hpd_cb)(struct omap_dss_device *dssdev);
+
+ int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
+
+ union {
+ const struct omapdss_hdmi_ops hdmi;
+ const struct omapdss_dsi_ops dsi;
+ };
+};
+
+/**
+ * enum omap_dss_device_ops_flag - Indicates which device ops are supported
+ * @OMAP_DSS_DEVICE_OP_DETECT: The device supports output connection detection
+ * @OMAP_DSS_DEVICE_OP_HPD: The device supports all hot-plug-related operations
+ * @OMAP_DSS_DEVICE_OP_EDID: The device supports reading EDID
+ */
+enum omap_dss_device_ops_flag {
+ OMAP_DSS_DEVICE_OP_DETECT = BIT(0),
+ OMAP_DSS_DEVICE_OP_HPD = BIT(1),
+ OMAP_DSS_DEVICE_OP_EDID = BIT(2),
+};
+
+enum omap_dss_device_type {
+ OMAP_DSS_DEVICE_TYPE_OUTPUT = (1 << 0),
+ OMAP_DSS_DEVICE_TYPE_DISPLAY = (1 << 1),
+};
+
struct omap_dss_device {
struct kobject kobj;
struct device *dev;
struct module *owner;
- struct list_head panel_list;
+ struct dss_device *dss;
+ struct omap_dss_device *src;
+ struct omap_dss_device *dst;
+ struct omap_dss_device *next;
+
+ struct list_head list;
- /* alias in the form of "display%d" */
- char alias[16];
+ unsigned int alias_id;
enum omap_display_type type;
+ /*
+ * DSS output type that this device generates (for DSS internal devices)
+ * or requires (for external encoders). Must be OMAP_DISPLAY_TYPE_NONE
+ * for display devices (connectors and panels) and a non-zero value for
+ * all other devices.
+ */
enum omap_display_type output_type;
- struct {
- struct videomode vm;
-
- enum omap_dss_dsi_pixel_format dsi_pix_fmt;
- enum omap_dss_dsi_mode dsi_mode;
- } panel;
-
const char *name;
- struct omap_dss_driver *driver;
-
- union {
- const struct omapdss_dpi_ops *dpi;
- const struct omapdss_sdi_ops *sdi;
- const struct omapdss_dvi_ops *dvi;
- const struct omapdss_hdmi_ops *hdmi;
- const struct omapdss_atv_ops *atv;
- const struct omapdss_dsi_ops *dsi;
- } ops;
+ const struct omap_dss_driver *driver;
+ const struct omap_dss_device_ops *ops;
+ unsigned long ops_flags;
+ unsigned long bus_flags;
/* helper variable for driver suspend/resume */
bool activate_after_resume;
enum omap_display_caps caps;
- struct omap_dss_device *src;
-
enum omap_dss_display_state state;
/* OMAP DSS output specific fields */
- struct list_head list;
-
/* DISPC channel for this output */
enum omap_channel dispc_channel;
bool dispc_channel_connected;
@@ -511,24 +450,11 @@ struct omap_dss_device {
/* output instance */
enum omap_dss_output_id id;
- /* the port number in the DT node */
- int port_num;
-
- /* dynamic fields */
- struct omap_dss_device *dst;
+ /* bitmask of port numbers in DT */
+ unsigned int of_ports;
};
struct omap_dss_driver {
- int (*probe)(struct omap_dss_device *);
- void (*remove)(struct omap_dss_device *);
-
- int (*connect)(struct omap_dss_device *dssdev);
- void (*disconnect)(struct omap_dss_device *dssdev);
-
- int (*enable)(struct omap_dss_device *display);
- void (*disable)(struct omap_dss_device *display);
- int (*run_test)(struct omap_dss_device *display, int test);
-
int (*update)(struct omap_dss_device *dssdev,
u16 x, u16 y, u16 w, u16 h);
int (*sync)(struct omap_dss_device *dssdev);
@@ -536,42 +462,12 @@ struct omap_dss_driver {
int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
int (*get_te)(struct omap_dss_device *dssdev);
- u8 (*get_rotate)(struct omap_dss_device *dssdev);
- int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate);
-
- bool (*get_mirror)(struct omap_dss_device *dssdev);
- int (*set_mirror)(struct omap_dss_device *dssdev, bool enable);
-
int (*memory_read)(struct omap_dss_device *dssdev,
void *buf, size_t size,
u16 x, u16 y, u16 w, u16 h);
- int (*check_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*set_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*get_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
void (*get_size)(struct omap_dss_device *dssdev,
unsigned int *width, unsigned int *height);
-
- int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
- u32 (*get_wss)(struct omap_dss_device *dssdev);
-
- int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
- bool (*detect)(struct omap_dss_device *dssdev);
-
- int (*register_hpd_cb)(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
- enum drm_connector_status status),
- void *cb_data);
- void (*unregister_hpd_cb)(struct omap_dss_device *dssdev);
- void (*enable_hpd)(struct omap_dss_device *dssdev);
- void (*disable_hpd)(struct omap_dss_device *dssdev);
-
- int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
- int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev,
- const struct hdmi_avi_infoframe *avi);
};
struct dss_device *omapdss_get_dss(void);
@@ -581,27 +477,32 @@ static inline bool omapdss_is_initialized(void)
return !!omapdss_get_dss();
}
-int omapdss_register_display(struct omap_dss_device *dssdev);
-void omapdss_unregister_display(struct omap_dss_device *dssdev);
-
-struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev);
-void omap_dss_put_device(struct omap_dss_device *dssdev);
-#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL)
-struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from);
+#define for_each_dss_display(d) \
+ while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_DISPLAY)) != NULL)
+void omapdss_display_init(struct omap_dss_device *dssdev);
+struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output);
+
+void omapdss_device_register(struct omap_dss_device *dssdev);
+void omapdss_device_unregister(struct omap_dss_device *dssdev);
+struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev);
+void omapdss_device_put(struct omap_dss_device *dssdev);
+struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src,
+ unsigned int port);
+struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from,
+ enum omap_dss_device_type type);
+int omapdss_device_connect(struct dss_device *dss,
+ struct omap_dss_device *src,
+ struct omap_dss_device *dst);
+void omapdss_device_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst);
int omap_dss_get_num_overlay_managers(void);
int omap_dss_get_num_overlays(void);
-int omapdss_register_output(struct omap_dss_device *output);
-void omapdss_unregister_output(struct omap_dss_device *output);
-struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id);
-struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port);
-int omapdss_output_set_device(struct omap_dss_device *out,
- struct omap_dss_device *dssdev);
-int omapdss_output_unset_device(struct omap_dss_device *out);
-
-struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev);
+#define for_each_dss_output(d) \
+ while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_OUTPUT)) != NULL)
+int omapdss_output_validate(struct omap_dss_device *out);
typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
@@ -621,10 +522,7 @@ static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
}
struct omap_dss_device *
-omapdss_of_find_source_for_first_ep(struct device_node *node);
-
-struct device_node *dss_of_port_get_parent_device(struct device_node *port);
-u32 dss_of_port_get_port_number(struct device_node *port);
+omapdss_of_find_connected_device(struct device_node *node, unsigned int port);
enum dss_writeback_channel {
DSS_WB_LCD1_MGR = 0,
@@ -638,13 +536,6 @@ enum dss_writeback_channel {
};
struct dss_mgr_ops {
- int (*connect)(struct omap_drm_private *priv,
- enum omap_channel channel,
- struct omap_dss_device *dst);
- void (*disconnect)(struct omap_drm_private *priv,
- enum omap_channel channel,
- struct omap_dss_device *dst);
-
void (*start_update)(struct omap_drm_private *priv,
enum omap_channel channel);
int (*enable)(struct omap_drm_private *priv,
@@ -665,14 +556,11 @@ struct dss_mgr_ops {
void (*handler)(void *), void *data);
};
-int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops,
+int dss_install_mgr_ops(struct dss_device *dss,
+ const struct dss_mgr_ops *mgr_ops,
struct omap_drm_private *priv);
-void dss_uninstall_mgr_ops(void);
+void dss_uninstall_mgr_ops(struct dss_device *dss);
-int dss_mgr_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
-void dss_mgr_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
void dss_mgr_set_timings(struct omap_dss_device *dssdev,
const struct videomode *vm);
void dss_mgr_set_lcd_config(struct omap_dss_device *dssdev,
@@ -720,13 +608,14 @@ struct dispc_ops {
void (*mgr_set_lcd_config)(struct dispc_device *dispc,
enum omap_channel channel,
const struct dss_lcd_mgr_config *config);
+ int (*mgr_check_timings)(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct videomode *vm);
void (*mgr_set_timings)(struct dispc_device *dispc,
enum omap_channel channel,
const struct videomode *vm);
void (*mgr_setup)(struct dispc_device *dispc, enum omap_channel channel,
const struct omap_overlay_manager_info *info);
- enum omap_dss_output_id (*mgr_get_supported_outputs)(
- struct dispc_device *dispc, enum omap_channel channel);
u32 (*mgr_gamma_size)(struct dispc_device *dispc,
enum omap_channel channel);
void (*mgr_set_gamma)(struct dispc_device *dispc,
@@ -757,9 +646,6 @@ struct dispc_ops {
struct dispc_device *dispc_get_dispc(struct dss_device *dss);
const struct dispc_ops *dispc_get_ops(struct dss_device *dss);
-bool omapdss_component_is_display(struct device_node *node);
-bool omapdss_component_is_output(struct device_node *node);
-
bool omapdss_stack_is_ready(void);
void omapdss_gather_components(struct device *dev);
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 96b9d4cd505f..18505bc70f7e 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -21,238 +21,96 @@
#include <linux/slab.h>
#include <linux/of.h>
+#include "dss.h"
#include "omapdss.h"
-static LIST_HEAD(output_list);
-static DEFINE_MUTEX(output_lock);
-
-int omapdss_output_set_device(struct omap_dss_device *out,
- struct omap_dss_device *dssdev)
+int omapdss_output_validate(struct omap_dss_device *out)
{
- int r;
-
- mutex_lock(&output_lock);
-
- if (out->dst) {
- dev_err(out->dev,
- "output already has device %s connected to it\n",
- out->dst->name);
- r = -EINVAL;
- goto err;
- }
-
- if (out->output_type != dssdev->type) {
+ if (out->next && out->output_type != out->next->type) {
dev_err(out->dev, "output type and display type don't match\n");
- r = -EINVAL;
- goto err;
- }
-
- out->dst = dssdev;
- dssdev->src = out;
-
- mutex_unlock(&output_lock);
-
- return 0;
-err:
- mutex_unlock(&output_lock);
-
- return r;
-}
-EXPORT_SYMBOL(omapdss_output_set_device);
-
-int omapdss_output_unset_device(struct omap_dss_device *out)
-{
- int r;
-
- mutex_lock(&output_lock);
-
- if (!out->dst) {
- dev_err(out->dev,
- "output doesn't have a device connected to it\n");
- r = -EINVAL;
- goto err;
+ return -EINVAL;
}
- if (out->dst->state != OMAP_DSS_DISPLAY_DISABLED) {
- dev_err(out->dev,
- "device %s is not disabled, cannot unset device\n",
- out->dst->name);
- r = -EINVAL;
- goto err;
- }
-
- out->dst->src = NULL;
- out->dst = NULL;
-
- mutex_unlock(&output_lock);
-
- return 0;
-err:
- mutex_unlock(&output_lock);
-
- return r;
-}
-EXPORT_SYMBOL(omapdss_output_unset_device);
-
-int omapdss_register_output(struct omap_dss_device *out)
-{
- list_add_tail(&out->list, &output_list);
return 0;
}
-EXPORT_SYMBOL(omapdss_register_output);
-
-void omapdss_unregister_output(struct omap_dss_device *out)
-{
- list_del(&out->list);
-}
-EXPORT_SYMBOL(omapdss_unregister_output);
-
-bool omapdss_component_is_output(struct device_node *node)
-{
- struct omap_dss_device *out;
-
- list_for_each_entry(out, &output_list, list) {
- if (out->dev->of_node == node)
- return true;
- }
+EXPORT_SYMBOL(omapdss_output_validate);
- return false;
-}
-EXPORT_SYMBOL(omapdss_component_is_output);
-
-struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id)
-{
- struct omap_dss_device *out;
-
- list_for_each_entry(out, &output_list, list) {
- if (out->id == id)
- return out;
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(omap_dss_get_output);
-
-struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port)
-{
- struct device_node *src_node;
- struct omap_dss_device *out;
- u32 reg;
-
- src_node = dss_of_port_get_parent_device(port);
- if (!src_node)
- return NULL;
-
- reg = dss_of_port_get_port_number(port);
-
- list_for_each_entry(out, &output_list, list) {
- if (out->dev->of_node == src_node && out->port_num == reg) {
- of_node_put(src_node);
- return omap_dss_get_device(out);
- }
- }
-
- of_node_put(src_node);
-
- return NULL;
-}
-
-struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev)
-{
- while (dssdev->src)
- dssdev = dssdev->src;
-
- if (dssdev->id != 0)
- return omap_dss_get_device(dssdev);
-
- return NULL;
-}
-EXPORT_SYMBOL(omapdss_find_output_from_display);
-
-static const struct dss_mgr_ops *dss_mgr_ops;
-static struct omap_drm_private *dss_mgr_ops_priv;
-
-int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops,
+int dss_install_mgr_ops(struct dss_device *dss,
+ const struct dss_mgr_ops *mgr_ops,
struct omap_drm_private *priv)
{
- if (dss_mgr_ops)
+ if (dss->mgr_ops)
return -EBUSY;
- dss_mgr_ops = mgr_ops;
- dss_mgr_ops_priv = priv;
+ dss->mgr_ops = mgr_ops;
+ dss->mgr_ops_priv = priv;
return 0;
}
EXPORT_SYMBOL(dss_install_mgr_ops);
-void dss_uninstall_mgr_ops(void)
+void dss_uninstall_mgr_ops(struct dss_device *dss)
{
- dss_mgr_ops = NULL;
- dss_mgr_ops_priv = NULL;
+ dss->mgr_ops = NULL;
+ dss->mgr_ops_priv = NULL;
}
EXPORT_SYMBOL(dss_uninstall_mgr_ops);
-int dss_mgr_connect(struct omap_dss_device *dssdev, struct omap_dss_device *dst)
-{
- return dss_mgr_ops->connect(dss_mgr_ops_priv,
- dssdev->dispc_channel, dst);
-}
-EXPORT_SYMBOL(dss_mgr_connect);
-
-void dss_mgr_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
-{
- dss_mgr_ops->disconnect(dss_mgr_ops_priv, dssdev->dispc_channel, dst);
-}
-EXPORT_SYMBOL(dss_mgr_disconnect);
-
void dss_mgr_set_timings(struct omap_dss_device *dssdev,
const struct videomode *vm)
{
- dss_mgr_ops->set_timings(dss_mgr_ops_priv, dssdev->dispc_channel, vm);
+ dssdev->dss->mgr_ops->set_timings(dssdev->dss->mgr_ops_priv,
+ dssdev->dispc_channel, vm);
}
EXPORT_SYMBOL(dss_mgr_set_timings);
void dss_mgr_set_lcd_config(struct omap_dss_device *dssdev,
const struct dss_lcd_mgr_config *config)
{
- dss_mgr_ops->set_lcd_config(dss_mgr_ops_priv,
- dssdev->dispc_channel, config);
+ dssdev->dss->mgr_ops->set_lcd_config(dssdev->dss->mgr_ops_priv,
+ dssdev->dispc_channel, config);
}
EXPORT_SYMBOL(dss_mgr_set_lcd_config);
int dss_mgr_enable(struct omap_dss_device *dssdev)
{
- return dss_mgr_ops->enable(dss_mgr_ops_priv, dssdev->dispc_channel);
+ return dssdev->dss->mgr_ops->enable(dssdev->dss->mgr_ops_priv,
+ dssdev->dispc_channel);
}
EXPORT_SYMBOL(dss_mgr_enable);
void dss_mgr_disable(struct omap_dss_device *dssdev)
{
- dss_mgr_ops->disable(dss_mgr_ops_priv, dssdev->dispc_channel);
+ dssdev->dss->mgr_ops->disable(dssdev->dss->mgr_ops_priv,
+ dssdev->dispc_channel);
}
EXPORT_SYMBOL(dss_mgr_disable);
void dss_mgr_start_update(struct omap_dss_device *dssdev)
{
- dss_mgr_ops->start_update(dss_mgr_ops_priv, dssdev->dispc_channel);
+ dssdev->dss->mgr_ops->start_update(dssdev->dss->mgr_ops_priv,
+ dssdev->dispc_channel);
}
EXPORT_SYMBOL(dss_mgr_start_update);
int dss_mgr_register_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data)
{
- return dss_mgr_ops->register_framedone_handler(dss_mgr_ops_priv,
- dssdev->dispc_channel,
- handler, data);
+ struct dss_device *dss = dssdev->dss;
+
+ return dss->mgr_ops->register_framedone_handler(dss->mgr_ops_priv,
+ dssdev->dispc_channel,
+ handler, data);
}
EXPORT_SYMBOL(dss_mgr_register_framedone_handler);
void dss_mgr_unregister_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data)
{
- dss_mgr_ops->unregister_framedone_handler(dss_mgr_ops_priv,
- dssdev->dispc_channel,
- handler, data);
+ struct dss_device *dss = dssdev->dss;
+
+ dss->mgr_ops->unregister_framedone_handler(dss->mgr_ops_priv,
+ dssdev->dispc_channel,
+ handler, data);
}
EXPORT_SYMBOL(dss_mgr_unregister_framedone_handler);
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 69c3b7a3d5c7..b2fe2387037a 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -132,10 +132,8 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi)
static int sdi_display_enable(struct omap_dss_device *dssdev)
{
struct sdi_device *sdi = dssdev_to_sdi(dssdev);
- struct videomode *vm = &sdi->vm;
- unsigned long fck;
struct dispc_clock_info dispc_cinfo;
- unsigned long pck;
+ unsigned long fck;
int r;
if (!sdi->output.dispc_channel_connected) {
@@ -151,27 +149,12 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_get_dispc;
- /* 15.5.9.1.2 */
- vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE | DISPLAY_FLAGS_SYNC_POSEDGE;
-
- r = sdi_calc_clock_div(sdi, vm->pixelclock, &fck, &dispc_cinfo);
+ r = sdi_calc_clock_div(sdi, sdi->vm.pixelclock, &fck, &dispc_cinfo);
if (r)
goto err_calc_clock_div;
sdi->mgr_config.clock_info = dispc_cinfo;
- pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
-
- if (pck != vm->pixelclock) {
- DSSWARN("Could not find exact pixel clock. Requested %lu Hz, got %lu Hz\n",
- vm->pixelclock, pck);
-
- vm->pixelclock = pck;
- }
-
-
- dss_mgr_set_timings(&sdi->output, vm);
-
r = dss_set_fck_rate(sdi->dss, fck);
if (r)
goto err_set_dss_clock_div;
@@ -230,96 +213,63 @@ static void sdi_display_disable(struct omap_dss_device *dssdev)
}
static void sdi_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+ const struct videomode *vm)
{
struct sdi_device *sdi = dssdev_to_sdi(dssdev);
sdi->vm = *vm;
}
-static void sdi_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
-
- *vm = sdi->vm;
-}
-
static int sdi_check_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
struct sdi_device *sdi = dssdev_to_sdi(dssdev);
- enum omap_channel channel = dssdev->dispc_channel;
-
- if (!dispc_mgr_timings_ok(sdi->dss->dispc, channel, vm))
- return -EINVAL;
+ struct dispc_clock_info dispc_cinfo;
+ unsigned long fck;
+ unsigned long pck;
+ int r;
if (vm->pixelclock == 0)
return -EINVAL;
- return 0;
-}
+ r = sdi_calc_clock_div(sdi, vm->pixelclock, &fck, &dispc_cinfo);
+ if (r)
+ return r;
-static int sdi_init_regulator(struct sdi_device *sdi)
-{
- struct regulator *vdds_sdi;
+ pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
- if (sdi->vdds_sdi_reg)
- return 0;
+ if (pck != vm->pixelclock) {
+ DSSWARN("Pixel clock adjusted from %lu Hz to %lu Hz\n",
+ vm->pixelclock, pck);
- vdds_sdi = devm_regulator_get(&sdi->pdev->dev, "vdds_sdi");
- if (IS_ERR(vdds_sdi)) {
- if (PTR_ERR(vdds_sdi) != -EPROBE_DEFER)
- DSSERR("can't get VDDS_SDI regulator\n");
- return PTR_ERR(vdds_sdi);
+ vm->pixelclock = pck;
}
- sdi->vdds_sdi_reg = vdds_sdi;
-
return 0;
}
-static int sdi_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static int sdi_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
int r;
- r = sdi_init_regulator(sdi);
+ r = omapdss_device_connect(dst->dss, dst, dst->next);
if (r)
return r;
- r = dss_mgr_connect(&sdi->output, dssdev);
- if (r)
- return r;
-
- r = omapdss_output_set_device(dssdev, dst);
- if (r) {
- DSSERR("failed to connect output to new device: %s\n",
- dst->name);
- dss_mgr_disconnect(&sdi->output, dssdev);
- return r;
- }
-
+ dst->dispc_channel_connected = true;
return 0;
}
-static void sdi_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static void sdi_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
+ dst->dispc_channel_connected = false;
- WARN_ON(dst != dssdev->dst);
-
- if (dst != dssdev->dst)
- return;
-
- omapdss_output_unset_device(dssdev);
-
- dss_mgr_disconnect(&sdi->output, dssdev);
+ omapdss_device_disconnect(dst, dst->next);
}
-static const struct omapdss_sdi_ops sdi_ops = {
+static const struct omap_dss_device_ops sdi_ops = {
.connect = sdi_connect,
.disconnect = sdi_disconnect,
@@ -328,12 +278,12 @@ static const struct omapdss_sdi_ops sdi_ops = {
.check_timings = sdi_check_timings,
.set_timings = sdi_set_timings,
- .get_timings = sdi_get_timings,
};
-static void sdi_init_output(struct sdi_device *sdi)
+static int sdi_init_output(struct sdi_device *sdi)
{
struct omap_dss_device *out = &sdi->output;
+ int r;
out->dev = &sdi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_SDI;
@@ -341,16 +291,36 @@ static void sdi_init_output(struct sdi_device *sdi)
out->name = "sdi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
/* We have SDI only on OMAP3, where it's on port 1 */
- out->port_num = 1;
- out->ops.sdi = &sdi_ops;
+ out->of_ports = BIT(1);
+ out->ops = &sdi_ops;
out->owner = THIS_MODULE;
+ out->bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE /* 15.5.9.1.2 */
+ | DRM_BUS_FLAG_SYNC_POSEDGE;
+
+ out->next = omapdss_of_find_connected_device(out->dev->of_node, 1);
+ if (IS_ERR(out->next)) {
+ if (PTR_ERR(out->next) != -EPROBE_DEFER)
+ dev_err(out->dev, "failed to find video sink\n");
+ return PTR_ERR(out->next);
+ }
- omapdss_register_output(out);
+ r = omapdss_output_validate(out);
+ if (r) {
+ omapdss_device_put(out->next);
+ out->next = NULL;
+ return r;
+ }
+
+ omapdss_device_register(out);
+
+ return 0;
}
static void sdi_uninit_output(struct sdi_device *sdi)
{
- omapdss_unregister_output(&sdi->output);
+ if (sdi->output.next)
+ omapdss_device_put(sdi->output.next);
+ omapdss_device_unregister(&sdi->output);
}
int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
@@ -372,25 +342,32 @@ int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
}
r = of_property_read_u32(ep, "datapairs", &datapairs);
+ of_node_put(ep);
if (r) {
DSSERR("failed to parse datapairs\n");
- goto err_datapairs;
+ goto err_free;
}
sdi->datapairs = datapairs;
sdi->dss = dss;
- of_node_put(ep);
-
sdi->pdev = pdev;
port->data = sdi;
- sdi_init_output(sdi);
+ sdi->vdds_sdi_reg = devm_regulator_get(&pdev->dev, "vdds_sdi");
+ if (IS_ERR(sdi->vdds_sdi_reg)) {
+ r = PTR_ERR(sdi->vdds_sdi_reg);
+ if (r != -EPROBE_DEFER)
+ DSSERR("can't get VDDS_SDI regulator\n");
+ goto err_free;
+ }
+
+ r = sdi_init_output(sdi);
+ if (r)
+ goto err_free;
return 0;
-err_datapairs:
- of_node_put(ep);
err_free:
kfree(sdi);
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index ac01907dcc34..ff0b18c8e4ac 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -452,7 +452,7 @@ static void venc_runtime_put(struct venc_device *venc)
WARN_ON(r < 0 && r != -ENOSYS);
}
-static const struct venc_config *venc_timings_to_config(struct videomode *vm)
+static const struct venc_config *venc_timings_to_config(const struct videomode *vm)
{
switch (venc_get_videomode(vm)) {
default:
@@ -491,8 +491,6 @@ static int venc_power_on(struct venc_device *venc)
venc_write_reg(venc, VENC_OUTPUT_CONTROL, l);
- dss_mgr_set_timings(&venc->output, &venc->vm);
-
r = regulator_enable(venc->vdda_dac_reg);
if (r)
goto err1;
@@ -568,32 +566,30 @@ static void venc_display_disable(struct omap_dss_device *dssdev)
mutex_unlock(&venc->venc_lock);
}
-static void venc_set_timings(struct omap_dss_device *dssdev,
+static void venc_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
struct venc_device *venc = dssdev_to_venc(dssdev);
- struct videomode actual_vm;
+
+ mutex_lock(&venc->venc_lock);
+ *vm = venc->vm;
+ mutex_unlock(&venc->venc_lock);
+}
+
+static void venc_set_timings(struct omap_dss_device *dssdev,
+ const struct videomode *vm)
+{
+ struct venc_device *venc = dssdev_to_venc(dssdev);
DSSDBG("venc_set_timings\n");
mutex_lock(&venc->venc_lock);
- switch (venc_get_videomode(vm)) {
- default:
- WARN_ON_ONCE(1);
- case VENC_MODE_PAL:
- actual_vm = omap_dss_pal_vm;
- break;
- case VENC_MODE_NTSC:
- actual_vm = omap_dss_ntsc_vm;
- break;
- }
-
/* Reset WSS data when the TV standard changes. */
- if (memcmp(&venc->vm, &actual_vm, sizeof(actual_vm)))
+ if (memcmp(&venc->vm, vm, sizeof(*vm)))
venc->wss_data = 0;
- venc->vm = actual_vm;
+ venc->vm = *vm;
dispc_set_tv_pclk(venc->dss->dispc, 13500000);
@@ -607,80 +603,16 @@ static int venc_check_timings(struct omap_dss_device *dssdev,
switch (venc_get_videomode(vm)) {
case VENC_MODE_PAL:
- case VENC_MODE_NTSC:
+ *vm = omap_dss_pal_vm;
return 0;
- default:
- return -EINVAL;
- }
-}
-
-static void venc_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct venc_device *venc = dssdev_to_venc(dssdev);
-
- mutex_lock(&venc->venc_lock);
-
- *vm = venc->vm;
-
- mutex_unlock(&venc->venc_lock);
-}
-
-static u32 venc_get_wss(struct omap_dss_device *dssdev)
-{
- struct venc_device *venc = dssdev_to_venc(dssdev);
-
- /* Invert due to VENC_L21_WC_CTL:INV=1 */
- return (venc->wss_data >> 8) ^ 0xfffff;
-}
-
-static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
-{
- struct venc_device *venc = dssdev_to_venc(dssdev);
- const struct venc_config *config;
- int r;
- DSSDBG("venc_set_wss\n");
-
- mutex_lock(&venc->venc_lock);
-
- config = venc_timings_to_config(&venc->vm);
-
- /* Invert due to VENC_L21_WC_CTL:INV=1 */
- venc->wss_data = (wss ^ 0xfffff) << 8;
-
- r = venc_runtime_get(venc);
- if (r)
- goto err;
-
- venc_write_reg(venc, VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
- venc->wss_data);
-
- venc_runtime_put(venc);
-
-err:
- mutex_unlock(&venc->venc_lock);
-
- return r;
-}
-
-static int venc_init_regulator(struct venc_device *venc)
-{
- struct regulator *vdda_dac;
-
- if (venc->vdda_dac_reg != NULL)
+ case VENC_MODE_NTSC:
+ *vm = omap_dss_ntsc_vm;
return 0;
- vdda_dac = devm_regulator_get(&venc->pdev->dev, "vdda");
- if (IS_ERR(vdda_dac)) {
- if (PTR_ERR(vdda_dac) != -EPROBE_DEFER)
- DSSERR("can't get VDDA_DAC regulator\n");
- return PTR_ERR(vdda_dac);
+ default:
+ return -EINVAL;
}
-
- venc->vdda_dac_reg = vdda_dac;
-
- return 0;
}
static int venc_dump_regs(struct seq_file *s, void *p)
@@ -760,47 +692,28 @@ static int venc_get_clocks(struct venc_device *venc)
return 0;
}
-static int venc_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static int venc_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct venc_device *venc = dssdev_to_venc(dssdev);
int r;
- r = venc_init_regulator(venc);
- if (r)
- return r;
-
- r = dss_mgr_connect(&venc->output, dssdev);
+ r = omapdss_device_connect(dst->dss, dst, dst->next);
if (r)
return r;
- r = omapdss_output_set_device(dssdev, dst);
- if (r) {
- DSSERR("failed to connect output to new device: %s\n",
- dst->name);
- dss_mgr_disconnect(&venc->output, dssdev);
- return r;
- }
-
+ dst->dispc_channel_connected = true;
return 0;
}
-static void venc_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static void venc_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct venc_device *venc = dssdev_to_venc(dssdev);
-
- WARN_ON(dst != dssdev->dst);
-
- if (dst != dssdev->dst)
- return;
-
- omapdss_output_unset_device(dssdev);
+ dst->dispc_channel_connected = false;
- dss_mgr_disconnect(&venc->output, dssdev);
+ omapdss_device_disconnect(dst, dst->next);
}
-static const struct omapdss_atv_ops venc_ops = {
+static const struct omap_dss_device_ops venc_ops = {
.connect = venc_connect,
.disconnect = venc_disconnect,
@@ -808,31 +721,92 @@ static const struct omapdss_atv_ops venc_ops = {
.disable = venc_display_disable,
.check_timings = venc_check_timings,
- .set_timings = venc_set_timings,
.get_timings = venc_get_timings,
+ .set_timings = venc_set_timings,
+};
+
+/* -----------------------------------------------------------------------------
+ * Component Bind & Unbind
+ */
- .set_wss = venc_set_wss,
- .get_wss = venc_get_wss,
+static int venc_bind(struct device *dev, struct device *master, void *data)
+{
+ struct dss_device *dss = dss_get_device(master);
+ struct venc_device *venc = dev_get_drvdata(dev);
+ u8 rev_id;
+ int r;
+
+ venc->dss = dss;
+
+ r = venc_runtime_get(venc);
+ if (r)
+ return r;
+
+ rev_id = (u8)(venc_read_reg(venc, VENC_REV_ID) & 0xff);
+ dev_dbg(dev, "OMAP VENC rev %d\n", rev_id);
+
+ venc_runtime_put(venc);
+
+ venc->debugfs = dss_debugfs_create_file(dss, "venc", venc_dump_regs,
+ venc);
+
+ return 0;
+}
+
+static void venc_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct venc_device *venc = dev_get_drvdata(dev);
+
+ dss_debugfs_remove_file(venc->debugfs);
+}
+
+static const struct component_ops venc_component_ops = {
+ .bind = venc_bind,
+ .unbind = venc_unbind,
};
-static void venc_init_output(struct venc_device *venc)
+/* -----------------------------------------------------------------------------
+ * Probe & Remove, Suspend & Resume
+ */
+
+static int venc_init_output(struct venc_device *venc)
{
struct omap_dss_device *out = &venc->output;
+ int r;
out->dev = &venc->pdev->dev;
out->id = OMAP_DSS_OUTPUT_VENC;
out->output_type = OMAP_DISPLAY_TYPE_VENC;
out->name = "venc.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->ops.atv = &venc_ops;
+ out->ops = &venc_ops;
out->owner = THIS_MODULE;
+ out->of_ports = BIT(0);
+
+ out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
+ if (IS_ERR(out->next)) {
+ if (PTR_ERR(out->next) != -EPROBE_DEFER)
+ dev_err(out->dev, "failed to find video sink\n");
+ return PTR_ERR(out->next);
+ }
- omapdss_register_output(out);
+ r = omapdss_output_validate(out);
+ if (r) {
+ omapdss_device_put(out->next);
+ out->next = NULL;
+ return r;
+ }
+
+ omapdss_device_register(out);
+
+ return 0;
}
static void venc_uninit_output(struct venc_device *venc)
{
- omapdss_unregister_output(&venc->output);
+ if (venc->output.next)
+ omapdss_device_put(venc->output.next);
+ omapdss_device_unregister(&venc->output);
}
static int venc_probe_of(struct venc_device *venc)
@@ -878,19 +852,15 @@ err:
return r;
}
-/* VENC HW IP initialisation */
static const struct soc_device_attribute venc_soc_devices[] = {
{ .machine = "OMAP3[45]*" },
{ .machine = "AM35*" },
{ /* sentinel */ }
};
-static int venc_bind(struct device *dev, struct device *master, void *data)
+static int venc_probe(struct platform_device *pdev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct dss_device *dss = dss_get_device(master);
struct venc_device *venc;
- u8 rev_id;
struct resource *venc_mem;
int r;
@@ -899,8 +869,8 @@ static int venc_bind(struct device *dev, struct device *master, void *data)
return -ENOMEM;
venc->pdev = pdev;
- venc->dss = dss;
- dev_set_drvdata(dev, venc);
+
+ platform_set_drvdata(pdev, venc);
/* The OMAP34xx, OMAP35xx and AM35xx VENC require the TV DAC clock. */
if (soc_device_match(venc_soc_devices))
@@ -909,6 +879,7 @@ static int venc_bind(struct device *dev, struct device *master, void *data)
mutex_init(&venc->venc_lock);
venc->wss_data = 0;
+ venc->vm = omap_dss_pal_vm;
venc_mem = platform_get_resource(venc->pdev, IORESOURCE_MEM, 0);
venc->base = devm_ioremap_resource(&pdev->dev, venc_mem);
@@ -917,68 +888,54 @@ static int venc_bind(struct device *dev, struct device *master, void *data)
goto err_free;
}
+ venc->vdda_dac_reg = devm_regulator_get(&pdev->dev, "vdda");
+ if (IS_ERR(venc->vdda_dac_reg)) {
+ r = PTR_ERR(venc->vdda_dac_reg);
+ if (r != -EPROBE_DEFER)
+ DSSERR("can't get VDDA_DAC regulator\n");
+ goto err_free;
+ }
+
r = venc_get_clocks(venc);
if (r)
goto err_free;
- pm_runtime_enable(&pdev->dev);
-
- r = venc_runtime_get(venc);
+ r = venc_probe_of(venc);
if (r)
- goto err_runtime_get;
-
- rev_id = (u8)(venc_read_reg(venc, VENC_REV_ID) & 0xff);
- dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id);
-
- venc_runtime_put(venc);
+ goto err_free;
- r = venc_probe_of(venc);
- if (r) {
- DSSERR("Invalid DT data\n");
- goto err_probe_of;
- }
+ pm_runtime_enable(&pdev->dev);
- venc->debugfs = dss_debugfs_create_file(dss, "venc", venc_dump_regs,
- venc);
+ r = venc_init_output(venc);
+ if (r)
+ goto err_pm_disable;
- venc_init_output(venc);
+ r = component_add(&pdev->dev, &venc_component_ops);
+ if (r)
+ goto err_uninit_output;
return 0;
-err_probe_of:
-err_runtime_get:
+err_uninit_output:
+ venc_uninit_output(venc);
+err_pm_disable:
pm_runtime_disable(&pdev->dev);
err_free:
kfree(venc);
return r;
}
-static void venc_unbind(struct device *dev, struct device *master, void *data)
+static int venc_remove(struct platform_device *pdev)
{
- struct venc_device *venc = dev_get_drvdata(dev);
+ struct venc_device *venc = platform_get_drvdata(pdev);
- dss_debugfs_remove_file(venc->debugfs);
+ component_del(&pdev->dev, &venc_component_ops);
venc_uninit_output(venc);
- pm_runtime_disable(dev);
+ pm_runtime_disable(&pdev->dev);
kfree(venc);
-}
-
-static const struct component_ops venc_component_ops = {
- .bind = venc_bind,
- .unbind = venc_unbind,
-};
-
-static int venc_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &venc_component_ops);
-}
-
-static int venc_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &venc_component_ops);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 2ddb856666c4..98f5ca29444a 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -29,10 +29,28 @@
struct omap_connector {
struct drm_connector base;
- struct omap_dss_device *dssdev;
+ struct omap_dss_device *output;
+ struct omap_dss_device *display;
+ struct omap_dss_device *hpd;
bool hdmi_mode;
};
+static void omap_connector_hpd_notify(struct drm_connector *connector,
+ struct omap_dss_device *src,
+ enum drm_connector_status status)
+{
+ if (status == connector_status_disconnected) {
+ /*
+ * If the source is an HDMI encoder, notify it of disconnection.
+ * This is required to let the HDMI encoder reset any internal
+ * state related to connection status, such as the CEC address.
+ */
+ if (src && src->type == OMAP_DISPLAY_TYPE_HDMI &&
+ src->ops->hdmi.lost_hotplug)
+ src->ops->hdmi.lost_hotplug(src);
+ }
+}
+
static void omap_connector_hpd_cb(void *cb_data,
enum drm_connector_status status)
{
@@ -46,8 +64,31 @@ static void omap_connector_hpd_cb(void *cb_data,
connector->status = status;
mutex_unlock(&dev->mode_config.mutex);
- if (old_status != status)
- drm_kms_helper_hotplug_event(dev);
+ if (old_status == status)
+ return;
+
+ omap_connector_hpd_notify(connector, omap_connector->hpd, status);
+
+ drm_kms_helper_hotplug_event(dev);
+}
+
+void omap_connector_enable_hpd(struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *hpd = omap_connector->hpd;
+
+ if (hpd)
+ hpd->ops->register_hpd_cb(hpd, omap_connector_hpd_cb,
+ omap_connector);
+}
+
+void omap_connector_disable_hpd(struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *hpd = omap_connector->hpd;
+
+ if (hpd)
+ hpd->ops->unregister_hpd_cb(hpd);
}
bool omap_connector_get_hdmi_mode(struct drm_connector *connector)
@@ -57,120 +98,179 @@ bool omap_connector_get_hdmi_mode(struct drm_connector *connector)
return omap_connector->hdmi_mode;
}
+static struct omap_dss_device *
+omap_connector_find_device(struct drm_connector *connector,
+ enum omap_dss_device_ops_flag op)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev;
+
+ for (dssdev = omap_connector->display; dssdev; dssdev = dssdev->src) {
+ if (dssdev->ops_flags & op)
+ return dssdev;
+ }
+
+ return NULL;
+}
+
static enum drm_connector_status omap_connector_detect(
struct drm_connector *connector, bool force)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev = omap_connector->dssdev;
- struct omap_dss_driver *dssdrv = dssdev->driver;
- enum drm_connector_status ret;
-
- if (dssdrv->detect) {
- if (dssdrv->detect(dssdev))
- ret = connector_status_connected;
- else
- ret = connector_status_disconnected;
- } else if (dssdev->type == OMAP_DISPLAY_TYPE_DPI ||
- dssdev->type == OMAP_DISPLAY_TYPE_DBI ||
- dssdev->type == OMAP_DISPLAY_TYPE_SDI ||
- dssdev->type == OMAP_DISPLAY_TYPE_DSI) {
- ret = connector_status_connected;
+ struct omap_dss_device *dssdev;
+ enum drm_connector_status status;
+
+ dssdev = omap_connector_find_device(connector,
+ OMAP_DSS_DEVICE_OP_DETECT);
+
+ if (dssdev) {
+ status = dssdev->ops->detect(dssdev)
+ ? connector_status_connected
+ : connector_status_disconnected;
+
+ omap_connector_hpd_notify(connector, dssdev->src, status);
} else {
- ret = connector_status_unknown;
+ switch (omap_connector->display->type) {
+ case OMAP_DISPLAY_TYPE_DPI:
+ case OMAP_DISPLAY_TYPE_DBI:
+ case OMAP_DISPLAY_TYPE_SDI:
+ case OMAP_DISPLAY_TYPE_DSI:
+ status = connector_status_connected;
+ break;
+ default:
+ status = connector_status_unknown;
+ break;
+ }
}
- VERB("%s: %d (force=%d)", omap_connector->dssdev->name, ret, force);
+ VERB("%s: %d (force=%d)", omap_connector->display->name, status, force);
- return ret;
+ return status;
}
static void omap_connector_destroy(struct drm_connector *connector)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev = omap_connector->dssdev;
- DBG("%s", omap_connector->dssdev->name);
- if (connector->polled == DRM_CONNECTOR_POLL_HPD &&
- dssdev->driver->unregister_hpd_cb) {
- dssdev->driver->unregister_hpd_cb(dssdev);
+ DBG("%s", omap_connector->display->name);
+
+ if (omap_connector->hpd) {
+ struct omap_dss_device *hpd = omap_connector->hpd;
+
+ hpd->ops->unregister_hpd_cb(hpd);
+ omapdss_device_put(hpd);
+ omap_connector->hpd = NULL;
}
+
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(omap_connector);
- omap_dss_put_device(dssdev);
+ omapdss_device_put(omap_connector->output);
+ omapdss_device_put(omap_connector->display);
}
#define MAX_EDID 512
-static int omap_connector_get_modes(struct drm_connector *connector)
+static int omap_connector_get_modes_edid(struct drm_connector *connector,
+ struct omap_dss_device *dssdev)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev = omap_connector->dssdev;
- struct omap_dss_driver *dssdrv = dssdev->driver;
- struct drm_device *dev = connector->dev;
- int n = 0;
+ enum drm_connector_status status;
+ void *edid;
+ int n;
- DBG("%s", omap_connector->dssdev->name);
+ status = omap_connector_detect(connector, false);
+ if (status != connector_status_connected)
+ goto no_edid;
- /* if display exposes EDID, then we parse that in the normal way to
- * build table of supported modes.. otherwise (ie. fixed resolution
- * LCD panels) we just return a single mode corresponding to the
- * currently configured timings:
- */
- if (dssdrv->read_edid) {
- void *edid = kzalloc(MAX_EDID, GFP_KERNEL);
-
- if (!edid)
- return 0;
-
- if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
- drm_edid_is_valid(edid)) {
- drm_connector_update_edid_property(
- connector, edid);
- n = drm_add_edid_modes(connector, edid);
-
- omap_connector->hdmi_mode =
- drm_detect_hdmi_monitor(edid);
- } else {
- drm_connector_update_edid_property(
- connector, NULL);
- }
+ edid = kzalloc(MAX_EDID, GFP_KERNEL);
+ if (!edid)
+ goto no_edid;
+ if (dssdev->ops->read_edid(dssdev, edid, MAX_EDID) <= 0 ||
+ !drm_edid_is_valid(edid)) {
kfree(edid);
- } else {
- struct drm_display_mode *mode = drm_mode_create(dev);
- struct videomode vm = {0};
+ goto no_edid;
+ }
- if (!mode)
- return 0;
+ drm_connector_update_edid_property(connector, edid);
+ n = drm_add_edid_modes(connector, edid);
- dssdrv->get_timings(dssdev, &vm);
+ omap_connector->hdmi_mode = drm_detect_hdmi_monitor(edid);
- drm_display_mode_from_videomode(&vm, mode);
+ kfree(edid);
+ return n;
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_set_name(mode);
- drm_mode_probed_add(connector, mode);
+no_edid:
+ drm_connector_update_edid_property(connector, NULL);
+ return 0;
+}
- if (dssdrv->get_size) {
- dssdrv->get_size(dssdev,
+static int omap_connector_get_modes(struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev;
+ struct drm_display_mode *mode;
+ struct videomode vm = {0};
+
+ DBG("%s", omap_connector->display->name);
+
+ /*
+ * If the display exposes EDID, then we parse that in the normal way to
+ * build a table of supported modes.
+ */
+ dssdev = omap_connector_find_device(connector,
+ OMAP_DSS_DEVICE_OP_EDID);
+ if (dssdev)
+ return omap_connector_get_modes_edid(connector, dssdev);
+
+ /*
+ * Otherwise we have either a fixed resolution panel or an output that
+ * doesn't support mode discovery (e.g. DVI or VGA with the DDC bus
+ * unconnected, or analog TV). Start by querying the size.
+ */
+ dssdev = omap_connector->display;
+ if (dssdev->driver && dssdev->driver->get_size)
+ dssdev->driver->get_size(dssdev,
&connector->display_info.width_mm,
&connector->display_info.height_mm);
- }
- n = 1;
+ /*
+ * Iterate over the pipeline to find the first device that can provide
+ * timing information. If we can't find any, we just let the KMS core
+ * add the default modes.
+ */
+ for (dssdev = omap_connector->display; dssdev; dssdev = dssdev->src) {
+ if (dssdev->ops->get_timings)
+ break;
}
+ if (!dssdev)
+ return 0;
- return n;
+ /* Add a single mode corresponding to the fixed panel timings. */
+ mode = drm_mode_create(connector->dev);
+ if (!mode)
+ return 0;
+
+ dssdev->ops->get_timings(dssdev, &vm);
+
+ drm_display_mode_from_videomode(&vm, mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
}
static int omap_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev = omap_connector->dssdev;
- struct omap_dss_driver *dssdrv = dssdev->driver;
+ enum omap_channel channel = omap_connector->output->dispc_channel;
+ struct omap_drm_private *priv = connector->dev->dev_private;
+ struct omap_dss_device *dssdev;
struct videomode vm = {0};
struct drm_device *dev = connector->dev;
struct drm_display_mode *new_mode;
@@ -179,44 +279,31 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
drm_display_mode_to_videomode(mode, &vm);
mode->vrefresh = drm_mode_vrefresh(mode);
- /*
- * if the panel driver doesn't have a check_timings, it's most likely
- * a fixed resolution panel, check if the timings match with the
- * panel's timings
- */
- if (dssdrv->check_timings) {
- r = dssdrv->check_timings(dssdev, &vm);
- } else {
- struct videomode t = {0};
+ r = priv->dispc_ops->mgr_check_timings(priv->dispc, channel, &vm);
+ if (r)
+ goto done;
- dssdrv->get_timings(dssdev, &t);
+ for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) {
+ if (!dssdev->ops->check_timings)
+ continue;
- /*
- * Ignore the flags, as we don't get them from
- * drm_display_mode_to_videomode.
- */
- t.flags = 0;
-
- if (memcmp(&vm, &t, sizeof(vm)))
- r = -EINVAL;
- else
- r = 0;
+ r = dssdev->ops->check_timings(dssdev, &vm);
+ if (r)
+ goto done;
}
- if (!r) {
- /* check if vrefresh is still valid */
- new_mode = drm_mode_duplicate(dev, mode);
+ /* check if vrefresh is still valid */
+ new_mode = drm_mode_duplicate(dev, mode);
+ if (!new_mode)
+ return MODE_BAD;
- if (!new_mode)
- return MODE_BAD;
-
- new_mode->clock = vm.pixelclock / 1000;
- new_mode->vrefresh = 0;
- if (mode->vrefresh == drm_mode_vrefresh(new_mode))
- ret = MODE_OK;
- drm_mode_destroy(dev, new_mode);
- }
+ new_mode->clock = vm.pixelclock / 1000;
+ new_mode->vrefresh = 0;
+ if (mode->vrefresh == drm_mode_vrefresh(new_mode))
+ ret = MODE_OK;
+ drm_mode_destroy(dev, new_mode);
+done:
DBG("connector: mode %s: "
"%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
(ret == MODE_OK) ? "valid" : "invalid",
@@ -243,52 +330,72 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
.mode_valid = omap_connector_mode_valid,
};
+static int omap_connector_get_type(struct omap_dss_device *display)
+{
+ switch (display->type) {
+ case OMAP_DISPLAY_TYPE_HDMI:
+ return DRM_MODE_CONNECTOR_HDMIA;
+ case OMAP_DISPLAY_TYPE_DVI:
+ return DRM_MODE_CONNECTOR_DVID;
+ case OMAP_DISPLAY_TYPE_DSI:
+ return DRM_MODE_CONNECTOR_DSI;
+ case OMAP_DISPLAY_TYPE_DPI:
+ case OMAP_DISPLAY_TYPE_DBI:
+ return DRM_MODE_CONNECTOR_DPI;
+ case OMAP_DISPLAY_TYPE_VENC:
+ /* TODO: This could also be composite */
+ return DRM_MODE_CONNECTOR_SVIDEO;
+ case OMAP_DISPLAY_TYPE_SDI:
+ return DRM_MODE_CONNECTOR_LVDS;
+ default:
+ return DRM_MODE_CONNECTOR_Unknown;
+ }
+}
+
/* initialize connector */
struct drm_connector *omap_connector_init(struct drm_device *dev,
- int connector_type, struct omap_dss_device *dssdev,
- struct drm_encoder *encoder)
+ struct omap_dss_device *output,
+ struct omap_dss_device *display,
+ struct drm_encoder *encoder)
{
struct drm_connector *connector = NULL;
struct omap_connector *omap_connector;
- bool hpd_supported = false;
-
- DBG("%s", dssdev->name);
+ struct omap_dss_device *dssdev;
- omap_dss_get_device(dssdev);
+ DBG("%s", display->name);
omap_connector = kzalloc(sizeof(*omap_connector), GFP_KERNEL);
if (!omap_connector)
goto fail;
- omap_connector->dssdev = dssdev;
+ omap_connector->output = omapdss_device_get(output);
+ omap_connector->display = omapdss_device_get(display);
connector = &omap_connector->base;
+ connector->interlace_allowed = 1;
+ connector->doublescan_allowed = 0;
drm_connector_init(dev, connector, &omap_connector_funcs,
- connector_type);
+ omap_connector_get_type(display));
drm_connector_helper_add(connector, &omap_connector_helper_funcs);
- if (dssdev->driver->register_hpd_cb) {
- int ret = dssdev->driver->register_hpd_cb(dssdev,
- omap_connector_hpd_cb,
- omap_connector);
- if (!ret)
- hpd_supported = true;
- else if (ret != -ENOTSUPP)
- DBG("%s: Failed to register HPD callback (%d).",
- dssdev->name, ret);
- }
-
- if (hpd_supported)
+ /*
+ * Initialize connector status handling. First try to find a device that
+ * supports hot-plug reporting. If it fails, fall back to a device that
+ * support polling. If that fails too, we don't support hot-plug
+ * detection at all.
+ */
+ dssdev = omap_connector_find_device(connector, OMAP_DSS_DEVICE_OP_HPD);
+ if (dssdev) {
+ omap_connector->hpd = omapdss_device_get(dssdev);
connector->polled = DRM_CONNECTOR_POLL_HPD;
- else if (dssdev->driver->detect)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
- else
- connector->polled = 0;
-
- connector->interlace_allowed = 1;
- connector->doublescan_allowed = 0;
+ } else {
+ dssdev = omap_connector_find_device(connector,
+ OMAP_DSS_DEVICE_OP_DETECT);
+ if (dssdev)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
return connector;
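omap_connector_find_device() above walks the pipeline from the display back towards the output and returns the first device that advertises the requested operation. The same walk, reduced to its essentials with a simplified stand-in structure:

struct pipe_dev {
	struct pipe_dev *src;		/* upstream device in the pipeline */
	unsigned long ops_flags;	/* OMAP_DSS_DEVICE_OP_* style bitmask */
};

/* Return the first device in the chain implementing @op, or NULL. */
static struct pipe_dev *pipe_find_device(struct pipe_dev *display,
					 unsigned long op)
{
	struct pipe_dev *dev;

	for (dev = display; dev; dev = dev->src)
		if (dev->ops_flags & op)
			return dev;

	return NULL;
}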
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h
index 98bbc779b302..854099801649 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.h
+++ b/drivers/gpu/drm/omapdrm/omap_connector.h
@@ -28,10 +28,13 @@ struct drm_encoder;
struct omap_dss_device;
struct drm_connector *omap_connector_init(struct drm_device *dev,
- int connector_type, struct omap_dss_device *dssdev,
- struct drm_encoder *encoder);
+ struct omap_dss_device *output,
+ struct omap_dss_device *display,
+ struct drm_encoder *encoder);
struct drm_encoder *omap_connector_attached_encoder(
struct drm_connector *connector);
bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
+void omap_connector_enable_hpd(struct drm_connector *connector);
+void omap_connector_disable_hpd(struct drm_connector *connector);
#endif /* __OMAPDRM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 6c4d40b824e4..62928ec0e7db 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -41,6 +41,7 @@ struct omap_crtc {
struct drm_crtc base;
const char *name;
+ struct omap_drm_pipeline *pipe;
enum omap_channel channel;
struct videomode vm;
@@ -108,38 +109,7 @@ int omap_crtc_wait_pending(struct drm_crtc *crtc)
* job of sequencing the setup of the video pipe in the proper order
*/
-/* ovl-mgr-id -> crtc */
-static struct omap_crtc *omap_crtcs[8];
-static struct omap_dss_device *omap_crtc_output[8];
-
/* we can probably ignore these until we support command-mode panels: */
-static int omap_crtc_dss_connect(struct omap_drm_private *priv,
- enum omap_channel channel,
- struct omap_dss_device *dst)
-{
- const struct dispc_ops *dispc_ops = priv->dispc_ops;
- struct dispc_device *dispc = priv->dispc;
-
- if (omap_crtc_output[channel])
- return -EINVAL;
-
- if (!(dispc_ops->mgr_get_supported_outputs(dispc, channel) & dst->id))
- return -EINVAL;
-
- omap_crtc_output[channel] = dst;
- dst->dispc_channel_connected = true;
-
- return 0;
-}
-
-static void omap_crtc_dss_disconnect(struct omap_drm_private *priv,
- enum omap_channel channel,
- struct omap_dss_device *dst)
-{
- omap_crtc_output[channel] = NULL;
- dst->dispc_channel_connected = false;
-}
-
static void omap_crtc_dss_start_update(struct omap_drm_private *priv,
enum omap_channel channel)
{
@@ -159,7 +129,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
if (WARN_ON(omap_crtc->enabled == enable))
return;
- if (omap_crtc_output[channel]->output_type == OMAP_DISPLAY_TYPE_HDMI) {
+ if (omap_crtc->pipe->output->output_type == OMAP_DISPLAY_TYPE_HDMI) {
priv->dispc_ops->mgr_enable(priv->dispc, channel, enable);
omap_crtc->enabled = enable;
return;
@@ -215,7 +185,8 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
static int omap_crtc_dss_enable(struct omap_drm_private *priv,
enum omap_channel channel)
{
- struct omap_crtc *omap_crtc = omap_crtcs[channel];
+ struct drm_crtc *crtc = priv->channels[channel]->crtc;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
priv->dispc_ops->mgr_set_timings(priv->dispc, omap_crtc->channel,
&omap_crtc->vm);
@@ -227,7 +198,8 @@ static int omap_crtc_dss_enable(struct omap_drm_private *priv,
static void omap_crtc_dss_disable(struct omap_drm_private *priv,
enum omap_channel channel)
{
- struct omap_crtc *omap_crtc = omap_crtcs[channel];
+ struct drm_crtc *crtc = priv->channels[channel]->crtc;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
omap_crtc_set_enabled(&omap_crtc->base, false);
}
@@ -236,7 +208,9 @@ static void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
enum omap_channel channel,
const struct videomode *vm)
{
- struct omap_crtc *omap_crtc = omap_crtcs[channel];
+ struct drm_crtc *crtc = priv->channels[channel]->crtc;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
DBG("%s", omap_crtc->name);
omap_crtc->vm = *vm;
}
@@ -245,7 +219,8 @@ static void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
enum omap_channel channel,
const struct dss_lcd_mgr_config *config)
{
- struct omap_crtc *omap_crtc = omap_crtcs[channel];
+ struct drm_crtc *crtc = priv->channels[channel]->crtc;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
DBG("%s", omap_crtc->name);
priv->dispc_ops->mgr_set_lcd_config(priv->dispc, omap_crtc->channel,
@@ -266,8 +241,6 @@ static void omap_crtc_dss_unregister_framedone(
}
static const struct dss_mgr_ops mgr_ops = {
- .connect = omap_crtc_dss_connect,
- .disconnect = omap_crtc_dss_disconnect,
.start_update = omap_crtc_dss_start_update,
.enable = omap_crtc_dss_enable,
.disable = omap_crtc_dss_disable,
@@ -447,11 +420,6 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
- struct omap_drm_private *priv = crtc->dev->dev_private;
- const u32 flags_mask = DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_DE_LOW |
- DISPLAY_FLAGS_PIXDATA_POSEDGE | DISPLAY_FLAGS_PIXDATA_NEGEDGE |
- DISPLAY_FLAGS_SYNC_POSEDGE | DISPLAY_FLAGS_SYNC_NEGEDGE;
- unsigned int i;
DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
omap_crtc->name, mode->base.id, mode->name,
@@ -461,38 +429,6 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
mode->type, mode->flags);
drm_display_mode_to_videomode(mode, &omap_crtc->vm);
-
- /*
- * HACK: This fixes the vm flags.
- * struct drm_display_mode does not contain the VSYNC/HSYNC/DE flags
- * and they get lost when converting back and forth between
- * struct drm_display_mode and struct videomode. The hack below
- * goes and fetches the missing flags from the panel drivers.
- *
- * Correct solution would be to use DRM's bus-flags, but that's not
- * easily possible before the omapdrm's panel/encoder driver model
- * has been changed to the DRM model.
- */
-
- for (i = 0; i < priv->num_encoders; ++i) {
- struct drm_encoder *encoder = priv->encoders[i];
-
- if (encoder->crtc == crtc) {
- struct omap_dss_device *dssdev;
-
- dssdev = omap_encoder_get_dssdev(encoder);
-
- if (dssdev) {
- struct videomode vm = {0};
-
- dssdev->driver->get_timings(dssdev, &vm);
-
- omap_crtc->vm.flags |= vm.flags & flags_mask;
- }
-
- break;
- }
- }
}
static int omap_crtc_atomic_check(struct drm_crtc *crtc,
@@ -681,37 +617,29 @@ static const char *channel_names[] = {
void omap_crtc_pre_init(struct omap_drm_private *priv)
{
- memset(omap_crtcs, 0, sizeof(omap_crtcs));
-
- dss_install_mgr_ops(&mgr_ops, priv);
+ dss_install_mgr_ops(priv->dss, &mgr_ops, priv);
}
-void omap_crtc_pre_uninit(void)
+void omap_crtc_pre_uninit(struct omap_drm_private *priv)
{
- dss_uninstall_mgr_ops();
+ dss_uninstall_mgr_ops(priv->dss);
}
/* initialize crtc */
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
- struct drm_plane *plane, struct omap_dss_device *dssdev)
+ struct omap_drm_pipeline *pipe,
+ struct drm_plane *plane)
{
struct omap_drm_private *priv = dev->dev_private;
struct drm_crtc *crtc = NULL;
struct omap_crtc *omap_crtc;
enum omap_channel channel;
- struct omap_dss_device *out;
int ret;
- out = omapdss_find_output_from_display(dssdev);
- channel = out->dispc_channel;
- omap_dss_put_device(out);
+ channel = pipe->output->dispc_channel;
DBG("%s", channel_names[channel]);
- /* Multiple displays on same channel is not allowed */
- if (WARN_ON(omap_crtcs[channel] != NULL))
- return ERR_PTR(-EINVAL);
-
omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
if (!omap_crtc)
return ERR_PTR(-ENOMEM);
@@ -720,6 +648,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
init_waitqueue_head(&omap_crtc->pending_wait);
+ omap_crtc->pipe = pipe;
omap_crtc->channel = channel;
omap_crtc->name = channel_names[channel];
@@ -727,7 +656,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
&omap_crtc_funcs, NULL);
if (ret < 0) {
dev_err(dev->dev, "%s(): could not init crtc for: %s\n",
- __func__, dssdev->name);
+ __func__, pipe->display->name);
kfree(omap_crtc);
return ERR_PTR(ret);
}
@@ -750,7 +679,5 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
omap_plane_install_properties(crtc->primary, &crtc->base);
- omap_crtcs[channel] = omap_crtc;
-
return crtc;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.h b/drivers/gpu/drm/omapdrm/omap_crtc.h
index eaab2d7f0324..d9de437ba9dd 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.h
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.h
@@ -27,15 +27,17 @@ enum omap_channel;
struct drm_crtc;
struct drm_device;
struct drm_plane;
+struct omap_drm_pipeline;
struct omap_dss_device;
struct videomode;
struct videomode *omap_crtc_timings(struct drm_crtc *crtc);
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
void omap_crtc_pre_init(struct omap_drm_private *priv);
-void omap_crtc_pre_uninit(void);
+void omap_crtc_pre_uninit(struct omap_drm_private *priv);
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
- struct drm_plane *plane, struct omap_dss_device *dssdev);
+ struct omap_drm_pipeline *pipe,
+ struct drm_plane *plane);
int omap_crtc_wait_pending(struct drm_crtc *crtc);
void omap_crtc_error_irq(struct drm_crtc *crtc, u32 irqstatus);
void omap_crtc_vblank_irq(struct drm_crtc *crtc);
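The omap_crtc.c hunks above drop the file-scope omap_crtcs[]/omap_crtc_output[] arrays and resolve a DISPC channel through the driver-private pipeline table instead. A sketch of that lookup, assuming the priv->channels[] table added to omap_drv.h below:

/*
 * Resolve a DISPC channel to its CRTC through the per-device pipeline
 * table; with no global state, multiple driver instances cannot clash.
 */
static struct omap_crtc *omap_crtc_from_channel(struct omap_drm_private *priv,
						enum omap_channel channel)
{
	struct drm_crtc *crtc = priv->channels[channel]->crtc;

	return to_omap_crtc(crtc);
}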
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 1b6601e9b107..5f98506ac2c5 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -15,6 +15,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/of.h>
+#include <linux/sort.h>
#include <linux/sys_soc.h>
#include <drm/drm_atomic.h>
@@ -127,55 +129,92 @@ static const struct drm_mode_config_funcs omap_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static int get_connector_type(struct omap_dss_device *dssdev)
+static void omap_disconnect_pipelines(struct drm_device *ddev)
{
- switch (dssdev->type) {
- case OMAP_DISPLAY_TYPE_HDMI:
- return DRM_MODE_CONNECTOR_HDMIA;
- case OMAP_DISPLAY_TYPE_DVI:
- return DRM_MODE_CONNECTOR_DVID;
- case OMAP_DISPLAY_TYPE_DSI:
- return DRM_MODE_CONNECTOR_DSI;
- case OMAP_DISPLAY_TYPE_DPI:
- case OMAP_DISPLAY_TYPE_DBI:
- return DRM_MODE_CONNECTOR_DPI;
- case OMAP_DISPLAY_TYPE_VENC:
- /* TODO: This could also be composite */
- return DRM_MODE_CONNECTOR_SVIDEO;
- case OMAP_DISPLAY_TYPE_SDI:
- return DRM_MODE_CONNECTOR_LVDS;
- default:
- return DRM_MODE_CONNECTOR_Unknown;
+ struct omap_drm_private *priv = ddev->dev_private;
+ unsigned int i;
+
+ for (i = 0; i < priv->num_pipes; i++) {
+ struct omap_drm_pipeline *pipe = &priv->pipes[i];
+
+ omapdss_device_disconnect(NULL, pipe->output);
+
+ omapdss_device_put(pipe->output);
+ omapdss_device_put(pipe->display);
+ pipe->output = NULL;
+ pipe->display = NULL;
}
+
+ memset(&priv->channels, 0, sizeof(priv->channels));
+
+ priv->num_pipes = 0;
}
-static void omap_disconnect_dssdevs(void)
+static int omap_compare_pipes(const void *a, const void *b)
{
- struct omap_dss_device *dssdev = NULL;
+ const struct omap_drm_pipeline *pipe1 = a;
+ const struct omap_drm_pipeline *pipe2 = b;
- for_each_dss_dev(dssdev)
- dssdev->driver->disconnect(dssdev);
+ if (pipe1->display->alias_id > pipe2->display->alias_id)
+ return 1;
+ else if (pipe1->display->alias_id < pipe2->display->alias_id)
+ return -1;
+ return 0;
}
-static int omap_connect_dssdevs(void)
+static int omap_connect_pipelines(struct drm_device *ddev)
{
+ struct omap_drm_private *priv = ddev->dev_private;
+ struct omap_dss_device *output = NULL;
+ unsigned int i;
int r;
- struct omap_dss_device *dssdev = NULL;
if (!omapdss_stack_is_ready())
return -EPROBE_DEFER;
- for_each_dss_dev(dssdev) {
- r = dssdev->driver->connect(dssdev);
+ for_each_dss_output(output) {
+ r = omapdss_device_connect(priv->dss, NULL, output);
if (r == -EPROBE_DEFER) {
- omap_dss_put_device(dssdev);
+ omapdss_device_put(output);
goto cleanup;
} else if (r) {
- dev_warn(dssdev->dev, "could not connect display: %s\n",
- dssdev->name);
+ dev_warn(output->dev, "could not connect output %s\n",
+ output->name);
+ } else {
+ struct omap_drm_pipeline *pipe;
+
+ pipe = &priv->pipes[priv->num_pipes++];
+ pipe->output = omapdss_device_get(output);
+ pipe->display = omapdss_display_get(output);
+
+ if (priv->num_pipes == ARRAY_SIZE(priv->pipes)) {
+ /* To balance the 'for_each_dss_output' loop */
+ omapdss_device_put(output);
+ break;
+ }
}
}
+ /* Sort the list by DT aliases */
+ sort(priv->pipes, priv->num_pipes, sizeof(priv->pipes[0]),
+ omap_compare_pipes, NULL);
+
+ /*
+ * Populate the pipeline lookup table by DISPC channel. Only one display
+ * is allowed per channel.
+ */
+ for (i = 0; i < priv->num_pipes; ++i) {
+ struct omap_drm_pipeline *pipe = &priv->pipes[i];
+ enum omap_channel channel = pipe->output->dispc_channel;
+
+ if (WARN_ON(priv->channels[channel] != NULL)) {
+ r = -EINVAL;
+ goto cleanup;
+ }
+
+ priv->channels[channel] = pipe;
+ }
+
return 0;
cleanup:
@@ -183,7 +222,7 @@ cleanup:
* if we are deferring probe, we disconnect the devices we previously
* connected
*/
- omap_disconnect_dssdevs();
+ omap_disconnect_pipelines(ddev);
return r;
}
@@ -204,10 +243,9 @@ static int omap_modeset_init_properties(struct drm_device *dev)
static int omap_modeset_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- struct omap_dss_device *dssdev = NULL;
int num_ovls = priv->dispc_ops->get_num_ovls(priv->dispc);
int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc);
- int num_crtcs, crtc_idx, plane_idx;
+ unsigned int i;
int ret;
u32 plane_crtc_mask;
@@ -225,87 +263,62 @@ static int omap_modeset_init(struct drm_device *dev)
* configuration does not match the expectations or exceeds
* the available resources, the configuration is rejected.
*/
- num_crtcs = 0;
- for_each_dss_dev(dssdev)
- if (omapdss_device_is_connected(dssdev))
- num_crtcs++;
-
- if (num_crtcs > num_mgrs || num_crtcs > num_ovls ||
- num_crtcs > ARRAY_SIZE(priv->crtcs) ||
- num_crtcs > ARRAY_SIZE(priv->planes) ||
- num_crtcs > ARRAY_SIZE(priv->encoders) ||
- num_crtcs > ARRAY_SIZE(priv->connectors)) {
+ if (priv->num_pipes > num_mgrs || priv->num_pipes > num_ovls) {
dev_err(dev->dev, "%s(): Too many connected displays\n",
__func__);
return -EINVAL;
}
- /* All planes can be put to any CRTC */
- plane_crtc_mask = (1 << num_crtcs) - 1;
+ /* Create all planes first. They can all be put to any CRTC. */
+ plane_crtc_mask = (1 << priv->num_pipes) - 1;
+
+ for (i = 0; i < num_ovls; i++) {
+ enum drm_plane_type type = i < priv->num_pipes
+ ? DRM_PLANE_TYPE_PRIMARY
+ : DRM_PLANE_TYPE_OVERLAY;
+ struct drm_plane *plane;
+
+ if (WARN_ON(priv->num_planes >= ARRAY_SIZE(priv->planes)))
+ return -EINVAL;
+
+ plane = omap_plane_init(dev, i, type, plane_crtc_mask);
+ if (IS_ERR(plane))
+ return PTR_ERR(plane);
- dssdev = NULL;
+ priv->planes[priv->num_planes++] = plane;
+ }
- crtc_idx = 0;
- plane_idx = 0;
- for_each_dss_dev(dssdev) {
+ /* Create the CRTCs, encoders and connectors. */
+ for (i = 0; i < priv->num_pipes; i++) {
+ struct omap_drm_pipeline *pipe = &priv->pipes[i];
+ struct omap_dss_device *display = pipe->display;
struct drm_connector *connector;
struct drm_encoder *encoder;
- struct drm_plane *plane;
struct drm_crtc *crtc;
- if (!omapdss_device_is_connected(dssdev))
- continue;
-
- encoder = omap_encoder_init(dev, dssdev);
+ encoder = omap_encoder_init(dev, pipe->output, display);
if (!encoder)
return -ENOMEM;
- connector = omap_connector_init(dev,
- get_connector_type(dssdev), dssdev, encoder);
+ connector = omap_connector_init(dev, pipe->output, display,
+ encoder);
if (!connector)
return -ENOMEM;
- plane = omap_plane_init(dev, plane_idx, DRM_PLANE_TYPE_PRIMARY,
- plane_crtc_mask);
- if (IS_ERR(plane))
- return PTR_ERR(plane);
-
- crtc = omap_crtc_init(dev, plane, dssdev);
+ crtc = omap_crtc_init(dev, pipe, priv->planes[i]);
if (IS_ERR(crtc))
return PTR_ERR(crtc);
drm_connector_attach_encoder(connector, encoder);
- encoder->possible_crtcs = (1 << crtc_idx);
-
- priv->crtcs[priv->num_crtcs++] = crtc;
- priv->planes[priv->num_planes++] = plane;
- priv->encoders[priv->num_encoders++] = encoder;
- priv->connectors[priv->num_connectors++] = connector;
+ encoder->possible_crtcs = 1 << i;
- plane_idx++;
- crtc_idx++;
+ pipe->crtc = crtc;
+ pipe->encoder = encoder;
+ pipe->connector = connector;
}
- /*
- * Create normal planes for the remaining overlays:
- */
- for (; plane_idx < num_ovls; plane_idx++) {
- struct drm_plane *plane;
-
- if (WARN_ON(priv->num_planes >= ARRAY_SIZE(priv->planes)))
- return -EINVAL;
-
- plane = omap_plane_init(dev, plane_idx, DRM_PLANE_TYPE_OVERLAY,
- plane_crtc_mask);
- if (IS_ERR(plane))
- return PTR_ERR(plane);
-
- priv->planes[priv->num_planes++] = plane;
- }
-
- DBG("registered %d planes, %d crtcs, %d encoders and %d connectors\n",
- priv->num_planes, priv->num_crtcs, priv->num_encoders,
- priv->num_connectors);
+ DBG("registered %u planes, %u crtcs/encoders/connectors\n",
+ priv->num_planes, priv->num_pipes);
dev->mode_config.min_width = 8;
dev->mode_config.min_height = 2;
@@ -335,27 +348,25 @@ static int omap_modeset_init(struct drm_device *dev)
/*
* Enable the HPD in external components if supported
*/
-static void omap_modeset_enable_external_hpd(void)
+static void omap_modeset_enable_external_hpd(struct drm_device *ddev)
{
- struct omap_dss_device *dssdev = NULL;
+ struct omap_drm_private *priv = ddev->dev_private;
+ int i;
- for_each_dss_dev(dssdev) {
- if (dssdev->driver->enable_hpd)
- dssdev->driver->enable_hpd(dssdev);
- }
+ for (i = 0; i < priv->num_pipes; i++)
+ omap_connector_enable_hpd(priv->pipes[i].connector);
}
/*
* Disable the HPD in external components if supported
*/
-static void omap_modeset_disable_external_hpd(void)
+static void omap_modeset_disable_external_hpd(struct drm_device *ddev)
{
- struct omap_dss_device *dssdev = NULL;
+ struct omap_drm_private *priv = ddev->dev_private;
+ int i;
- for_each_dss_dev(dssdev) {
- if (dssdev->driver->disable_hpd)
- dssdev->driver->disable_hpd(dssdev);
- }
+ for (i = 0; i < priv->num_pipes; i++)
+ omap_connector_disable_hpd(priv->pipes[i].connector);
}
/*
@@ -525,6 +536,14 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
DBG("%s", dev_name(dev));
+ /* Allocate and initialize the DRM device. */
+ ddev = drm_dev_alloc(&omap_drm_driver, dev);
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+
+ priv->ddev = ddev;
+ ddev->dev_private = priv;
+
priv->dev = dev;
priv->dss = omapdss_get_dss();
priv->dispc = dispc_get_dispc(priv->dss);
@@ -532,7 +551,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
omap_crtc_pre_init(priv);
- ret = omap_connect_dssdevs();
+ ret = omap_connect_pipelines(ddev);
if (ret)
goto err_crtc_uninit;
@@ -543,16 +562,6 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
mutex_init(&priv->list_lock);
INIT_LIST_HEAD(&priv->obj_list);
- /* Allocate and initialize the DRM device. */
- ddev = drm_dev_alloc(&omap_drm_driver, priv->dev);
- if (IS_ERR(ddev)) {
- ret = PTR_ERR(ddev);
- goto err_destroy_wq;
- }
-
- priv->ddev = ddev;
- ddev->dev_private = priv;
-
/* Get memory bandwidth limits */
if (priv->dispc_ops->get_memory_bandwidth_limit)
priv->max_bandwidth =
@@ -563,23 +572,23 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
ret = omap_modeset_init(ddev);
if (ret) {
dev_err(priv->dev, "omap_modeset_init failed: ret=%d\n", ret);
- goto err_free_drm_dev;
+ goto err_gem_deinit;
}
/* Initialize vblank handling, start with all CRTCs disabled. */
- ret = drm_vblank_init(ddev, priv->num_crtcs);
+ ret = drm_vblank_init(ddev, priv->num_pipes);
if (ret) {
dev_err(priv->dev, "could not init vblank\n");
goto err_cleanup_modeset;
}
- for (i = 0; i < priv->num_crtcs; i++)
- drm_crtc_vblank_off(priv->crtcs[i]);
+ for (i = 0; i < priv->num_pipes; i++)
+ drm_crtc_vblank_off(priv->pipes[i].crtc);
omap_fbdev_init(ddev);
drm_kms_helper_poll_init(ddev);
- omap_modeset_enable_external_hpd();
+ omap_modeset_enable_external_hpd(ddev);
/*
* Register the DRM device with the core and the connectors with
@@ -592,21 +601,20 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
return 0;
err_cleanup_helpers:
- omap_modeset_disable_external_hpd();
+ omap_modeset_disable_external_hpd(ddev);
drm_kms_helper_poll_fini(ddev);
omap_fbdev_fini(ddev);
err_cleanup_modeset:
drm_mode_config_cleanup(ddev);
omap_drm_irq_uninstall(ddev);
-err_free_drm_dev:
+err_gem_deinit:
omap_gem_deinit(ddev);
- drm_dev_unref(ddev);
-err_destroy_wq:
destroy_workqueue(priv->wq);
- omap_disconnect_dssdevs();
+ omap_disconnect_pipelines(ddev);
err_crtc_uninit:
- omap_crtc_pre_uninit();
+ omap_crtc_pre_uninit(priv);
+ drm_dev_unref(ddev);
return ret;
}
@@ -618,7 +626,7 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
drm_dev_unregister(ddev);
- omap_modeset_disable_external_hpd();
+ omap_modeset_disable_external_hpd(ddev);
drm_kms_helper_poll_fini(ddev);
omap_fbdev_fini(ddev);
@@ -630,12 +638,12 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
omap_drm_irq_uninstall(ddev);
omap_gem_deinit(ddev);
- drm_dev_unref(ddev);
-
destroy_workqueue(priv->wq);
- omap_disconnect_dssdevs();
- omap_crtc_pre_uninit();
+ omap_disconnect_pipelines(ddev);
+ omap_crtc_pre_uninit(priv);
+
+ drm_dev_unref(ddev);
}
static int pdev_probe(struct platform_device *pdev)
@@ -677,36 +685,36 @@ static int pdev_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int omap_drm_suspend_all_displays(void)
+static int omap_drm_suspend_all_displays(struct drm_device *ddev)
{
- struct omap_dss_device *dssdev = NULL;
+ struct omap_drm_private *priv = ddev->dev_private;
+ int i;
- for_each_dss_dev(dssdev) {
- if (!dssdev->driver)
- continue;
+ for (i = 0; i < priv->num_pipes; i++) {
+ struct omap_dss_device *display = priv->pipes[i].display;
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
- dssdev->driver->disable(dssdev);
- dssdev->activate_after_resume = true;
+ if (display->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ display->ops->disable(display);
+ display->activate_after_resume = true;
} else {
- dssdev->activate_after_resume = false;
+ display->activate_after_resume = false;
}
}
return 0;
}
-static int omap_drm_resume_all_displays(void)
+static int omap_drm_resume_all_displays(struct drm_device *ddev)
{
- struct omap_dss_device *dssdev = NULL;
+ struct omap_drm_private *priv = ddev->dev_private;
+ int i;
- for_each_dss_dev(dssdev) {
- if (!dssdev->driver)
- continue;
+ for (i = 0; i < priv->num_pipes; i++) {
+ struct omap_dss_device *display = priv->pipes[i].display;
- if (dssdev->activate_after_resume) {
- dssdev->driver->enable(dssdev);
- dssdev->activate_after_resume = false;
+ if (display->activate_after_resume) {
+ display->ops->enable(display);
+ display->activate_after_resume = false;
}
}
@@ -721,7 +729,7 @@ static int omap_drm_suspend(struct device *dev)
drm_kms_helper_poll_disable(drm_dev);
drm_modeset_lock_all(drm_dev);
- omap_drm_suspend_all_displays();
+ omap_drm_suspend_all_displays(drm_dev);
drm_modeset_unlock_all(drm_dev);
return 0;
@@ -733,7 +741,7 @@ static int omap_drm_resume(struct device *dev)
struct drm_device *drm_dev = priv->ddev;
drm_modeset_lock_all(drm_dev);
- omap_drm_resume_all_displays();
+ omap_drm_resume_all_displays(drm_dev);
drm_modeset_unlock_all(drm_dev);
drm_kms_helper_poll_enable(drm_dev);
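omap_connect_pipelines() above orders the discovered pipelines by DT alias with the kernel's sort() helper from <linux/sort.h>. A minimal, self-contained sketch of that comparator pattern, using a simplified element type in place of struct omap_drm_pipeline:

#include <linux/sort.h>

struct pipe_entry {
	int alias_id;
};

/* sort() expects a memcmp-style result: negative, zero or positive. */
static int pipe_cmp_alias(const void *a, const void *b)
{
	const struct pipe_entry *pa = a;
	const struct pipe_entry *pb = b;

	if (pa->alias_id != pb->alias_id)
		return pa->alias_id < pb->alias_id ? -1 : 1;

	return 0;
}

static void sort_pipes_by_alias(struct pipe_entry *pipes, unsigned int count)
{
	sort(pipes, count, sizeof(*pipes), pipe_cmp_alias, NULL);
}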
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index f27c8e216adf..bd7f2c227a25 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -45,6 +45,14 @@
struct omap_drm_usergart;
+struct omap_drm_pipeline {
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct omap_dss_device *output;
+ struct omap_dss_device *display;
+};
+
struct omap_drm_private {
struct drm_device *ddev;
struct device *dev;
@@ -54,18 +62,13 @@ struct omap_drm_private {
struct dispc_device *dispc;
const struct dispc_ops *dispc_ops;
- unsigned int num_crtcs;
- struct drm_crtc *crtcs[8];
+ unsigned int num_pipes;
+ struct omap_drm_pipeline pipes[8];
+ struct omap_drm_pipeline *channels[8];
unsigned int num_planes;
struct drm_plane *planes[8];
- unsigned int num_encoders;
- struct drm_encoder *encoders[8];
-
- unsigned int num_connectors;
- struct drm_connector *connectors[8];
-
struct drm_fb_helper *fbdev;
struct workqueue_struct *wq;
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index fcdf4b0a8eec..452e625f6ce3 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -36,16 +36,10 @@
*/
struct omap_encoder {
struct drm_encoder base;
- struct omap_dss_device *dssdev;
+ struct omap_dss_device *output;
+ struct omap_dss_device *display;
};
-struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder)
-{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-
- return omap_encoder->dssdev;
-}
-
static void omap_encoder_destroy(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
@@ -59,16 +53,65 @@ static const struct drm_encoder_funcs omap_encoder_funcs = {
};
static void omap_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->dssdev;
struct drm_connector *connector;
+ struct omap_dss_device *dssdev;
+ struct videomode vm = { 0 };
bool hdmi_mode;
int r;
+ drm_display_mode_to_videomode(adjusted_mode, &vm);
+
+ /*
+ * HACK: This fixes the vm flags.
+ * struct drm_display_mode does not contain the VSYNC/HSYNC/DE flags and
+ * they get lost when converting back and forth between struct
+ * drm_display_mode and struct videomode. The hack below goes and
+ * fetches the missing flags.
+ *
+ * A better solution is to use DRM's bus-flags through the whole driver.
+ */
+ for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) {
+ unsigned long bus_flags = dssdev->bus_flags;
+
+ if (!(vm.flags & (DISPLAY_FLAGS_DE_LOW |
+ DISPLAY_FLAGS_DE_HIGH))) {
+ if (bus_flags & DRM_BUS_FLAG_DE_LOW)
+ vm.flags |= DISPLAY_FLAGS_DE_LOW;
+ else if (bus_flags & DRM_BUS_FLAG_DE_HIGH)
+ vm.flags |= DISPLAY_FLAGS_DE_HIGH;
+ }
+
+ if (!(vm.flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE))) {
+ if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
+ vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+ else if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ vm.flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+ }
+
+ if (!(vm.flags & (DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE))) {
+ if (bus_flags & DRM_BUS_FLAG_SYNC_POSEDGE)
+ vm.flags |= DISPLAY_FLAGS_SYNC_POSEDGE;
+ else if (bus_flags & DRM_BUS_FLAG_SYNC_NEGEDGE)
+ vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
+ }
+ }
+
+ /* Set timings for all devices in the display pipeline. */
+ dss_mgr_set_timings(omap_encoder->output, &vm);
+
+ for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) {
+ if (dssdev->ops->set_timings)
+ dssdev->ops->set_timings(dssdev, &vm);
+ }
+
+ /* Set the HDMI mode and HDMI infoframe if applicable. */
hdmi_mode = false;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
@@ -77,73 +120,36 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
}
}
- if (dssdev->driver->set_hdmi_mode)
- dssdev->driver->set_hdmi_mode(dssdev, hdmi_mode);
+ dssdev = omap_encoder->output;
+
+ if (dssdev->ops->hdmi.set_hdmi_mode)
+ dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
- if (hdmi_mode && dssdev->driver->set_hdmi_infoframe) {
+ if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
struct hdmi_avi_infoframe avi;
r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode,
false);
if (r == 0)
- dssdev->driver->set_hdmi_infoframe(dssdev, &avi);
+ dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
}
}
static void omap_encoder_disable(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->dssdev;
- struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct omap_dss_device *dssdev = omap_encoder->display;
- dssdrv->disable(dssdev);
-}
-
-static int omap_encoder_update(struct drm_encoder *encoder,
- enum omap_channel channel,
- struct videomode *vm)
-{
- struct drm_device *dev = encoder->dev;
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->dssdev;
- struct omap_dss_driver *dssdrv = dssdev->driver;
- int ret;
-
- if (dssdrv->check_timings) {
- ret = dssdrv->check_timings(dssdev, vm);
- } else {
- struct videomode t = {0};
-
- dssdrv->get_timings(dssdev, &t);
-
- if (memcmp(vm, &t, sizeof(*vm)))
- ret = -EINVAL;
- else
- ret = 0;
- }
-
- if (ret) {
- dev_err(dev->dev, "could not set timings: %d\n", ret);
- return ret;
- }
-
- if (dssdrv->set_timings)
- dssdrv->set_timings(dssdev, vm);
-
- return 0;
+ dssdev->ops->disable(dssdev);
}
static void omap_encoder_enable(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->dssdev;
- struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct omap_dss_device *dssdev = omap_encoder->display;
int r;
- omap_encoder_update(encoder, omap_crtc_channel(encoder->crtc),
- omap_crtc_timings(encoder->crtc));
-
- r = dssdrv->enable(dssdev);
+ r = dssdev->ops->enable(dssdev);
if (r)
dev_err(encoder->dev->dev,
"Failed to enable display '%s': %d\n",
@@ -154,7 +160,36 @@ static int omap_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- return 0;
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ enum omap_channel channel = omap_encoder->output->dispc_channel;
+ struct drm_device *dev = encoder->dev;
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_dss_device *dssdev;
+ struct videomode vm = { 0 };
+ int ret;
+
+ drm_display_mode_to_videomode(&crtc_state->mode, &vm);
+
+ ret = priv->dispc_ops->mgr_check_timings(priv->dispc, channel, &vm);
+ if (ret)
+ goto done;
+
+ for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) {
+ if (!dssdev->ops->check_timings)
+ continue;
+
+ ret = dssdev->ops->check_timings(dssdev, &vm);
+ if (ret)
+ goto done;
+ }
+
+ drm_display_mode_from_videomode(&vm, &crtc_state->adjusted_mode);
+
+done:
+ if (ret)
+ dev_err(dev->dev, "invalid timings: %d\n", ret);
+
+ return ret;
}
static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
@@ -166,7 +201,8 @@ static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
/* initialize encoder */
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
- struct omap_dss_device *dssdev)
+ struct omap_dss_device *output,
+ struct omap_dss_device *display)
{
struct drm_encoder *encoder = NULL;
struct omap_encoder *omap_encoder;
@@ -175,7 +211,8 @@ struct drm_encoder *omap_encoder_init(struct drm_device *dev,
if (!omap_encoder)
goto fail;
- omap_encoder->dssdev = dssdev;
+ omap_encoder->output = output;
+ omap_encoder->display = display;
encoder = &omap_encoder->base;
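The omap_encoder_mode_set() hunk above recovers the DE, pixel-data and sync polarity flags from each device's bus_flags, since struct drm_display_mode cannot carry them through the mode/videomode round trip. The core of that fixup, shown for the data-enable pair only and assuming the same DRM_BUS_FLAG_*/DISPLAY_FLAGS_* constants the patch uses:

#include <drm/drm_connector.h>
#include <video/display_timing.h>
#include <video/videomode.h>

/* Fill in the DE polarity from bus_flags if the conversion lost it. */
static void fixup_de_flags(struct videomode *vm, u32 bus_flags)
{
	if (vm->flags & (DISPLAY_FLAGS_DE_LOW | DISPLAY_FLAGS_DE_HIGH))
		return;		/* the mode already carries a polarity */

	if (bus_flags & DRM_BUS_FLAG_DE_LOW)
		vm->flags |= DISPLAY_FLAGS_DE_LOW;
	else if (bus_flags & DRM_BUS_FLAG_DE_HIGH)
		vm->flags |= DISPLAY_FLAGS_DE_HIGH;
}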
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.h b/drivers/gpu/drm/omapdrm/omap_encoder.h
index d2f308bec494..a7b5dde63ecb 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.h
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.h
@@ -25,9 +25,7 @@ struct drm_encoder;
struct omap_dss_device;
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
- struct omap_dss_device *dssdev);
-
-/* map crtc to vblank mask */
-struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
+ struct omap_dss_device *output,
+ struct omap_dss_device *display);
#endif /* __OMAPDRM_ENCODER_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index d958cc813a94..b445309b0143 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -243,7 +243,7 @@ void omap_fbdev_init(struct drm_device *dev)
struct drm_fb_helper *helper;
int ret = 0;
- if (!priv->num_crtcs || !priv->num_connectors)
+ if (!priv->num_pipes)
return;
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
@@ -256,7 +256,7 @@ void omap_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
+ ret = drm_fb_helper_init(dev, helper, priv->num_pipes);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index c85115049f86..329ad26d6d50 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -206,8 +206,8 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
VERB("irqs: %08x", irqstatus);
- for (id = 0; id < priv->num_crtcs; id++) {
- struct drm_crtc *crtc = priv->crtcs[id];
+ for (id = 0; id < priv->num_pipes; id++) {
+ struct drm_crtc *crtc = priv->pipes[id].crtc;
enum omap_channel channel = omap_crtc_channel(crtc);
if (irqstatus & priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, channel)) {
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index 8a1687887ae9..3f6550e6b6a4 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Generic LVDS panel driver
*
@@ -5,11 +6,6 @@
* Copyright (C) 2016 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/backlight.h>
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 0570c6826bff..87d16a0ce01e 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -28,6 +28,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "qxl_drv.h"
#include "qxl_object.h"
@@ -37,7 +38,8 @@ static bool qxl_head_enabled(struct qxl_head *head)
return head->width && head->height;
}
-static void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
+static int qxl_alloc_client_monitors_config(struct qxl_device *qdev,
+ unsigned int count)
{
if (qdev->client_monitors_config &&
count > qdev->client_monitors_config->count) {
@@ -49,15 +51,17 @@ static void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned c
sizeof(struct qxl_monitors_config) +
sizeof(struct qxl_head) * count, GFP_KERNEL);
if (!qdev->client_monitors_config)
- return;
+ return -ENOMEM;
}
qdev->client_monitors_config->count = count;
+ return 0;
}
enum {
MONITORS_CONFIG_MODIFIED,
MONITORS_CONFIG_UNCHANGED,
MONITORS_CONFIG_BAD_CRC,
+ MONITORS_CONFIG_ERROR,
};
static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
@@ -87,7 +91,10 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
&& (num_monitors != qdev->client_monitors_config->count)) {
status = MONITORS_CONFIG_MODIFIED;
}
- qxl_alloc_client_monitors_config(qdev, num_monitors);
+ if (qxl_alloc_client_monitors_config(qdev, num_monitors)) {
+ status = MONITORS_CONFIG_ERROR;
+ return status;
+ }
/* we copy max from the client but it isn't used */
qdev->client_monitors_config->max_allowed =
qdev->monitors_config->max_allowed;
@@ -161,6 +168,10 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
break;
udelay(5);
}
+ if (status == MONITORS_CONFIG_ERROR) {
+ DRM_DEBUG_KMS("ignoring client monitors config: error");
+ return;
+ }
if (status == MONITORS_CONFIG_BAD_CRC) {
DRM_DEBUG_KMS("ignoring client monitors config: bad crc");
return;
@@ -378,17 +389,6 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
-void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
- struct qxl_bo *bo = gem_to_qxl_bo(qxl_fb->obj);
-
- WARN_ON(bo->shadow);
- drm_gem_object_put_unlocked(qxl_fb->obj);
- drm_framebuffer_cleanup(fb);
- kfree(qxl_fb);
-}
-
static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned flags, unsigned color,
@@ -396,15 +396,14 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
unsigned num_clips)
{
/* TODO: vmwgfx where this was cribbed from had locking. Why? */
- struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
- struct qxl_device *qdev = qxl_fb->base.dev->dev_private;
+ struct qxl_device *qdev = fb->dev->dev_private;
struct drm_clip_rect norect;
struct qxl_bo *qobj;
int inc = 1;
drm_modeset_lock_all(fb->dev);
- qobj = gem_to_qxl_bo(qxl_fb->obj);
+ qobj = gem_to_qxl_bo(fb->obj[0]);
/* if we aren't primary surface ignore this */
if (!qobj->is_primary) {
drm_modeset_unlock_all(fb->dev);
@@ -422,7 +421,7 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
inc = 2; /* skip source rects */
}
- qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color,
+ qxl_draw_dirty_fb(qdev, fb, qobj, flags, color,
clips, num_clips, inc);
drm_modeset_unlock_all(fb->dev);
@@ -431,31 +430,11 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
}
static const struct drm_framebuffer_funcs qxl_fb_funcs = {
- .destroy = qxl_user_framebuffer_destroy,
+ .destroy = drm_gem_fb_destroy,
.dirty = qxl_framebuffer_surface_dirty,
-/* TODO?
- * .create_handle = qxl_user_framebuffer_create_handle, */
+ .create_handle = drm_gem_fb_create_handle,
};
-int
-qxl_framebuffer_init(struct drm_device *dev,
- struct qxl_framebuffer *qfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj,
- const struct drm_framebuffer_funcs *funcs)
-{
- int ret;
-
- qfb->obj = obj;
- drm_helper_mode_fill_fb_struct(dev, &qfb->base, mode_cmd);
- ret = drm_framebuffer_init(dev, &qfb->base, funcs);
- if (ret) {
- qfb->obj = NULL;
- return ret;
- }
- return 0;
-}
-
static void qxl_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -478,14 +457,12 @@ static int qxl_primary_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct qxl_device *qdev = plane->dev->dev_private;
- struct qxl_framebuffer *qfb;
struct qxl_bo *bo;
if (!state->crtc || !state->fb)
return 0;
- qfb = to_qxl_framebuffer(state->fb);
- bo = gem_to_qxl_bo(qfb->obj);
+ bo = gem_to_qxl_bo(state->fb->obj[0]);
if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
DRM_ERROR("Mode doesn't fit in vram size (vgamem)");
@@ -546,23 +523,19 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct qxl_device *qdev = plane->dev->dev_private;
- struct qxl_framebuffer *qfb =
- to_qxl_framebuffer(plane->state->fb);
- struct qxl_framebuffer *qfb_old;
- struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj);
+ struct qxl_bo *bo = gem_to_qxl_bo(plane->state->fb->obj[0]);
struct qxl_bo *bo_old;
struct drm_clip_rect norect = {
.x1 = 0,
.y1 = 0,
- .x2 = qfb->base.width,
- .y2 = qfb->base.height
+ .x2 = plane->state->fb->width,
+ .y2 = plane->state->fb->height
};
int ret;
bool same_shadow = false;
if (old_state->fb) {
- qfb_old = to_qxl_framebuffer(old_state->fb);
- bo_old = gem_to_qxl_bo(qfb_old->obj);
+ bo_old = gem_to_qxl_bo(old_state->fb->obj[0]);
} else {
bo_old = NULL;
}
@@ -592,7 +565,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
bo->is_primary = true;
}
- qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1);
+ qxl_draw_dirty_fb(qdev, plane->state->fb, bo, 0, 0, &norect, 1, 1);
}
static void qxl_primary_atomic_disable(struct drm_plane *plane,
@@ -601,9 +574,7 @@ static void qxl_primary_atomic_disable(struct drm_plane *plane,
struct qxl_device *qdev = plane->dev->dev_private;
if (old_state->fb) {
- struct qxl_framebuffer *qfb =
- to_qxl_framebuffer(old_state->fb);
- struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj);
+ struct qxl_bo *bo = gem_to_qxl_bo(old_state->fb->obj[0]);
if (bo->is_primary) {
qxl_io_destroy_primary(qdev);
@@ -635,7 +606,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
return;
if (fb != old_state->fb) {
- obj = to_qxl_framebuffer(fb)->obj;
+ obj = fb->obj[0];
user_bo = gem_to_qxl_bo(obj);
/* pinning is done in the prepare/cleanup framebuffer */
@@ -755,13 +726,13 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
if (!new_state->fb)
return 0;
- obj = to_qxl_framebuffer(new_state->fb)->obj;
+ obj = new_state->fb->obj[0];
user_bo = gem_to_qxl_bo(obj);
if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
user_bo->is_dumb && !user_bo->shadow) {
if (plane->state->fb) {
- obj = to_qxl_framebuffer(plane->state->fb)->obj;
+ obj = plane->state->fb->obj[0];
old_bo = gem_to_qxl_bo(obj);
}
if (old_bo && old_bo->shadow &&
@@ -805,7 +776,7 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane,
return;
}
- obj = to_qxl_framebuffer(old_state->fb)->obj;
+ obj = old_state->fb->obj[0];
user_bo = gem_to_qxl_bo(obj);
qxl_bo_unpin(user_bo);
@@ -1105,26 +1076,8 @@ qxl_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct drm_gem_object *obj;
- struct qxl_framebuffer *qxl_fb;
- int ret;
-
- obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
- if (!obj)
- return NULL;
-
- qxl_fb = kzalloc(sizeof(*qxl_fb), GFP_KERNEL);
- if (qxl_fb == NULL)
- return NULL;
-
- ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj, &qxl_fb_funcs);
- if (ret) {
- kfree(qxl_fb);
- drm_gem_object_put_unlocked(obj);
- return NULL;
- }
-
- return &qxl_fb->base;
+ return drm_gem_fb_create_with_funcs(dev, file_priv, mode_cmd,
+ &qxl_fb_funcs);
}
static const struct drm_mode_config_funcs qxl_mode_funcs = {
@@ -1211,7 +1164,6 @@ int qxl_modeset_init(struct qxl_device *qdev)
}
qxl_display_read_client_monitors_config(qdev);
- qdev->mode_info.mode_config_initialized = true;
drm_mode_config_reset(&qdev->ddev);
@@ -1227,8 +1179,5 @@ void qxl_modeset_fini(struct qxl_device *qdev)
qxl_fbdev_fini(qdev);
qxl_destroy_monitors_object(qdev);
- if (qdev->mode_info.mode_config_initialized) {
- drm_mode_config_cleanup(&qdev->ddev);
- qdev->mode_info.mode_config_initialized = false;
- }
+ drm_mode_config_cleanup(&qdev->ddev);
}
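The qxl_display.c conversion above drops the driver-private struct qxl_framebuffer and leans on the generic GEM framebuffer helpers: the backing object is reached through fb->obj[0], and creation, destruction and handle export go through the drm_gem_fb_* helpers. A condensed sketch of that usage, with a hypothetical dirty callback standing in for the qxl one:

#include <drm/drm_device.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>

static int example_fb_dirty(struct drm_framebuffer *fb,
			    struct drm_file *file_priv,
			    unsigned int flags, unsigned int color,
			    struct drm_clip_rect *clips,
			    unsigned int num_clips)
{
	/* The backing GEM object now lives in the generic obj[] array. */
	struct drm_gem_object *obj = fb->obj[0];

	(void)obj;	/* a real driver would flush damage to hardware here */

	return 0;
}

static const struct drm_framebuffer_funcs example_fb_funcs = {
	.destroy	= drm_gem_fb_destroy,
	.create_handle	= drm_gem_fb_create_handle,
	.dirty		= example_fb_dirty,
};

static struct drm_framebuffer *
example_fb_create(struct drm_device *dev, struct drm_file *file_priv,
		  const struct drm_mode_fb_cmd2 *mode_cmd)
{
	return drm_gem_fb_create_with_funcs(dev, file_priv, mode_cmd,
					    &example_fb_funcs);
}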
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 4d8681e84e68..cc5b32e749ce 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -262,7 +262,7 @@ out_free_drawable:
* by treating them differently in the server.
*/
void qxl_draw_dirty_fb(struct qxl_device *qdev,
- struct qxl_framebuffer *qxl_fb,
+ struct drm_framebuffer *fb,
struct qxl_bo *bo,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
@@ -281,9 +281,9 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
struct qxl_drawable *drawable;
struct qxl_rect drawable_rect;
struct qxl_rect *rects;
- int stride = qxl_fb->base.pitches[0];
+ int stride = fb->pitches[0];
/* depth is not actually interesting, we don't mask with it */
- int depth = qxl_fb->base.format->cpp[0] * 8;
+ int depth = fb->format->cpp[0] * 8;
uint8_t *surface_base;
struct qxl_release *release;
struct qxl_bo *clips_bo;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 2445e75cf7ea..13c8a662f9b4 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -119,7 +119,7 @@ qxl_pci_remove(struct pci_dev *pdev)
dev->dev_private = NULL;
kfree(qdev);
- drm_dev_unref(dev);
+ drm_dev_put(dev);
}
static const struct file_operations qxl_fops = {
@@ -136,20 +136,11 @@ static int qxl_drm_freeze(struct drm_device *dev)
{
struct pci_dev *pdev = dev->pdev;
struct qxl_device *qdev = dev->dev_private;
- struct drm_crtc *crtc;
-
- drm_kms_helper_poll_disable(dev);
-
- console_lock();
- qxl_fbdev_set_suspend(qdev, 1);
- console_unlock();
+ int ret;
- /* unpin the front buffers */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- if (crtc->enabled)
- (*crtc_funcs->disable)(crtc);
- }
+ ret = drm_mode_config_helper_suspend(dev);
+ if (ret)
+ return ret;
qxl_destroy_monitors_object(qdev);
qxl_surf_evict(qdev);
@@ -175,14 +166,7 @@ static int qxl_drm_resume(struct drm_device *dev, bool thaw)
}
qxl_create_monitors_object(qdev);
- drm_helper_resume_force_mode(dev);
-
- console_lock();
- qxl_fbdev_set_suspend(qdev, 0);
- console_unlock();
-
- drm_kms_helper_poll_enable(dev);
- return 0;
+ return drm_mode_config_helper_resume(dev);
}
static int qxl_pm_suspend(struct device *dev)
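
The freeze/resume hunks above drop the hand-rolled console_lock()/fbdev-suspend/CRTC-disable sequence in favour of the generic suspend helpers. A minimal sketch of the resulting PM hooks, assuming hypothetical example_* names:

#include <linux/device.h>
#include <drm/drmP.h>
#include <drm/drm_modeset_helper.h>

static int example_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	/* Disables output polling, suspends fbdev emulation and saves the
	 * atomic state of all CRTCs before the device powers down. */
	return drm_mode_config_helper_suspend(drm);
}

static int example_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	/* Restores the saved atomic state and re-enables fbdev and polling. */
	return drm_mode_config_helper_resume(drm);
}
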
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 01220d386b0a..8ff70a7281a7 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -38,6 +38,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_api.h>
@@ -121,15 +122,9 @@ struct qxl_output {
struct drm_encoder enc;
};
-struct qxl_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
-};
-
#define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base)
#define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base)
#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
-#define to_qxl_framebuffer(x) container_of(x, struct qxl_framebuffer, base)
struct qxl_mman {
struct ttm_bo_global_ref bo_global_ref;
@@ -138,13 +133,6 @@ struct qxl_mman {
struct ttm_bo_device bdev;
};
-struct qxl_mode_info {
- bool mode_config_initialized;
-
- /* pointer to fbdev info structure */
- struct qxl_fbdev *qfbdev;
-};
-
struct qxl_memslot {
uint8_t generation;
@@ -232,10 +220,9 @@ struct qxl_device {
void *ram;
struct qxl_mman mman;
struct qxl_gem gem;
- struct qxl_mode_info mode_info;
- struct fb_info *fbdev_info;
- struct qxl_framebuffer *fbdev_qfb;
+ struct drm_fb_helper fb_helper;
+
void *ram_physical;
struct qxl_ring *release_ring;
@@ -349,19 +336,8 @@ qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo,
int qxl_fbdev_init(struct qxl_device *qdev);
void qxl_fbdev_fini(struct qxl_device *qdev);
-int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
- struct drm_file *file_priv,
- uint32_t *handle);
-void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state);
/* qxl_display.c */
-void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb);
-int
-qxl_framebuffer_init(struct drm_device *dev,
- struct qxl_framebuffer *rfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj,
- const struct drm_framebuffer_funcs *funcs);
void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
int qxl_create_monitors_object(struct qxl_device *qdev);
int qxl_destroy_monitors_object(struct qxl_device *qdev);
@@ -471,7 +447,7 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
int stride /* filled in if 0 */);
void qxl_draw_dirty_fb(struct qxl_device *qdev,
- struct qxl_framebuffer *qxl_fb,
+ struct drm_framebuffer *fb,
struct qxl_bo *bo,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index ca465c0d49fa..2294b7f14fdf 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -30,24 +30,12 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "qxl_drv.h"
#include "qxl_object.h"
-#define QXL_DIRTY_DELAY (HZ / 30)
-
-struct qxl_fbdev {
- struct drm_fb_helper helper;
- struct qxl_framebuffer qfb;
- struct qxl_device *qdev;
-
- spinlock_t delayed_ops_lock;
- struct list_head delayed_ops;
- void *shadow;
- int size;
-};
-
static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
struct qxl_device *qdev, struct fb_info *info,
const struct fb_image *image)
@@ -73,13 +61,6 @@ static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
}
}
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-static struct fb_deferred_io qxl_defio = {
- .delay = QXL_DIRTY_DELAY,
- .deferred_io = drm_fb_helper_deferred_io,
-};
-#endif
-
static struct fb_ops qxlfb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
@@ -98,26 +79,10 @@ static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
drm_gem_object_put_unlocked(gobj);
}
-int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
- struct drm_file *file_priv,
- uint32_t *handle)
-{
- int r;
- struct drm_gem_object *gobj = qdev->fbdev_qfb->obj;
-
- BUG_ON(!gobj);
- /* drm_get_handle_create adds a reference - good */
- r = drm_gem_handle_create(file_priv, gobj, handle);
- if (r)
- return r;
- return 0;
-}
-
-static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
+static int qxlfb_create_pinned_object(struct qxl_device *qdev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **gobj_p)
{
- struct qxl_device *qdev = qfbdev->qdev;
struct drm_gem_object *gobj = NULL;
struct qxl_bo *qbo = NULL;
int ret;
@@ -174,13 +139,12 @@ static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
unsigned num_clips)
{
struct qxl_device *qdev = fb->dev->dev_private;
- struct fb_info *info = qdev->fbdev_info;
- struct qxl_fbdev *qfbdev = info->par;
+ struct fb_info *info = qdev->fb_helper.fbdev;
struct qxl_fb_image qxl_fb_image;
struct fb_image *image = &qxl_fb_image.fb_image;
/* TODO: hard coding 32 bpp */
- int stride = qfbdev->qfb.base.pitches[0];
+ int stride = fb->pitches[0];
/*
* we are using a shadow draw buffer, at qdev->surface0_shadow
@@ -199,7 +163,7 @@ static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
image->cmap.green = NULL;
image->cmap.blue = NULL;
image->cmap.transp = NULL;
- image->data = qfbdev->shadow + (clips->x1 * 4) + (stride * clips->y1);
+ image->data = info->screen_base + (clips->x1 * 4) + (stride * clips->y1);
qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
qxl_draw_opaque_fb(&qxl_fb_image, stride);
@@ -208,21 +172,22 @@ static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
}
static const struct drm_framebuffer_funcs qxlfb_fb_funcs = {
- .destroy = qxl_user_framebuffer_destroy,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
.dirty = qxlfb_framebuffer_dirty,
};
-static int qxlfb_create(struct qxl_fbdev *qfbdev,
+static int qxlfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
- struct qxl_device *qdev = qfbdev->qdev;
+ struct qxl_device *qdev =
+ container_of(helper, struct qxl_device, fb_helper);
struct fb_info *info;
struct drm_framebuffer *fb = NULL;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
struct qxl_bo *qbo = NULL;
int ret;
- int size;
int bpp = sizes->surface_bpp;
int depth = sizes->surface_depth;
void *shadow;
@@ -233,7 +198,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
- ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj);
+ ret = qxlfb_create_pinned_object(qdev, &mode_cmd, &gobj);
if (ret < 0)
return ret;
@@ -247,25 +212,26 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
DRM_DEBUG_DRIVER("surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
qxl_bo_gpu_offset(qbo), qxl_bo_mmap_offset(qbo),
qbo->kptr, shadow);
- size = mode_cmd.pitches[0] * mode_cmd.height;
- info = drm_fb_helper_alloc_fbi(&qfbdev->helper);
+ info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto out_unref;
}
- info->par = qfbdev;
-
- qxl_framebuffer_init(&qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj,
- &qxlfb_fb_funcs);
+ info->par = helper;
- fb = &qfbdev->qfb.base;
+ fb = drm_gem_fbdev_fb_create(&qdev->ddev, sizes, 64, gobj,
+ &qxlfb_fb_funcs);
+ if (IS_ERR(fb)) {
+ DRM_ERROR("Failed to create framebuffer: %ld\n", PTR_ERR(fb));
+ ret = PTR_ERR(fb);
+ goto out_unref;
+ }
/* setup helper with fb data */
- qfbdev->helper.fb = fb;
+ qdev->fb_helper.fb = fb;
- qfbdev->shadow = shadow;
strcpy(info->fix.id, "qxldrmfb");
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
@@ -278,10 +244,10 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
*/
info->fix.smem_start = qdev->vram_base; /* TODO - correct? */
info->fix.smem_len = gobj->size;
- info->screen_base = qfbdev->shadow;
+ info->screen_base = shadow;
info->screen_size = gobj->size;
- drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width,
+ drm_fb_helper_fill_var(info, &qdev->fb_helper, sizes->fb_width,
sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
@@ -296,13 +262,9 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
goto out_unref;
}
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- info->fbdefio = &qxl_defio;
- fb_deferred_io_init(info);
-#endif
+ /* XXX error handling. */
+ drm_fb_helper_defio_init(helper);
- qdev->fbdev_info = info;
- qdev->fbdev_qfb = &qfbdev->qfb;
DRM_INFO("fb mappable at 0x%lX, size %lu\n", info->fix.smem_start, (unsigned long)info->screen_size);
DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n",
fb->format->depth, fb->pitches[0], fb->width, fb->height);
@@ -313,119 +275,26 @@ out_unref:
qxl_bo_kunmap(qbo);
qxl_bo_unpin(qbo);
}
- if (fb && ret) {
- drm_gem_object_put_unlocked(gobj);
- drm_framebuffer_cleanup(fb);
- kfree(fb);
- }
drm_gem_object_put_unlocked(gobj);
return ret;
}
-static int qxl_fb_find_or_create_single(
- struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct qxl_fbdev *qfbdev =
- container_of(helper, struct qxl_fbdev, helper);
- int new_fb = 0;
- int ret;
-
- if (!helper->fb) {
- ret = qxlfb_create(qfbdev, sizes);
- if (ret)
- return ret;
- new_fb = 1;
- }
- return new_fb;
-}
-
-static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
-{
- struct qxl_framebuffer *qfb = &qfbdev->qfb;
-
- drm_fb_helper_unregister_fbi(&qfbdev->helper);
-
- if (qfb->obj) {
- qxlfb_destroy_pinned_object(qfb->obj);
- qfb->obj = NULL;
- }
- drm_fb_helper_fini(&qfbdev->helper);
- vfree(qfbdev->shadow);
- drm_framebuffer_cleanup(&qfb->base);
-
- return 0;
-}
-
static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
- .fb_probe = qxl_fb_find_or_create_single,
+ .fb_probe = qxlfb_create,
};
int qxl_fbdev_init(struct qxl_device *qdev)
{
- int ret = 0;
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- struct qxl_fbdev *qfbdev;
- int bpp_sel = 32; /* TODO: parameter from somewhere? */
-
- qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL);
- if (!qfbdev)
- return -ENOMEM;
-
- qfbdev->qdev = qdev;
- qdev->mode_info.qfbdev = qfbdev;
- spin_lock_init(&qfbdev->delayed_ops_lock);
- INIT_LIST_HEAD(&qfbdev->delayed_ops);
-
- drm_fb_helper_prepare(&qdev->ddev, &qfbdev->helper,
- &qxl_fb_helper_funcs);
-
- ret = drm_fb_helper_init(&qdev->ddev, &qfbdev->helper,
- QXLFB_CONN_LIMIT);
- if (ret)
- goto free;
-
- ret = drm_fb_helper_single_add_all_connectors(&qfbdev->helper);
- if (ret)
- goto fini;
-
- ret = drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel);
- if (ret)
- goto fini;
-
- return 0;
-
-fini:
- drm_fb_helper_fini(&qfbdev->helper);
-free:
- kfree(qfbdev);
-#endif
-
- return ret;
+ return drm_fb_helper_fbdev_setup(&qdev->ddev, &qdev->fb_helper,
+ &qxl_fb_helper_funcs, 32,
+ QXLFB_CONN_LIMIT);
}
void qxl_fbdev_fini(struct qxl_device *qdev)
{
- if (!qdev->mode_info.qfbdev)
- return;
+ struct fb_info *fbi = qdev->fb_helper.fbdev;
+ void *shadow = fbi ? fbi->screen_buffer : NULL;
- qxl_fbdev_destroy(&qdev->ddev, qdev->mode_info.qfbdev);
- kfree(qdev->mode_info.qfbdev);
- qdev->mode_info.qfbdev = NULL;
-}
-
-void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state)
-{
- if (!qdev->mode_info.qfbdev)
- return;
-
- drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state);
-}
-
-bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj)
-{
- if (qobj == gem_to_qxl_bo(qdev->mode_info.qfbdev->qfb.obj))
- return true;
- return false;
+ drm_fb_helper_fbdev_teardown(&qdev->ddev);
+ vfree(shadow);
}
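
qxl_fbdev_init()/qxl_fbdev_fini() now lean on drm_fb_helper_fbdev_setup() and drm_fb_helper_fbdev_teardown(), which bundle prepare, init, connector registration and the initial config behind one call, with matching cleanup. A minimal sketch of that wiring (hypothetical example_* names, .fb_probe stubbed out):

#include <linux/errno.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>

struct example_device {				/* hypothetical driver struct */
	struct drm_device ddev;
	struct drm_fb_helper fb_helper;		/* embedded, as in the patch */
};

static int example_fb_probe(struct drm_fb_helper *helper,
			    struct drm_fb_helper_surface_size *sizes)
{
	/* A real driver allocates the fb_info and framebuffer here;
	 * stubbed out for this sketch. */
	return -ENOSYS;
}

static const struct drm_fb_helper_funcs example_fb_helper_funcs = {
	.fb_probe = example_fb_probe,
};

static int example_fbdev_init(struct example_device *edev)
{
	/* Prepare + init + add all connectors + initial config, with a
	 * preferred bpp of 32 and a connector limit of 1. */
	return drm_fb_helper_fbdev_setup(&edev->ddev, &edev->fb_helper,
					 &example_fb_helper_funcs, 32, 1);
}

static void example_fbdev_fini(struct example_device *edev)
{
	/* Unregisters the fbdev, finalises the helper and releases the fb. */
	drm_fb_helper_fbdev_teardown(&edev->ddev);
}
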
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index f5c1e7872e92..89606c819d82 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -40,7 +40,7 @@ void qxl_gem_object_free(struct drm_gem_object *gobj)
qxl_surface_evict(qdev, qobj, false);
tbo = &qobj->tbo;
- ttm_bo_unref(&tbo);
+ ttm_bo_put(tbo);
}
int qxl_gem_object_create(struct qxl_device *qdev, int size,
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 771250aed78d..e25c589d5f50 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -102,8 +102,10 @@ int qxl_device_init(struct qxl_device *qdev,
int r, sb;
r = drm_dev_init(&qdev->ddev, drv, &pdev->dev);
- if (r)
- return r;
+ if (r) {
+ pr_err("Unable to init drm dev");
+ goto error;
+ }
qdev->ddev.pdev = pdev;
pci_set_drvdata(pdev, &qdev->ddev);
@@ -121,6 +123,11 @@ int qxl_device_init(struct qxl_device *qdev,
qdev->io_base = pci_resource_start(pdev, 3);
qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
+ if (!qdev->vram_mapping) {
+ pr_err("Unable to create vram_mapping");
+ r = -ENOMEM;
+ goto error;
+ }
if (pci_resource_len(pdev, 4) > 0) {
/* 64bit surface bar present */
@@ -139,6 +146,11 @@ int qxl_device_init(struct qxl_device *qdev,
qdev->surface_mapping =
io_mapping_create_wc(qdev->surfaceram_base,
qdev->surfaceram_size);
+ if (!qdev->surface_mapping) {
+ pr_err("Unable to create surface_mapping");
+ r = -ENOMEM;
+ goto vram_mapping_free;
+ }
}
DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
@@ -155,20 +167,29 @@ int qxl_device_init(struct qxl_device *qdev,
qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
if (!qdev->rom) {
pr_err("Unable to ioremap ROM\n");
- return -ENOMEM;
+ r = -ENOMEM;
+ goto surface_mapping_free;
}
- qxl_check_device(qdev);
+ if (!qxl_check_device(qdev)) {
+ r = -ENODEV;
+ goto surface_mapping_free;
+ }
r = qxl_bo_init(qdev);
if (r) {
DRM_ERROR("bo init failed %d\n", r);
- return r;
+ goto rom_unmap;
}
qdev->ram_header = ioremap(qdev->vram_base +
qdev->rom->ram_header_offset,
sizeof(*qdev->ram_header));
+ if (!qdev->ram_header) {
+ DRM_ERROR("Unable to ioremap RAM header\n");
+ r = -ENOMEM;
+ goto bo_fini;
+ }
qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
sizeof(struct qxl_command),
@@ -176,6 +197,11 @@ int qxl_device_init(struct qxl_device *qdev,
qdev->io_base + QXL_IO_NOTIFY_CMD,
false,
&qdev->display_event);
+ if (!qdev->command_ring) {
+ DRM_ERROR("Unable to create command ring\n");
+ r = -ENOMEM;
+ goto ram_header_unmap;
+ }
qdev->cursor_ring = qxl_ring_create(
&(qdev->ram_header->cursor_ring_hdr),
@@ -185,12 +211,23 @@ int qxl_device_init(struct qxl_device *qdev,
false,
&qdev->cursor_event);
+ if (!qdev->cursor_ring) {
+ DRM_ERROR("Unable to create cursor ring\n");
+ r = -ENOMEM;
+ goto command_ring_free;
+ }
+
qdev->release_ring = qxl_ring_create(
&(qdev->ram_header->release_ring_hdr),
sizeof(uint64_t),
QXL_RELEASE_RING_SIZE, 0, true,
NULL);
+ if (!qdev->release_ring) {
+ DRM_ERROR("Unable to create release ring\n");
+ r = -ENOMEM;
+ goto cursor_ring_free;
+ }
/* TODO - slot initialization should happen on reset. where is our
* reset handler? */
qdev->n_mem_slots = qdev->rom->slots_end;
@@ -203,6 +240,12 @@ int qxl_device_init(struct qxl_device *qdev,
kmalloc_array(qdev->n_mem_slots, sizeof(struct qxl_memslot),
GFP_KERNEL);
+ if (!qdev->mem_slots) {
+ DRM_ERROR("Unable to alloc mem slots\n");
+ r = -ENOMEM;
+ goto release_ring_free;
+ }
+
idr_init(&qdev->release_idr);
spin_lock_init(&qdev->release_idr_lock);
spin_lock_init(&qdev->release_lock);
@@ -218,8 +261,10 @@ int qxl_device_init(struct qxl_device *qdev,
/* must initialize irq before first async io - slot creation */
r = qxl_irq_init(qdev);
- if (r)
- return r;
+ if (r) {
+ DRM_ERROR("Unable to init qxl irq\n");
+ goto mem_slots_free;
+ }
/*
* Note that virtual is surface0. We rely on the single ioremap done
@@ -243,6 +288,27 @@ int qxl_device_init(struct qxl_device *qdev,
INIT_WORK(&qdev->gc_work, qxl_gc_work);
return 0;
+
+mem_slots_free:
+ kfree(qdev->mem_slots);
+release_ring_free:
+ qxl_ring_free(qdev->release_ring);
+cursor_ring_free:
+ qxl_ring_free(qdev->cursor_ring);
+command_ring_free:
+ qxl_ring_free(qdev->command_ring);
+ram_header_unmap:
+ iounmap(qdev->ram_header);
+bo_fini:
+ qxl_bo_fini(qdev);
+rom_unmap:
+ iounmap(qdev->rom);
+surface_mapping_free:
+ io_mapping_free(qdev->surface_mapping);
+vram_mapping_free:
+ io_mapping_free(qdev->vram_mapping);
+error:
+ return r;
}
void qxl_device_fini(struct qxl_device *qdev)
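
The qxl_device_init() hunks above convert bare early returns into a single goto-based unwind ladder, so each failure point releases exactly what was acquired before it, in reverse order. A small self-contained sketch of the pattern, using a hypothetical two-step init:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/slab.h>

struct example_priv {
	void __iomem *regs;
	void *buf;
};

static int example_init(struct example_priv *p, resource_size_t base,
			unsigned long size)
{
	int r;

	p->regs = ioremap(base, size);
	if (!p->regs) {
		r = -ENOMEM;
		goto error;		/* nothing acquired yet */
	}

	p->buf = kzalloc(128, GFP_KERNEL);
	if (!p->buf) {
		r = -ENOMEM;
		goto regs_unmap;	/* undo only the first step */
	}

	return 0;			/* success: p owns both resources */

regs_unmap:
	iounmap(p->regs);
error:
	return r;
}
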
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 2a7977a23b31..99c63eeb2866 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -316,27 +316,6 @@ static struct drm_driver kms_driver;
bool radeon_device_is_virtual(void);
-static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
-{
- struct apertures_struct *ap;
- bool primary = false;
-
- ap = alloc_apertures(1);
- if (!ap)
- return -ENOMEM;
-
- ap->ranges[0].base = pci_resource_start(pdev, 0);
- ap->ranges[0].size = pci_resource_len(pdev, 0);
-
-#ifdef CONFIG_X86
- primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-#endif
- drm_fb_helper_remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
- kfree(ap);
-
- return 0;
-}
-
static int radeon_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -346,7 +325,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
return -EPROBE_DEFER;
/* Get rid of things like offb */
- ret = radeon_kick_out_firmware_fb(pdev);
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "radeondrmfb");
if (ret)
return ret;
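
radeon_kick_out_firmware_fb() is replaced by drm_fb_helper_remove_conflicting_pci_framebuffers(), which derives the aperture from the given PCI BAR and evicts firmware framebuffers (offb, efifb, ...) itself. A minimal sketch of a probe path using it, with hypothetical example_* names:

#include <linux/pci.h>
#include <drm/drm_fb_helper.h>

static int example_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	int ret;

	/* BAR 0 backs the firmware framebuffer on this hypothetical device;
	 * the helper builds the aperture from it and removes offb/efifb. */
	ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0,
								"exampledrmfb");
	if (ret)
		return ret;

	/* ...the rest of the probe sequence continues as before... */
	return 0;
}
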
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index edde8d4b87a3..225141656e19 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config DRM_RCAR_DU
tristate "DRM Support for R-Car Display Unit"
depends on DRM && OF
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 15dc9caa128b..6288b9ad9e24 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* rcar_du_crtc.c -- R-Car Display Unit CRTCs
*
* Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/clk.h>
@@ -198,6 +194,47 @@ done:
best_diff);
}
+struct du_clk_params {
+ struct clk *clk;
+ unsigned long rate;
+ unsigned long diff;
+ u32 escr;
+};
+
+static void rcar_du_escr_divider(struct clk *clk, unsigned long target,
+ u32 escr, struct du_clk_params *params)
+{
+ unsigned long rate;
+ unsigned long diff;
+ u32 div;
+
+ /*
+ * If the target rate has already been achieved perfectly we can't do
+ * better.
+ */
+ if (params->diff == 0)
+ return;
+
+ /*
+ * Compute the input clock rate and internal divisor values to obtain
+ * the clock rate closest to the target frequency.
+ */
+ rate = clk_round_rate(clk, target);
+ div = clamp(DIV_ROUND_CLOSEST(rate, target), 1UL, 64UL) - 1;
+ diff = abs(rate / (div + 1) - target);
+
+ /*
+ * Store the parameters if the resulting frequency is better than any
+ * previously calculated value.
+ */
+ if (diff < params->diff) {
+ params->clk = clk;
+ params->rate = rate;
+ params->diff = diff;
+ params->escr = escr | div;
+ }
+}
+
static const struct soc_device_attribute rcar_du_r8a7795_es1[] = {
{ .soc_id = "r8a7795", .revision = "ES1.*" },
{ /* sentinel */ }
@@ -208,89 +245,83 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
struct rcar_du_device *rcdu = rcrtc->group->dev;
unsigned long mode_clock = mode->clock * 1000;
- unsigned long clk;
- u32 value;
+ u32 dsmr;
u32 escr;
- u32 div;
-
- /*
- * Compute the clock divisor and select the internal or external dot
- * clock based on the requested frequency.
- */
- clk = clk_get_rate(rcrtc->clock);
- div = DIV_ROUND_CLOSEST(clk, mode_clock);
- div = clamp(div, 1U, 64U) - 1;
- escr = div | ESCR_DCLKSEL_CLKS;
- if (rcrtc->extclock) {
+ if (rcdu->info->dpll_mask & (1 << rcrtc->index)) {
+ unsigned long target = mode_clock;
struct dpll_info dpll = { 0 };
unsigned long extclk;
- unsigned long extrate;
- unsigned long rate;
- u32 extdiv;
+ u32 dpllcr;
+ u32 div = 0;
- extclk = clk_get_rate(rcrtc->extclock);
- if (rcdu->info->dpll_ch & (1 << rcrtc->index)) {
- unsigned long target = mode_clock;
+ /*
+ * DU channels that have a display PLL can't use the internal
+ * system clock, and have no internal clock divider.
+ */
- /*
- * The H3 ES1.x exhibits dot clock duty cycle stability
- * issues. We can work around them by configuring the
- * DPLL to twice the desired frequency, coupled with a
- * /2 post-divider. This isn't needed on other SoCs and
- * breaks HDMI output on M3-W for a currently unknown
- * reason, so restrict the workaround to H3 ES1.x.
- */
- if (soc_device_match(rcar_du_r8a7795_es1))
- target *= 2;
+ if (WARN_ON(!rcrtc->extclock))
+ return;
- rcar_du_dpll_divider(rcrtc, &dpll, extclk, target);
- extclk = dpll.output;
+ /*
+ * The H3 ES1.x exhibits dot clock duty cycle stability issues.
+ * We can work around them by configuring the DPLL to twice the
+ * desired frequency, coupled with a /2 post-divider. Restrict
+ * the workaround to H3 ES1.x as ES2.0 and all other SoCs have
+ * no post-divider when a display PLL is present (as shown by
+ * the workaround breaking HDMI output on M3-W during testing).
+ */
+ if (soc_device_match(rcar_du_r8a7795_es1)) {
+ target *= 2;
+ div = 1;
}
- extdiv = DIV_ROUND_CLOSEST(extclk, mode_clock);
- extdiv = clamp(extdiv, 1U, 64U) - 1;
+ extclk = clk_get_rate(rcrtc->extclock);
+ rcar_du_dpll_divider(rcrtc, &dpll, extclk, target);
- rate = clk / (div + 1);
- extrate = extclk / (extdiv + 1);
+ dpllcr = DPLLCR_CODE | DPLLCR_CLKE
+ | DPLLCR_FDPLL(dpll.fdpll)
+ | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m)
+ | DPLLCR_STBY;
- if (abs((long)extrate - (long)mode_clock) <
- abs((long)rate - (long)mode_clock)) {
+ if (rcrtc->index == 1)
+ dpllcr |= DPLLCR_PLCS1
+ | DPLLCR_INCS_DOTCLKIN1;
+ else
+ dpllcr |= DPLLCR_PLCS0
+ | DPLLCR_INCS_DOTCLKIN0;
- if (rcdu->info->dpll_ch & (1 << rcrtc->index)) {
- u32 dpllcr = DPLLCR_CODE | DPLLCR_CLKE
- | DPLLCR_FDPLL(dpll.fdpll)
- | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m)
- | DPLLCR_STBY;
+ rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr);
- if (rcrtc->index == 1)
- dpllcr |= DPLLCR_PLCS1
- | DPLLCR_INCS_DOTCLKIN1;
- else
- dpllcr |= DPLLCR_PLCS0
- | DPLLCR_INCS_DOTCLKIN0;
+ escr = ESCR_DCLKSEL_DCLKIN | div;
+ } else {
+ struct du_clk_params params = { .diff = (unsigned long)-1 };
- rcar_du_group_write(rcrtc->group, DPLLCR,
- dpllcr);
- }
+ rcar_du_escr_divider(rcrtc->clock, mode_clock,
+ ESCR_DCLKSEL_CLKS, &params);
+ if (rcrtc->extclock)
+ rcar_du_escr_divider(rcrtc->extclock, mode_clock,
+ ESCR_DCLKSEL_DCLKIN, &params);
- escr = ESCR_DCLKSEL_DCLKIN | extdiv;
- }
+ dev_dbg(rcrtc->group->dev->dev, "mode clock %lu %s rate %lu\n",
+ mode_clock, params.clk == rcrtc->clock ? "cpg" : "ext",
+ params.rate);
- dev_dbg(rcrtc->group->dev->dev,
- "mode clock %lu extrate %lu rate %lu ESCR 0x%08x\n",
- mode_clock, extrate, rate, escr);
+ clk_set_rate(params.clk, params.rate);
+ escr = params.escr;
}
- rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR,
- escr);
- rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);
+ dev_dbg(rcrtc->group->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr);
+
+ rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? ESCR13 : ESCR02, escr);
+ rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0);
/* Signal polarities */
- value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
- | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
- | DSMR_DIPM_DISP | DSMR_CSPM;
- rcar_du_crtc_write(rcrtc, DSMR, value);
+ dsmr = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
+ | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
+ | ((mode->flags & DRM_MODE_FLAG_INTERLACE) ? DSMR_ODEV : 0)
+ | DSMR_DIPM_DISP | DSMR_CSPM;
+ rcar_du_crtc_write(rcrtc, DSMR, dsmr);
/* Display timings */
rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - 19);
@@ -684,13 +715,86 @@ static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
rcar_du_vsp_atomic_flush(rcrtc);
}
+enum drm_mode_status rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
+ bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+
+ if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED))
+ return MODE_NO_INTERLACE;
+
+ return MODE_OK;
+}
+
static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
.atomic_begin = rcar_du_crtc_atomic_begin,
.atomic_flush = rcar_du_crtc_atomic_flush,
.atomic_enable = rcar_du_crtc_atomic_enable,
.atomic_disable = rcar_du_crtc_atomic_disable,
+ .mode_valid = rcar_du_crtc_mode_valid,
};
+static void rcar_du_crtc_crc_init(struct rcar_du_crtc *rcrtc)
+{
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
+ const char **sources;
+ unsigned int count;
+ int i = -1;
+
+ /* CRC available only on Gen3 HW. */
+ if (rcdu->info->gen < 3)
+ return;
+
+ /* Reserve 1 for "auto" source. */
+ count = rcrtc->vsp->num_planes + 1;
+
+ sources = kmalloc_array(count, sizeof(*sources), GFP_KERNEL);
+ if (!sources)
+ return;
+
+ sources[0] = kstrdup("auto", GFP_KERNEL);
+ if (!sources[0])
+ goto error;
+
+ for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
+ struct drm_plane *plane = &rcrtc->vsp->planes[i].plane;
+ char name[16];
+
+ sprintf(name, "plane%u", plane->base.id);
+ sources[i + 1] = kstrdup(name, GFP_KERNEL);
+ if (!sources[i + 1])
+ goto error;
+ }
+
+ rcrtc->sources = sources;
+ rcrtc->sources_count = count;
+ return;
+
+error:
+ while (i >= 0) {
+ kfree(sources[i]);
+ i--;
+ }
+ kfree(sources);
+}
+
+static void rcar_du_crtc_crc_cleanup(struct rcar_du_crtc *rcrtc)
+{
+ unsigned int i;
+
+ if (!rcrtc->sources)
+ return;
+
+ for (i = 0; i < rcrtc->sources_count; i++)
+ kfree(rcrtc->sources[i]);
+ kfree(rcrtc->sources);
+
+ rcrtc->sources = NULL;
+ rcrtc->sources_count = 0;
+}
+
static struct drm_crtc_state *
rcar_du_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
@@ -717,6 +821,15 @@ static void rcar_du_crtc_atomic_destroy_state(struct drm_crtc *crtc,
kfree(to_rcar_crtc_state(state));
}
+static void rcar_du_crtc_cleanup(struct drm_crtc *crtc)
+{
+ struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+
+ rcar_du_crtc_crc_cleanup(rcrtc);
+
+ return drm_crtc_cleanup(crtc);
+}
+
static void rcar_du_crtc_reset(struct drm_crtc *crtc)
{
struct rcar_du_crtc_state *state;
@@ -756,17 +869,11 @@ static void rcar_du_crtc_disable_vblank(struct drm_crtc *crtc)
rcrtc->vblank_enable = false;
}
-static int rcar_du_crtc_set_crc_source(struct drm_crtc *crtc,
- const char *source_name,
- size_t *values_cnt)
+static int rcar_du_crtc_parse_crc_source(struct rcar_du_crtc *rcrtc,
+ const char *source_name,
+ enum vsp1_du_crc_source *source)
{
- struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
- struct drm_modeset_acquire_ctx ctx;
- struct drm_crtc_state *crtc_state;
- struct drm_atomic_state *state;
- enum vsp1_du_crc_source source;
- unsigned int index = 0;
- unsigned int i;
+ unsigned int index;
int ret;
/*
@@ -774,31 +881,72 @@ static int rcar_du_crtc_set_crc_source(struct drm_crtc *crtc,
* CRC on an input plane (%u is the plane ID), and "auto" to compute the
* CRC on the composer (VSP) output.
*/
+
if (!source_name) {
- source = VSP1_DU_CRC_NONE;
+ *source = VSP1_DU_CRC_NONE;
+ return 0;
} else if (!strcmp(source_name, "auto")) {
- source = VSP1_DU_CRC_OUTPUT;
+ *source = VSP1_DU_CRC_OUTPUT;
+ return 0;
} else if (strstarts(source_name, "plane")) {
- source = VSP1_DU_CRC_PLANE;
+ unsigned int i;
+
+ *source = VSP1_DU_CRC_PLANE;
ret = kstrtouint(source_name + strlen("plane"), 10, &index);
if (ret < 0)
return ret;
for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
- if (index == rcrtc->vsp->planes[i].plane.base.id) {
- index = i;
- break;
- }
+ if (index == rcrtc->vsp->planes[i].plane.base.id)
+ return i;
}
+ }
- if (i >= rcrtc->vsp->num_planes)
- return -EINVAL;
- } else {
+ return -EINVAL;
+}
+
+static int rcar_du_crtc_verify_crc_source(struct drm_crtc *crtc,
+ const char *source_name,
+ size_t *values_cnt)
+{
+ struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ enum vsp1_du_crc_source source;
+
+ if (rcar_du_crtc_parse_crc_source(rcrtc, source_name, &source) < 0) {
+ DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
return -EINVAL;
}
*values_cnt = 1;
+ return 0;
+}
+
+const char *const *rcar_du_crtc_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count)
+{
+ struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+
+ *count = rcrtc->sources_count;
+ return rcrtc->sources;
+}
+
+static int rcar_du_crtc_set_crc_source(struct drm_crtc *crtc,
+ const char *source_name)
+{
+ struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_crtc_state *crtc_state;
+ struct drm_atomic_state *state;
+ enum vsp1_du_crc_source source;
+ unsigned int index;
+ int ret;
+
+ ret = rcar_du_crtc_parse_crc_source(rcrtc, source_name, &source);
+ if (ret < 0)
+ return ret;
+
+ index = ret;
/* Perform an atomic commit to set the CRC source. */
drm_modeset_acquire_init(&ctx, 0);
@@ -853,7 +1001,7 @@ static const struct drm_crtc_funcs crtc_funcs_gen2 = {
static const struct drm_crtc_funcs crtc_funcs_gen3 = {
.reset = rcar_du_crtc_reset,
- .destroy = drm_crtc_cleanup,
+ .destroy = rcar_du_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = rcar_du_crtc_atomic_duplicate_state,
@@ -861,6 +1009,8 @@ static const struct drm_crtc_funcs crtc_funcs_gen3 = {
.enable_vblank = rcar_du_crtc_enable_vblank,
.disable_vblank = rcar_du_crtc_disable_vblank,
.set_crc_source = rcar_du_crtc_set_crc_source,
+ .verify_crc_source = rcar_du_crtc_verify_crc_source,
+ .get_crc_sources = rcar_du_crtc_get_crc_sources,
};
/* -----------------------------------------------------------------------------
@@ -999,5 +1149,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
return ret;
}
+ rcar_du_crtc_crc_init(rcrtc);
+
return 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 7680cb2636c8..4990bbe9ba26 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* rcar_du_crtc.h -- R-Car Display Unit CRTCs
*
* Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __RCAR_DU_CRTC_H__
@@ -67,6 +63,9 @@ struct rcar_du_crtc {
struct rcar_du_group *group;
struct rcar_du_vsp *vsp;
unsigned int vsp_pipe;
+
+ const char *const *sources;
+ unsigned int sources_count;
};
#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 02aee6cb0e53..0954ecd2f943 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* rcar_du_drv.c -- R-Car Display Unit DRM driver
*
* Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/clk.h>
@@ -39,7 +35,8 @@
static const struct rcar_du_device_info rzg1_du_r8a7743_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
- | RCAR_DU_FEATURE_EXT_CTRL_REGS,
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS
+ | RCAR_DU_FEATURE_INTERLACED,
.channels_mask = BIT(1) | BIT(0),
.routes = {
/*
@@ -60,7 +57,8 @@ static const struct rcar_du_device_info rzg1_du_r8a7743_info = {
static const struct rcar_du_device_info rzg1_du_r8a7745_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
- | RCAR_DU_FEATURE_EXT_CTRL_REGS,
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS
+ | RCAR_DU_FEATURE_INTERLACED,
.channels_mask = BIT(1) | BIT(0),
.routes = {
/*
@@ -79,7 +77,7 @@ static const struct rcar_du_device_info rzg1_du_r8a7745_info = {
static const struct rcar_du_device_info rcar_du_r8a7779_info = {
.gen = 2,
- .features = 0,
+ .features = RCAR_DU_FEATURE_INTERLACED,
.channels_mask = BIT(1) | BIT(0),
.routes = {
/*
@@ -100,7 +98,8 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = {
static const struct rcar_du_device_info rcar_du_r8a7790_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
- | RCAR_DU_FEATURE_EXT_CTRL_REGS,
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS
+ | RCAR_DU_FEATURE_INTERLACED,
.quirks = RCAR_DU_QUIRK_ALIGN_128B,
.channels_mask = BIT(2) | BIT(1) | BIT(0),
.routes = {
@@ -128,7 +127,8 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = {
static const struct rcar_du_device_info rcar_du_r8a7791_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
- | RCAR_DU_FEATURE_EXT_CTRL_REGS,
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS
+ | RCAR_DU_FEATURE_INTERLACED,
.channels_mask = BIT(1) | BIT(0),
.routes = {
/*
@@ -150,7 +150,8 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = {
static const struct rcar_du_device_info rcar_du_r8a7792_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
- | RCAR_DU_FEATURE_EXT_CTRL_REGS,
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS
+ | RCAR_DU_FEATURE_INTERLACED,
.channels_mask = BIT(1) | BIT(0),
.routes = {
/* R8A7792 has two RGB outputs. */
@@ -168,7 +169,8 @@ static const struct rcar_du_device_info rcar_du_r8a7792_info = {
static const struct rcar_du_device_info rcar_du_r8a7794_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
- | RCAR_DU_FEATURE_EXT_CTRL_REGS,
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS
+ | RCAR_DU_FEATURE_INTERLACED,
.channels_mask = BIT(1) | BIT(0),
.routes = {
/*
@@ -190,7 +192,8 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
| RCAR_DU_FEATURE_EXT_CTRL_REGS
- | RCAR_DU_FEATURE_VSP1_SOURCE,
+ | RCAR_DU_FEATURE_VSP1_SOURCE
+ | RCAR_DU_FEATURE_INTERLACED,
.channels_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
.routes = {
/*
@@ -215,14 +218,15 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
},
},
.num_lvds = 1,
- .dpll_ch = BIT(2) | BIT(1),
+ .dpll_mask = BIT(2) | BIT(1),
};
static const struct rcar_du_device_info rcar_du_r8a7796_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
| RCAR_DU_FEATURE_EXT_CTRL_REGS
- | RCAR_DU_FEATURE_VSP1_SOURCE,
+ | RCAR_DU_FEATURE_VSP1_SOURCE
+ | RCAR_DU_FEATURE_INTERLACED,
.channels_mask = BIT(2) | BIT(1) | BIT(0),
.routes = {
/*
@@ -243,14 +247,15 @@ static const struct rcar_du_device_info rcar_du_r8a7796_info = {
},
},
.num_lvds = 1,
- .dpll_ch = BIT(1),
+ .dpll_mask = BIT(1),
};
static const struct rcar_du_device_info rcar_du_r8a77965_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
| RCAR_DU_FEATURE_EXT_CTRL_REGS
- | RCAR_DU_FEATURE_VSP1_SOURCE,
+ | RCAR_DU_FEATURE_VSP1_SOURCE
+ | RCAR_DU_FEATURE_INTERLACED,
.channels_mask = BIT(3) | BIT(1) | BIT(0),
.routes = {
/*
@@ -271,14 +276,15 @@ static const struct rcar_du_device_info rcar_du_r8a77965_info = {
},
},
.num_lvds = 1,
- .dpll_ch = BIT(1),
+ .dpll_mask = BIT(1),
};
static const struct rcar_du_device_info rcar_du_r8a77970_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
| RCAR_DU_FEATURE_EXT_CTRL_REGS
- | RCAR_DU_FEATURE_VSP1_SOURCE,
+ | RCAR_DU_FEATURE_VSP1_SOURCE
+ | RCAR_DU_FEATURE_INTERLACED,
.channels_mask = BIT(0),
.routes = {
/* R8A77970 has one RGB output and one LVDS output. */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index b3a25e8e07d0..fef9ea5c22f3 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* rcar_du_drv.h -- R-Car Display Unit DRM driver
*
* Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __RCAR_DU_DRV_H__
@@ -27,11 +23,12 @@ struct drm_device;
struct drm_fbdev_cma;
struct rcar_du_device;
-#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */
-#define RCAR_DU_FEATURE_EXT_CTRL_REGS (1 << 1) /* Has extended control registers */
-#define RCAR_DU_FEATURE_VSP1_SOURCE (1 << 2) /* Has inputs from VSP1 */
+#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK BIT(0) /* Per-CRTC IRQ and clock */
+#define RCAR_DU_FEATURE_EXT_CTRL_REGS BIT(1) /* Has extended control registers */
+#define RCAR_DU_FEATURE_VSP1_SOURCE BIT(2) /* Has inputs from VSP1 */
+#define RCAR_DU_FEATURE_INTERLACED BIT(3) /* HW supports interlaced */
-#define RCAR_DU_QUIRK_ALIGN_128B (1 << 0) /* Align pitches to 128 bytes */
+#define RCAR_DU_QUIRK_ALIGN_128B BIT(0) /* Align pitches to 128 bytes */
/*
* struct rcar_du_output_routing - Output routing specification
@@ -55,6 +52,7 @@ struct rcar_du_output_routing {
* @channels_mask: bit mask of available DU channels
* @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*)
* @num_lvds: number of internal LVDS encoders
+ * @dpll_mask: bit mask of DU channels equipped with a DPLL
*/
struct rcar_du_device_info {
unsigned int gen;
@@ -63,7 +61,7 @@ struct rcar_du_device_info {
unsigned int channels_mask;
struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX];
unsigned int num_lvds;
- unsigned int dpll_ch;
+ unsigned int dpll_mask;
};
#define RCAR_DU_MAX_CRTCS 4
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index f9c933d3bae6..1877764bd6d9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* rcar_du_encoder.c -- R-Car Display Unit Encoder
*
* Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/export.h>
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
index 2d2abcacd169..ce3cbc85695e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* rcar_du_encoder.h -- R-Car Display Unit Encoder
*
* Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __RCAR_DU_ENCODER_H__
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index d539cb290a35..ef2c177afb6d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* rcar_du_group.c -- R-Car Display Unit Channels Pair
*
* Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/*
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h
index 42105aedecc8..87950c1f6a52 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* rcar_du_group.c -- R-Car Display Unit Planes and CRTCs Group
*
* Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __RCAR_DU_GROUP_H__
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index f0bc7cc0e913..b5d79ecd25ea 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* rcar_du_kms.c -- R-Car Display Unit Mode Setting
*
* Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <drm/drmP.h>
@@ -101,6 +97,38 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
* associated .pnmr or .edf settings.
*/
{
+ .fourcc = DRM_FORMAT_RGB332,
+ .bpp = 8,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_ARGB4444,
+ .bpp = 16,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_XRGB4444,
+ .bpp = 16,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGR888,
+ .bpp = 24,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_RGB888,
+ .bpp = 24,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGRA8888,
+ .bpp = 32,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_BGRX8888,
+ .bpp = 32,
+ .planes = 1,
+ }, {
+ .fourcc = DRM_FORMAT_YVYU,
+ .bpp = 16,
+ .planes = 1,
+ }, {
.fourcc = DRM_FORMAT_NV61,
.bpp = 16,
.planes = 2,
@@ -176,7 +204,6 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct rcar_du_format_info *format;
unsigned int max_pitch;
unsigned int align;
- unsigned int bpp;
unsigned int i;
format = rcar_du_format_info(mode_cmd->pixel_format);
@@ -186,20 +213,32 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-EINVAL);
}
- /*
- * The pitch and alignment constraints are expressed in pixels on the
- * hardware side and in bytes in the DRM API.
- */
- bpp = format->planes == 1 ? format->bpp / 8 : 1;
- max_pitch = 4096 * bpp;
+ if (rcdu->info->gen < 3) {
+ /*
+ * On Gen2 the DU limits the pitch to 4095 pixels and requires
+ * buffers to be aligned to a 16 pixels boundary (or 128 bytes
+ * on some platforms).
+ */
+ unsigned int bpp = format->planes == 1 ? format->bpp / 8 : 1;
- if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
- align = 128;
- else
- align = 16 * bpp;
+ max_pitch = 4095 * bpp;
+
+ if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
+ align = 128;
+ else
+ align = 16 * bpp;
+ } else {
+ /*
+ * On Gen3 the memory interface is handled by the VSP that
+ * limits the pitch to 65535 bytes and has no alignment
+ * constraint.
+ */
+ max_pitch = 65535;
+ align = 1;
+ }
if (mode_cmd->pitches[0] & (align - 1) ||
- mode_cmd->pitches[0] >= max_pitch) {
+ mode_cmd->pitches[0] > max_pitch) {
dev_dbg(dev->dev, "invalid pitch value %u\n",
mode_cmd->pitches[0]);
return ERR_PTR(-EINVAL);
@@ -516,12 +555,22 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
- dev->mode_config.max_width = 4095;
- dev->mode_config.max_height = 2047;
dev->mode_config.normalize_zpos = true;
dev->mode_config.funcs = &rcar_du_mode_config_funcs;
dev->mode_config.helper_private = &rcar_du_mode_config_helper;
+ if (rcdu->info->gen < 3) {
+ dev->mode_config.max_width = 4095;
+ dev->mode_config.max_height = 2047;
+ } else {
+ /*
+ * The Gen3 DU uses the VSP1 for memory access, and is limited
+ * to frame sizes of 8190x8190.
+ */
+ dev->mode_config.max_width = 8190;
+ dev->mode_config.max_height = 8190;
+ }
+
rcdu->num_crtcs = hweight8(rcdu->info->channels_mask);
ret = rcar_du_properties_init(rcdu);
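
The rcar_du_fb_create() change above splits pitch validation by generation: Gen2 is limited to 4095-pixel pitches with 16-pixel (or 128-byte) alignment, while Gen3 goes through the VSP with a 65535-byte pitch cap and no alignment constraint. A small sketch of the Gen2 check as described (hypothetical helper, not part of the patch):

#include <linux/types.h>

static bool example_gen2_pitch_ok(unsigned int pitch_bytes,
				  unsigned int bytes_per_pixel,
				  bool align_128b_quirk)
{
	unsigned int align = align_128b_quirk ? 128 : 16 * bytes_per_pixel;
	unsigned int max_pitch = 4095 * bytes_per_pixel;

	/* Same mask-based alignment test the driver uses, plus the
	 * 4095-pixel pitch limit expressed in bytes. */
	return !(pitch_bytes & (align - 1)) && pitch_bytes <= max_pitch;
}
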
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
index 07951d5fe38b..e171527abdaa 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* rcar_du_kms.h -- R-Car Display Unit Mode Setting
*
* Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __RCAR_DU_KMS_H__
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index c20f7ed48c8d..9e07758a755c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* rcar_du_plane.c -- R-Car Display Unit Planes
*
* Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <drm/drmP.h>
@@ -690,14 +686,12 @@ static void rcar_du_plane_reset(struct drm_plane *plane)
if (state == NULL)
return;
+ __drm_atomic_helper_plane_reset(plane, &state->state);
+
state->hwindex = -1;
state->source = RCAR_DU_PLANE_MEMORY;
state->colorkey = RCAR_DU_COLORKEY_NONE;
state->state.zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
-
- plane->state = &state->state;
- plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
- plane->state->plane = plane;
}
static int rcar_du_plane_atomic_set_property(struct drm_plane *plane,
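
rcar_du_plane_reset() now calls __drm_atomic_helper_plane_reset() instead of linking the state and setting the default alpha by hand. A sketch of how a driver with a subclassed plane state typically uses it (hypothetical example_* names; the helper is assumed to be declared in drm_atomic_helper.h in this tree):

#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane.h>

struct example_plane_state {		/* hypothetical subclassed state */
	struct drm_plane_state state;
	int hwindex;			/* driver-specific field */
};

#define to_example_plane_state(s) \
	container_of(s, struct example_plane_state, state)

static void example_plane_reset(struct drm_plane *plane)
{
	struct example_plane_state *state;

	if (plane->state) {
		__drm_atomic_helper_plane_destroy_state(plane->state);
		kfree(to_example_plane_state(plane->state));
		plane->state = NULL;
	}

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return;

	/* Links the state to the plane and applies the common defaults
	 * (opaque alpha, no rotation); the driver then only fills in its
	 * own fields. */
	__drm_atomic_helper_plane_reset(plane, &state->state);
	state->hwindex = -1;
}
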
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
index 5c19c69e4691..2f223a4c1d33 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* rcar_du_plane.h -- R-Car Display Unit Planes
*
* Copyright (C) 2013-2014 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __RCAR_DU_PLANE_H__
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index 9dfd220ceda1..bc87f080b170 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* rcar_du_regs.h -- R-Car Display Unit Registers Definitions
*
* Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
*/
#ifndef __RCAR_DU_REGS_H__
@@ -492,8 +489,8 @@
* External Synchronization Control Registers
*/
-#define ESCR 0x10000
-#define ESCR2 0x31000
+#define ESCR02 0x10000
+#define ESCR13 0x01000
#define ESCR_DCLKOINV (1 << 25)
#define ESCR_DCLKSEL_DCLKIN (0 << 20)
#define ESCR_DCLKSEL_CLKS (1 << 20)
@@ -504,8 +501,8 @@
#define ESCR_SYNCSEL_EXHSYNC (3 << 8)
#define ESCR_FRQSEL_MASK (0x3f << 0)
-#define OTAR 0x10004
-#define OTAR2 0x31004
+#define OTAR02 0x10004
+#define OTAR13 0x01004
/* -----------------------------------------------------------------------------
* Dual Display Output Control Registers
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 72eebeda518e..4576119e7777 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* rcar_du_vsp.h -- R-Car Display Unit VSP-Based Compositor
*
* Copyright (C) 2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <drm/drmP.h>
@@ -52,6 +48,7 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
struct vsp1_du_lif_config cfg = {
.width = mode->hdisplay,
.height = mode->vdisplay,
+ .interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE,
.callback = rcar_du_vsp_complete,
.callback_data = crtc,
};
@@ -129,7 +126,6 @@ static const u32 formats_kms[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_NV12,
@@ -158,7 +154,6 @@ static const u32 formats_v4l2[] = {
V4L2_PIX_FMT_ABGR32,
V4L2_PIX_FMT_XBGR32,
V4L2_PIX_FMT_UYVY,
- V4L2_PIX_FMT_VYUY,
V4L2_PIX_FMT_YUYV,
V4L2_PIX_FMT_YVYU,
V4L2_PIX_FMT_NV12M,
@@ -346,11 +341,8 @@ static void rcar_du_vsp_plane_reset(struct drm_plane *plane)
if (state == NULL)
return;
- state->state.alpha = DRM_BLEND_ALPHA_OPAQUE;
+ __drm_atomic_helper_plane_reset(plane, &state->state);
state->state.zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
-
- plane->state = &state->state;
- plane->state->plane = plane;
}
static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
index 8a8a25c8c8e8..e8c14dc5cb93 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* rcar_du_vsp.h -- R-Car Display Unit VSP-Based Compositor
*
* Copyright (C) 2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __RCAR_DU_VSP_H__
diff --git a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c
index 76210ae25094..75490a3e0a2a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c
+++ b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* R-Car Gen3 HDMI PHY
*
* Copyright (C) 2016 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/module.h>
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 4c39de3f4f0f..ce0eb68c3416 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -522,6 +522,7 @@ static const struct of_device_id rcar_lvds_of_table[] = {
{ .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a7796-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a77970-lvds", .data = &rcar_lvds_r8a77970_info },
+ { .compatible = "renesas,r8a77980-lvds", .data = &rcar_lvds_gen3_info },
{ }
};
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
index 2896835ca7e9..4870f50d9bec 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* rcar_lvds_regs.h -- R-Car LVDS Interface Registers Definitions
*
* Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
*/
#ifndef __RCAR_LVDS_REGS_H__
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 0ccc76217ee4..26438d45732b 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -8,6 +8,7 @@ config DRM_ROCKCHIP
select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
select DRM_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
+ select DRM_RGB if ROCKCHIP_RGB
select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC
help
Choose this option if you have a Rockchip soc chipset.
@@ -23,7 +24,7 @@ config ROCKCHIP_ANALOGIX_DP
help
This selects support for Rockchip SoC specific extensions
for the Analogix Core DP driver. If you want to enable DP
- on RK3288 based SoC, you should selet this option.
+ on RK3288 or RK3399 based SoC, you should select this option.
config ROCKCHIP_CDN_DP
bool "Rockchip cdn DP"
@@ -39,16 +40,16 @@ config ROCKCHIP_DW_HDMI
help
This selects support for Rockchip SoC specific extensions
for the Synopsys DesignWare HDMI driver. If you want to
- enable HDMI on RK3288 based SoC, you should selet this
- option.
+ enable HDMI on RK3288 or RK3399 based SoC, you should select
+ this option.
config ROCKCHIP_DW_MIPI_DSI
bool "Rockchip specific extensions for Synopsys DW MIPI DSI"
help
- This selects support for Rockchip SoC specific extensions
- for the Synopsys DesignWare HDMI driver. If you want to
- enable MIPI DSI on RK3288 based SoC, you should selet this
- option.
+ This selects support for Rockchip SoC specific extensions
+ for the Synopsys DesignWare HDMI driver. If you want to
+ enable MIPI DSI on RK3288 or RK3399 based SoC, you should
+ select this option.
config ROCKCHIP_INNO_HDMI
bool "Rockchip specific extensions for Innosilicon HDMI"
@@ -66,4 +67,14 @@ config ROCKCHIP_LVDS
Rockchip rk3288 SoC has LVDS TX Controller can be used, and it
support LVDS, rgb, dual LVDS output mode. say Y to enable its
driver.
+
+config ROCKCHIP_RGB
+ bool "Rockchip RGB support"
+ depends on DRM_ROCKCHIP
+ depends on PINCTRL
+ help
+ Choose this option to enable support for Rockchip RGB output.
+ Some Rockchip CRTCs, like rv1108, can directly output parallel
+ and serial RGB format to panel or connect to a conversion chip.
+ say Y to enable its driver.
endif
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index a314e2109e76..868263ff0302 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -14,5 +14,6 @@ rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o
+rockchipdrm-$(CONFIG_ROCKCHIP_RGB) += rockchip_rgb.o
obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index f814d37b1db2..941f35233b1f 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -24,6 +24,7 @@
#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/of_platform.h>
#include <linux/component.h>
#include <linux/console.h>
#include <linux/iommu.h>
@@ -184,7 +185,7 @@ err_mode_config_cleanup:
err_free:
drm_dev->dev_private = NULL;
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm_dev);
+ drm_dev_put(drm_dev);
return ret;
}
@@ -204,7 +205,7 @@ static void rockchip_drm_unbind(struct device *dev)
drm_dev->dev_private = NULL;
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm_dev);
+ drm_dev_put(drm_dev);
}
static const struct file_operations rockchip_drm_driver_fops = {
@@ -243,60 +244,18 @@ static struct drm_driver rockchip_drm_driver = {
};
#ifdef CONFIG_PM_SLEEP
-static void rockchip_drm_fb_suspend(struct drm_device *drm)
-{
- struct rockchip_drm_private *priv = drm->dev_private;
-
- console_lock();
- drm_fb_helper_set_suspend(&priv->fbdev_helper, 1);
- console_unlock();
-}
-
-static void rockchip_drm_fb_resume(struct drm_device *drm)
-{
- struct rockchip_drm_private *priv = drm->dev_private;
-
- console_lock();
- drm_fb_helper_set_suspend(&priv->fbdev_helper, 0);
- console_unlock();
-}
-
static int rockchip_drm_sys_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct rockchip_drm_private *priv;
-
- if (!drm)
- return 0;
-
- drm_kms_helper_poll_disable(drm);
- rockchip_drm_fb_suspend(drm);
- priv = drm->dev_private;
- priv->state = drm_atomic_helper_suspend(drm);
- if (IS_ERR(priv->state)) {
- rockchip_drm_fb_resume(drm);
- drm_kms_helper_poll_enable(drm);
- return PTR_ERR(priv->state);
- }
-
- return 0;
+ return drm_mode_config_helper_suspend(drm);
}
static int rockchip_drm_sys_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct rockchip_drm_private *priv;
-
- if (!drm)
- return 0;
- priv = drm->dev_private;
- drm_atomic_helper_resume(drm, priv->state);
- rockchip_drm_fb_resume(drm);
- drm_kms_helper_poll_enable(drm);
-
- return 0;
+ return drm_mode_config_helper_resume(drm);
}
#endif
@@ -309,6 +268,53 @@ static const struct dev_pm_ops rockchip_drm_pm_ops = {
static struct platform_driver *rockchip_sub_drivers[MAX_ROCKCHIP_SUB_DRIVERS];
static int num_rockchip_sub_drivers;
+/*
+ * Check if a vop endpoint leads to a rockchip subdriver or an external bridge.
+ * Should be called from the component bind stage of the drivers to ensure
+ * that all subdrivers have been probed.
+ *
+ * @ep: endpoint of a rockchip vop
+ *
+ * Returns true for a subdriver, false for an external bridge, and -ENODEV
+ * if the remote port does not contain a device.
+ */
+int rockchip_drm_endpoint_is_subdriver(struct device_node *ep)
+{
+ struct device_node *node = of_graph_get_remote_port_parent(ep);
+ struct platform_device *pdev;
+ struct device_driver *drv;
+ int i;
+
+ if (!node)
+ return -ENODEV;
+
+	/* a disabled status property prevents creation of platform devices */
+ pdev = of_find_device_by_node(node);
+ of_node_put(node);
+ if (!pdev)
+ return -ENODEV;
+
+ /*
+ * All rockchip subdrivers have probed at this point, so
+ * any device not having a driver now is an external bridge.
+ */
+ drv = pdev->dev.driver;
+ if (!drv) {
+ platform_device_put(pdev);
+ return false;
+ }
+
+ for (i = 0; i < num_rockchip_sub_drivers; i++) {
+ if (rockchip_sub_drivers[i] == to_platform_driver(drv)) {
+ platform_device_put(pdev);
+ return true;
+ }
+ }
+
+ platform_device_put(pdev);
+ return false;
+}
+
static int compare_dev(struct device *dev, void *data)
{
return dev == (struct device *)data;
@@ -442,6 +448,11 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
return 0;
}
+static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
+{
+ rockchip_drm_platform_remove(pdev);
+}
+
static const struct of_device_id rockchip_drm_dt_ids[] = {
{ .compatible = "rockchip,display-subsystem", },
{ /* sentinel */ },
@@ -451,6 +462,7 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
static struct platform_driver rockchip_drm_platform_driver = {
.probe = rockchip_drm_platform_probe,
.remove = rockchip_drm_platform_remove,
+ .shutdown = rockchip_drm_platform_shutdown,
.driver = {
.name = "rockchip-drm",
.of_match_table = rockchip_drm_dt_ids,
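
Editor's note: the new rockchip_drm_endpoint_is_subdriver() helper above is meant to be called while walking a VOP port's endpoints, so that endpoints already claimed by another Rockchip component driver can be skipped. The following is a minimal illustrative sketch of that calling pattern (condensed from the way rockchip_rgb.c, added later in this series, uses it); the demo_count_external_outputs() name is hypothetical and <linux/of_graph.h> plus rockchip_drm_drv.h are assumed to be included.

/* Editor's sketch, not part of the patch. */
static int demo_count_external_outputs(struct device *dev)
{
	struct device_node *port, *ep;
	int external = 0;

	port = of_graph_get_port_by_id(dev->of_node, 0);
	if (!port)
		return -EINVAL;

	for_each_child_of_node(port, ep) {
		int ret = rockchip_drm_endpoint_is_subdriver(ep);

		if (ret > 0)		/* handled by a Rockchip subdriver */
			continue;
		if (ret == -ENODEV)	/* nothing connected on this endpoint */
			continue;

		external++;		/* external bridge or panel */
	}

	of_node_put(port);

	return external;
}
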
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 3a6ebfc26036..21a023a97bb8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -51,7 +51,6 @@ struct rockchip_crtc_state {
struct rockchip_drm_private {
struct drm_fb_helper fbdev_helper;
struct drm_gem_object *fbdev_bo;
- struct drm_atomic_state *state;
struct iommu_domain *domain;
struct mutex mm_lock;
struct drm_mm mm;
@@ -65,6 +64,7 @@ void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
struct device *dev);
int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout);
+int rockchip_drm_endpoint_is_subdriver(struct device_node *ep);
extern struct platform_driver cdn_dp_driver;
extern struct platform_driver dw_hdmi_rockchip_pltfm_driver;
extern struct platform_driver dw_mipi_dsi_driver;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 1359e5c773e4..0c35a88e33dd 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -32,6 +32,7 @@
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/component.h>
+#include <linux/overflow.h>
#include <linux/reset.h>
#include <linux/delay.h>
@@ -41,6 +42,7 @@
#include "rockchip_drm_fb.h"
#include "rockchip_drm_psr.h"
#include "rockchip_drm_vop.h"
+#include "rockchip_rgb.h"
#define VOP_WIN_SET(x, win, name, v) \
vop_reg_set(vop, &win->phy->name, win->base, ~0, v, #name)
@@ -92,6 +94,7 @@ struct vop_win {
struct vop *vop;
};
+struct rockchip_rgb;
struct vop {
struct drm_crtc crtc;
struct device *dev;
@@ -135,6 +138,9 @@ struct vop {
/* vop dclk reset */
struct reset_control *dclk_rst;
+ /* optional internal rgb encoder */
+ struct rockchip_rgb *rgb;
+
struct vop_win win[];
};
@@ -1111,7 +1117,7 @@ static struct drm_connector *vop_get_edp_connector(struct vop *vop)
}
static int vop_crtc_set_crc_source(struct drm_crtc *crtc,
- const char *source_name, size_t *values_cnt)
+ const char *source_name)
{
struct vop *vop = to_vop(crtc);
struct drm_connector *connector;
@@ -1121,8 +1127,6 @@ static int vop_crtc_set_crc_source(struct drm_crtc *crtc,
if (!connector)
return -EINVAL;
- *values_cnt = 3;
-
if (source_name && strcmp(source_name, "auto") == 0)
ret = analogix_dp_start_crc(connector);
else if (!source_name)
@@ -1132,9 +1136,28 @@ static int vop_crtc_set_crc_source(struct drm_crtc *crtc,
return ret;
}
+
+static int
+vop_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
+ size_t *values_cnt)
+{
+ if (source_name && strcmp(source_name, "auto") != 0)
+ return -EINVAL;
+
+ *values_cnt = 3;
+ return 0;
+}
+
#else
static int vop_crtc_set_crc_source(struct drm_crtc *crtc,
- const char *source_name, size_t *values_cnt)
+ const char *source_name)
+{
+ return -ENODEV;
+}
+
+static int
+vop_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
+ size_t *values_cnt)
{
return -ENODEV;
}
@@ -1150,6 +1173,7 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
.enable_vblank = vop_crtc_enable_vblank,
.disable_vblank = vop_crtc_disable_vblank,
.set_crc_source = vop_crtc_set_crc_source,
+ .verify_crc_source = vop_crtc_verify_crc_source,
};
static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
@@ -1561,7 +1585,6 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm_dev = data;
struct vop *vop;
struct resource *res;
- size_t alloc_size;
int ret, irq;
vop_data = of_device_get_match_data(dev);
@@ -1569,8 +1592,8 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
return -ENODEV;
/* Allocate vop struct and its vop_win array */
- alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
- vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
+ vop = devm_kzalloc(dev, struct_size(vop, win, vop_data->win_size),
+ GFP_KERNEL);
if (!vop)
return -ENOMEM;
@@ -1620,6 +1643,14 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
if (ret)
goto err_disable_pm_runtime;
+ if (vop->data->feature & VOP_FEATURE_INTERNAL_RGB) {
+ vop->rgb = rockchip_rgb_init(dev, &vop->crtc, vop->drm_dev);
+ if (IS_ERR(vop->rgb)) {
+ ret = PTR_ERR(vop->rgb);
+ goto err_disable_pm_runtime;
+ }
+ }
+
return 0;
err_disable_pm_runtime:
@@ -1632,6 +1663,9 @@ static void vop_unbind(struct device *dev, struct device *master, void *data)
{
struct vop *vop = dev_get_drvdata(dev);
+ if (vop->rgb)
+ rockchip_rgb_fini(vop->rgb);
+
pm_runtime_disable(dev);
vop_destroy_crtc(vop);
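
Editor's note on the allocation change above: struct_size() from <linux/overflow.h> replaces the open-coded `sizeof(*vop) + sizeof(*vop->win) * n` and saturates to SIZE_MAX if the multiplication would overflow, so the allocation fails cleanly instead of returning an undersized buffer. A self-contained sketch of the same pattern, using hypothetical demo_* names rather than the driver's types:

#include <linux/overflow.h>
#include <linux/slab.h>

/* Hypothetical structures, mirroring the vop/vop_win layout. */
struct demo_win {
	u32 base;
};

struct demo_vop {
	unsigned int nwins;
	struct demo_win win[];	/* flexible array member, as in struct vop */
};

static struct demo_vop *demo_vop_alloc(unsigned int nwins)
{
	struct demo_vop *vop;

	/*
	 * struct_size() computes sizeof(*vop) + nwins * sizeof(vop->win[0])
	 * and saturates to SIZE_MAX on overflow, so kzalloc() fails rather
	 * than handing back a short buffer.
	 */
	vop = kzalloc(struct_size(vop, win, nwins), GFP_KERNEL);
	if (!vop)
		return NULL;

	vop->nwins = nwins;
	return vop;
}
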
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index fcb91041a666..fd5765dfd637 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -162,6 +162,7 @@ struct vop_data {
unsigned int win_size;
#define VOP_FEATURE_OUTPUT_RGB10 BIT(0)
+#define VOP_FEATURE_INTERNAL_RGB BIT(1)
u64 feature;
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
new file mode 100644
index 000000000000..96ac1458a59c
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:
+ * Sandy Huang <[email protected]>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_of.h>
+
+#include <linux/component.h>
+#include <linux/of_graph.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_vop.h"
+
+#define encoder_to_rgb(c) container_of(c, struct rockchip_rgb, encoder)
+
+struct rockchip_rgb {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct drm_bridge *bridge;
+ struct drm_encoder encoder;
+ int output_mode;
+};
+
+static int
+rockchip_rgb_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+ struct drm_connector *connector = conn_state->connector;
+ struct drm_display_info *info = &connector->display_info;
+ u32 bus_format;
+
+ if (info->num_bus_formats)
+ bus_format = info->bus_formats[0];
+ else
+ bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ s->output_mode = ROCKCHIP_OUT_MODE_P666;
+ break;
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ s->output_mode = ROCKCHIP_OUT_MODE_P565;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
+ default:
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
+ break;
+ }
+
+ s->output_type = DRM_MODE_CONNECTOR_LVDS;
+
+ return 0;
+}
+
+static const
+struct drm_encoder_helper_funcs rockchip_rgb_encoder_helper_funcs = {
+ .atomic_check = rockchip_rgb_encoder_atomic_check,
+};
+
+static const struct drm_encoder_funcs rockchip_rgb_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
+ struct drm_crtc *crtc,
+ struct drm_device *drm_dev)
+{
+ struct rockchip_rgb *rgb;
+ struct drm_encoder *encoder;
+ struct device_node *port, *endpoint;
+ u32 endpoint_id;
+ int ret = 0, child_count = 0;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+
+ rgb = devm_kzalloc(dev, sizeof(*rgb), GFP_KERNEL);
+ if (!rgb)
+ return ERR_PTR(-ENOMEM);
+
+ rgb->dev = dev;
+ rgb->drm_dev = drm_dev;
+
+ port = of_graph_get_port_by_id(dev->of_node, 0);
+ if (!port)
+ return ERR_PTR(-EINVAL);
+
+ for_each_child_of_node(port, endpoint) {
+ if (of_property_read_u32(endpoint, "reg", &endpoint_id))
+ endpoint_id = 0;
+
+ if (rockchip_drm_endpoint_is_subdriver(endpoint) > 0)
+ continue;
+
+ child_count++;
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id,
+ &panel, &bridge);
+ if (!ret)
+ break;
+ }
+
+ of_node_put(port);
+
+ /* if the rgb output is not connected to anything, just return */
+ if (!child_count)
+ return NULL;
+
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to find panel or bridge %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ encoder = &rgb->encoder;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ret = drm_encoder_init(drm_dev, encoder, &rockchip_rgb_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret < 0) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to initialize encoder: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ drm_encoder_helper_add(encoder, &rockchip_rgb_encoder_helper_funcs);
+
+ if (panel) {
+ bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS);
+ if (IS_ERR(bridge))
+ return ERR_CAST(bridge);
+ }
+
+ rgb->bridge = bridge;
+
+ ret = drm_bridge_attach(encoder, rgb->bridge, NULL);
+ if (ret) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to attach bridge: %d\n", ret);
+ goto err_free_encoder;
+ }
+
+ return rgb;
+
+err_free_encoder:
+ drm_encoder_cleanup(encoder);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(rockchip_rgb_init);
+
+void rockchip_rgb_fini(struct rockchip_rgb *rgb)
+{
+ drm_panel_bridge_remove(rgb->bridge);
+ drm_encoder_cleanup(&rgb->encoder);
+}
+EXPORT_SYMBOL_GPL(rockchip_rgb_fini);
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h
new file mode 100644
index 000000000000..38b52e63b2b0
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:
+ * Sandy Huang <[email protected]>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_ROCKCHIP_RGB
+struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
+ struct drm_crtc *crtc,
+ struct drm_device *drm_dev);
+void rockchip_rgb_fini(struct rockchip_rgb *rgb);
+#else
+static inline struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
+ struct drm_crtc *crtc,
+ struct drm_device *drm_dev)
+{
+ return NULL;
+}
+
+static inline void rockchip_rgb_fini(struct rockchip_rgb *rgb)
+{
+}
+#endif
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 08023d3ecb76..a6db3cd5544b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -177,6 +177,215 @@ static const struct vop_data rk3126_vop = {
.win_size = ARRAY_SIZE(rk3126_vop_win_data),
};
+static const int px30_vop_intrs[] = {
+ FS_INTR,
+ 0, 0,
+ LINE_FLAG_INTR,
+ 0,
+ BUS_ERROR_INTR,
+ 0, 0,
+ DSP_HOLD_VALID_INTR,
+};
+
+static const struct vop_intr px30_intr = {
+ .intrs = px30_vop_intrs,
+ .nintrs = ARRAY_SIZE(px30_vop_intrs),
+ .line_flag_num[0] = VOP_REG(PX30_LINE_FLAG, 0xfff, 0),
+ .status = VOP_REG_MASK_SYNC(PX30_INTR_STATUS, 0xffff, 0),
+ .enable = VOP_REG_MASK_SYNC(PX30_INTR_EN, 0xffff, 0),
+ .clear = VOP_REG_MASK_SYNC(PX30_INTR_CLEAR, 0xffff, 0),
+};
+
+static const struct vop_common px30_common = {
+ .standby = VOP_REG_SYNC(PX30_SYS_CTRL2, 0x1, 1),
+ .out_mode = VOP_REG(PX30_DSP_CTRL2, 0xf, 16),
+ .dsp_blank = VOP_REG(PX30_DSP_CTRL2, 0x1, 14),
+ .cfg_done = VOP_REG_SYNC(PX30_REG_CFG_DONE, 0x1, 0),
+};
+
+static const struct vop_modeset px30_modeset = {
+ .htotal_pw = VOP_REG(PX30_DSP_HTOTAL_HS_END, 0x0fff0fff, 0),
+ .hact_st_end = VOP_REG(PX30_DSP_HACT_ST_END, 0x0fff0fff, 0),
+ .vtotal_pw = VOP_REG(PX30_DSP_VTOTAL_VS_END, 0x0fff0fff, 0),
+ .vact_st_end = VOP_REG(PX30_DSP_VACT_ST_END, 0x0fff0fff, 0),
+};
+
+static const struct vop_output px30_output = {
+ .rgb_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0xf, 1),
+ .mipi_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0xf, 25),
+ .rgb_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 0),
+ .mipi_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 24),
+};
+
+static const struct vop_scl_regs px30_win_scl = {
+ .scale_yrgb_x = VOP_REG(PX30_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
+ .scale_yrgb_y = VOP_REG(PX30_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
+ .scale_cbcr_x = VOP_REG(PX30_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
+ .scale_cbcr_y = VOP_REG(PX30_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
+};
+
+static const struct vop_win_phy px30_win0_data = {
+ .scl = &px30_win_scl,
+ .data_formats = formats_win_full,
+ .nformats = ARRAY_SIZE(formats_win_full),
+ .enable = VOP_REG(PX30_WIN0_CTRL0, 0x1, 0),
+ .format = VOP_REG(PX30_WIN0_CTRL0, 0x7, 1),
+ .rb_swap = VOP_REG(PX30_WIN0_CTRL0, 0x1, 12),
+ .act_info = VOP_REG(PX30_WIN0_ACT_INFO, 0xffffffff, 0),
+ .dsp_info = VOP_REG(PX30_WIN0_DSP_INFO, 0xffffffff, 0),
+ .dsp_st = VOP_REG(PX30_WIN0_DSP_ST, 0xffffffff, 0),
+ .yrgb_mst = VOP_REG(PX30_WIN0_YRGB_MST0, 0xffffffff, 0),
+ .uv_mst = VOP_REG(PX30_WIN0_CBR_MST0, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(PX30_WIN0_VIR, 0x1fff, 0),
+ .uv_vir = VOP_REG(PX30_WIN0_VIR, 0x1fff, 16),
+};
+
+static const struct vop_win_phy px30_win1_data = {
+ .data_formats = formats_win_lite,
+ .nformats = ARRAY_SIZE(formats_win_lite),
+ .enable = VOP_REG(PX30_WIN1_CTRL0, 0x1, 0),
+ .format = VOP_REG(PX30_WIN1_CTRL0, 0x7, 4),
+ .rb_swap = VOP_REG(PX30_WIN1_CTRL0, 0x1, 12),
+ .dsp_info = VOP_REG(PX30_WIN1_DSP_INFO, 0xffffffff, 0),
+ .dsp_st = VOP_REG(PX30_WIN1_DSP_ST, 0xffffffff, 0),
+ .yrgb_mst = VOP_REG(PX30_WIN1_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(PX30_WIN1_VIR, 0x1fff, 0),
+};
+
+static const struct vop_win_phy px30_win2_data = {
+ .data_formats = formats_win_lite,
+ .nformats = ARRAY_SIZE(formats_win_lite),
+ .gate = VOP_REG(PX30_WIN2_CTRL0, 0x1, 4),
+ .enable = VOP_REG(PX30_WIN2_CTRL0, 0x1, 0),
+ .format = VOP_REG(PX30_WIN2_CTRL0, 0x3, 5),
+ .rb_swap = VOP_REG(PX30_WIN2_CTRL0, 0x1, 20),
+ .dsp_info = VOP_REG(PX30_WIN2_DSP_INFO0, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(PX30_WIN2_DSP_ST0, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(PX30_WIN2_MST0, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(PX30_WIN2_VIR0_1, 0x1fff, 0),
+};
+
+static const struct vop_win_data px30_vop_big_win_data[] = {
+ { .base = 0x00, .phy = &px30_win0_data,
+ .type = DRM_PLANE_TYPE_PRIMARY },
+ { .base = 0x00, .phy = &px30_win1_data,
+ .type = DRM_PLANE_TYPE_OVERLAY },
+ { .base = 0x00, .phy = &px30_win2_data,
+ .type = DRM_PLANE_TYPE_CURSOR },
+};
+
+static const struct vop_data px30_vop_big = {
+ .intr = &px30_intr,
+ .feature = VOP_FEATURE_INTERNAL_RGB,
+ .common = &px30_common,
+ .modeset = &px30_modeset,
+ .output = &px30_output,
+ .win = px30_vop_big_win_data,
+ .win_size = ARRAY_SIZE(px30_vop_big_win_data),
+};
+
+static const struct vop_win_data px30_vop_lit_win_data[] = {
+ { .base = 0x00, .phy = &px30_win1_data,
+ .type = DRM_PLANE_TYPE_PRIMARY },
+};
+
+static const struct vop_data px30_vop_lit = {
+ .intr = &px30_intr,
+ .feature = VOP_FEATURE_INTERNAL_RGB,
+ .common = &px30_common,
+ .modeset = &px30_modeset,
+ .output = &px30_output,
+ .win = px30_vop_lit_win_data,
+ .win_size = ARRAY_SIZE(px30_vop_lit_win_data),
+};
+
+static const struct vop_scl_regs rk3188_win_scl = {
+ .scale_yrgb_x = VOP_REG(RK3188_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
+ .scale_yrgb_y = VOP_REG(RK3188_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
+ .scale_cbcr_x = VOP_REG(RK3188_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
+ .scale_cbcr_y = VOP_REG(RK3188_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
+};
+
+static const struct vop_win_phy rk3188_win0_data = {
+ .scl = &rk3188_win_scl,
+ .data_formats = formats_win_full,
+ .nformats = ARRAY_SIZE(formats_win_full),
+ .enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 0),
+ .format = VOP_REG(RK3188_SYS_CTRL, 0x7, 3),
+ .rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 15),
+ .act_info = VOP_REG(RK3188_WIN0_ACT_INFO, 0x1fff1fff, 0),
+ .dsp_info = VOP_REG(RK3188_WIN0_DSP_INFO, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(RK3188_WIN0_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(RK3188_WIN0_YRGB_MST0, 0xffffffff, 0),
+ .uv_mst = VOP_REG(RK3188_WIN0_CBR_MST0, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(RK3188_WIN_VIR, 0x1fff, 0),
+};
+
+static const struct vop_win_phy rk3188_win1_data = {
+ .data_formats = formats_win_lite,
+ .nformats = ARRAY_SIZE(formats_win_lite),
+ .enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 1),
+ .format = VOP_REG(RK3188_SYS_CTRL, 0x7, 6),
+ .rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 19),
+ /* no act_info on window1 */
+ .dsp_info = VOP_REG(RK3188_WIN1_DSP_INFO, 0x07ff07ff, 0),
+ .dsp_st = VOP_REG(RK3188_WIN1_DSP_ST, 0x0fff0fff, 0),
+ .yrgb_mst = VOP_REG(RK3188_WIN1_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(RK3188_WIN_VIR, 0x1fff, 16),
+};
+
+static const struct vop_modeset rk3188_modeset = {
+ .htotal_pw = VOP_REG(RK3188_DSP_HTOTAL_HS_END, 0x0fff0fff, 0),
+ .hact_st_end = VOP_REG(RK3188_DSP_HACT_ST_END, 0x0fff0fff, 0),
+ .vtotal_pw = VOP_REG(RK3188_DSP_VTOTAL_VS_END, 0x0fff0fff, 0),
+ .vact_st_end = VOP_REG(RK3188_DSP_VACT_ST_END, 0x0fff0fff, 0),
+};
+
+static const struct vop_output rk3188_output = {
+ .pin_pol = VOP_REG(RK3188_DSP_CTRL0, 0xf, 4),
+};
+
+static const struct vop_common rk3188_common = {
+ .gate_en = VOP_REG(RK3188_SYS_CTRL, 0x1, 31),
+ .standby = VOP_REG(RK3188_SYS_CTRL, 0x1, 30),
+ .out_mode = VOP_REG(RK3188_DSP_CTRL0, 0xf, 0),
+ .cfg_done = VOP_REG(RK3188_REG_CFG_DONE, 0x1, 0),
+ .dsp_blank = VOP_REG(RK3188_DSP_CTRL1, 0x3, 24),
+};
+
+static const struct vop_win_data rk3188_vop_win_data[] = {
+ { .base = 0x00, .phy = &rk3188_win0_data,
+ .type = DRM_PLANE_TYPE_PRIMARY },
+ { .base = 0x00, .phy = &rk3188_win1_data,
+ .type = DRM_PLANE_TYPE_CURSOR },
+};
+
+static const int rk3188_vop_intrs[] = {
+ 0,
+ FS_INTR,
+ LINE_FLAG_INTR,
+ BUS_ERROR_INTR,
+};
+
+static const struct vop_intr rk3188_vop_intr = {
+ .intrs = rk3188_vop_intrs,
+ .nintrs = ARRAY_SIZE(rk3188_vop_intrs),
+ .line_flag_num[0] = VOP_REG(RK3188_INT_STATUS, 0xfff, 12),
+ .status = VOP_REG(RK3188_INT_STATUS, 0xf, 0),
+ .enable = VOP_REG(RK3188_INT_STATUS, 0xf, 4),
+ .clear = VOP_REG(RK3188_INT_STATUS, 0xf, 8),
+};
+
+static const struct vop_data rk3188_vop = {
+ .intr = &rk3188_vop_intr,
+ .common = &rk3188_common,
+ .modeset = &rk3188_modeset,
+ .output = &rk3188_output,
+ .win = rk3188_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3188_vop_win_data),
+ .feature = VOP_FEATURE_INTERNAL_RGB,
+};
+
static const struct vop_scl_extension rk3288_win_full_scl_ext = {
.cbcr_vsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 31),
.cbcr_vsu_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 30),
@@ -541,6 +750,12 @@ static const struct of_device_id vop_driver_dt_match[] = {
.data = &rk3036_vop },
{ .compatible = "rockchip,rk3126-vop",
.data = &rk3126_vop },
+ { .compatible = "rockchip,px30-vop-big",
+ .data = &px30_vop_big },
+ { .compatible = "rockchip,px30-vop-lit",
+ .data = &px30_vop_lit },
+ { .compatible = "rockchip,rk3188-vop",
+ .data = &rk3188_vop },
{ .compatible = "rockchip,rk3288-vop",
.data = &rk3288_vop },
{ .compatible = "rockchip,rk3368-vop",
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
index f81b510ea99c..7348c68352ed 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
@@ -884,4 +884,103 @@
#define RK3126_WIN1_DSP_ST 0x54
/* rk3126 register definition end */
+/* px30 register definition */
+#define PX30_REG_CFG_DONE 0x00000
+#define PX30_VERSION 0x00004
+#define PX30_DSP_BG 0x00008
+#define PX30_MCU_CTRL 0x0000c
+#define PX30_SYS_CTRL0 0x00010
+#define PX30_SYS_CTRL1 0x00014
+#define PX30_SYS_CTRL2 0x00018
+#define PX30_DSP_CTRL0 0x00020
+#define PX30_DSP_CTRL2 0x00028
+#define PX30_VOP_STATUS 0x0002c
+#define PX30_LINE_FLAG 0x00030
+#define PX30_INTR_EN 0x00034
+#define PX30_INTR_CLEAR 0x00038
+#define PX30_INTR_STATUS 0x0003c
+#define PX30_WIN0_CTRL0 0x00050
+#define PX30_WIN0_CTRL1 0x00054
+#define PX30_WIN0_COLOR_KEY 0x00058
+#define PX30_WIN0_VIR 0x0005c
+#define PX30_WIN0_YRGB_MST0 0x00060
+#define PX30_WIN0_CBR_MST0 0x00064
+#define PX30_WIN0_ACT_INFO 0x00068
+#define PX30_WIN0_DSP_INFO 0x0006c
+#define PX30_WIN0_DSP_ST 0x00070
+#define PX30_WIN0_SCL_FACTOR_YRGB 0x00074
+#define PX30_WIN0_SCL_FACTOR_CBR 0x00078
+#define PX30_WIN0_SCL_OFFSET 0x0007c
+#define PX30_WIN0_ALPHA_CTRL 0x00080
+#define PX30_WIN1_CTRL0 0x00090
+#define PX30_WIN1_CTRL1 0x00094
+#define PX30_WIN1_VIR 0x00098
+#define PX30_WIN1_MST 0x000a0
+#define PX30_WIN1_DSP_INFO 0x000a4
+#define PX30_WIN1_DSP_ST 0x000a8
+#define PX30_WIN1_COLOR_KEY 0x000ac
+#define PX30_WIN1_ALPHA_CTRL 0x000bc
+#define PX30_HWC_CTRL0 0x000e0
+#define PX30_HWC_CTRL1 0x000e4
+#define PX30_HWC_MST 0x000e8
+#define PX30_HWC_DSP_ST 0x000ec
+#define PX30_HWC_ALPHA_CTRL 0x000f0
+#define PX30_DSP_HTOTAL_HS_END 0x00100
+#define PX30_DSP_HACT_ST_END 0x00104
+#define PX30_DSP_VTOTAL_VS_END 0x00108
+#define PX30_DSP_VACT_ST_END 0x0010c
+#define PX30_DSP_VS_ST_END_F1 0x00110
+#define PX30_DSP_VACT_ST_END_F1 0x00114
+#define PX30_BCSH_CTRL 0x00160
+#define PX30_BCSH_COL_BAR 0x00164
+#define PX30_BCSH_BCS 0x00168
+#define PX30_BCSH_H 0x0016c
+#define PX30_FRC_LOWER01_0 0x00170
+#define PX30_FRC_LOWER01_1 0x00174
+#define PX30_FRC_LOWER10_0 0x00178
+#define PX30_FRC_LOWER10_1 0x0017c
+#define PX30_FRC_LOWER11_0 0x00180
+#define PX30_FRC_LOWER11_1 0x00184
+#define PX30_MCU_RW_BYPASS_PORT 0x0018c
+#define PX30_WIN2_CTRL0 0x00190
+#define PX30_WIN2_CTRL1 0x00194
+#define PX30_WIN2_VIR0_1 0x00198
+#define PX30_WIN2_VIR2_3 0x0019c
+#define PX30_WIN2_MST0 0x001a0
+#define PX30_WIN2_DSP_INFO0 0x001a4
+#define PX30_WIN2_DSP_ST0 0x001a8
+#define PX30_WIN2_COLOR_KEY 0x001ac
+#define PX30_WIN2_ALPHA_CTRL 0x001bc
+#define PX30_BLANKING_VALUE 0x001f4
+#define PX30_FLAG_REG_FRM_VALID 0x001f8
+#define PX30_FLAG_REG 0x001fc
+#define PX30_HWC_LUT_ADDR 0x00600
+#define PX30_GAMMA_LUT_ADDR 0x00a00
+/* px30 register definition end */
+
+/* rk3188 register definition */
+#define RK3188_SYS_CTRL 0x00
+#define RK3188_DSP_CTRL0 0x04
+#define RK3188_DSP_CTRL1 0x08
+#define RK3188_INT_STATUS 0x10
+#define RK3188_WIN0_YRGB_MST0 0x20
+#define RK3188_WIN0_CBR_MST0 0x24
+#define RK3188_WIN0_YRGB_MST1 0x28
+#define RK3188_WIN0_CBR_MST1 0x2c
+#define RK3188_WIN_VIR 0x30
+#define RK3188_WIN0_ACT_INFO 0x34
+#define RK3188_WIN0_DSP_INFO 0x38
+#define RK3188_WIN0_DSP_ST 0x3c
+#define RK3188_WIN0_SCL_FACTOR_YRGB 0x40
+#define RK3188_WIN0_SCL_FACTOR_CBR 0x44
+#define RK3188_WIN1_MST 0x4c
+#define RK3188_WIN1_DSP_INFO 0x50
+#define RK3188_WIN1_DSP_ST 0x54
+#define RK3188_DSP_HTOTAL_HS_END 0x6c
+#define RK3188_DSP_HACT_ST_END 0x70
+#define RK3188_DSP_VTOTAL_VS_END 0x74
+#define RK3188_DSP_VACT_ST_END 0x78
+#define RK3188_REG_CFG_DONE 0x90
+/* rk3188 register definition end */
+
#endif /* _ROCKCHIP_VOP_REG_H */
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index 0426d66660d1..61bbe8e8bcc5 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config DRM_SHMOBILE
tristate "DRM Support for SH Mobile"
depends on DRM && ARM
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_backlight.c b/drivers/gpu/drm/shmobile/shmob_drm_backlight.c
index 33dd41afea0e..f6628a5ee95f 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_backlight.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_backlight.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* shmob_drm_backlight.c -- SH Mobile DRM Backlight
*
* Copyright (C) 2012 Renesas Electronics Corporation
*
* Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/backlight.h>
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_backlight.h b/drivers/gpu/drm/shmobile/shmob_drm_backlight.h
index bac719ecc301..d9abb7a60be5 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_backlight.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_backlight.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* shmob_drm_backlight.h -- SH Mobile DRM Backlight
*
* Copyright (C) 2012 Renesas Electronics Corporation
*
* Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __SHMOB_DRM_BACKLIGHT_H__
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index fc66167b0641..499b5fdb869f 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* shmob_drm_crtc.c -- SH Mobile DRM CRTCs
*
* Copyright (C) 2012 Renesas Electronics Corporation
*
* Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/backlight.h>
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
index c11f421737dc..9ca6920641d8 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* shmob_drm_crtc.h -- SH Mobile DRM CRTCs
*
* Copyright (C) 2012 Renesas Electronics Corporation
*
* Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __SHMOB_DRM_CRTC_H__
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 592572554eb0..6ececad6f845 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* shmob_drm_drv.c -- SH Mobile DRM driver
*
* Copyright (C) 2012 Renesas Electronics Corporation
*
* Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/clk.h>
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.h b/drivers/gpu/drm/shmobile/shmob_drm_drv.h
index 088a6e55fa29..80dc4b1020aa 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* shmob_drm.h -- SH Mobile DRM driver
*
* Copyright (C) 2012 Renesas Electronics Corporation
*
* Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __SHMOB_DRM_DRV_H__
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
index 447638581c08..a17268444c6d 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* shmob_drm_kms.c -- SH Mobile DRM Mode Setting
*
* Copyright (C) 2012 Renesas Electronics Corporation
*
* Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.h b/drivers/gpu/drm/shmobile/shmob_drm_kms.h
index 753e2817dc2c..6ec2b732bb94 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* shmob_drm_kms.h -- SH Mobile DRM Mode Setting
*
* Copyright (C) 2012 Renesas Electronics Corporation
*
* Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __SHMOB_DRM_KMS_H__
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
index 1d0359f713ca..1d1ee5e51351 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_plane.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* shmob_drm_plane.c -- SH Mobile DRM Planes
*
* Copyright (C) 2012 Renesas Electronics Corporation
*
* Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.h b/drivers/gpu/drm/shmobile/shmob_drm_plane.h
index a58cc1fc3240..bae67cc8c628 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_plane.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* shmob_drm_plane.h -- SH Mobile DRM Planes
*
* Copyright (C) 2012 Renesas Electronics Corporation
*
* Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __SHMOB_DRM_PLANE_H__
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_regs.h b/drivers/gpu/drm/shmobile/shmob_drm_regs.h
index ea17d4415b9e..9eb0b3d01df8 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_regs.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_regs.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* shmob_drm_regs.h -- SH Mobile DRM registers
*
* Copyright (C) 2012 Renesas Electronics Corporation
*
* Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __SHMOB_DRM_REGS_H__
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 49438337f70d..19b9b5ed1297 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -721,7 +721,6 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
return 0;
err_sysfs:
- drm_bridge_remove(bridge);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 34cdc4644435..ccf718404a1c 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1315,7 +1315,6 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
return 0;
err_sysfs:
- drm_bridge_remove(bridge);
hdmi->drm_connector = NULL;
return -EINVAL;
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index d7950b52a1fd..bf49c55b0f2c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -34,6 +34,9 @@
struct sun4i_backend_quirks {
/* backend <-> TCON muxing selection done in backend */
bool needs_output_muxing;
+
+ /* alpha at the lowest z position is not always supported */
+ bool supports_lowest_plane_alpha;
};
static const u32 sunxi_rgb2yuv_coef[12] = {
@@ -60,32 +63,6 @@ static const u32 sunxi_bt601_yuv2rgb_coef[12] = {
0x000004a7, 0x00000812, 0x00000000, 0x00002eb1,
};
-static inline bool sun4i_backend_format_is_planar_yuv(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YUV444:
- return true;
- default:
- return false;
- }
-}
-
-static inline bool sun4i_backend_format_is_packed_yuv422(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- return true;
-
- default:
- return false;
- }
-}
-
static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine)
{
int i;
@@ -215,7 +192,8 @@ static int sun4i_backend_update_yuv_format(struct sun4i_backend *backend,
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
- uint32_t format = fb->format->format;
+ const struct drm_format_info *format = fb->format;
+ const uint32_t fmt = format->format;
u32 val = SUN4I_BACKEND_IYUVCTL_EN;
int i;
@@ -233,16 +211,16 @@ static int sun4i_backend_update_yuv_format(struct sun4i_backend *backend,
SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN);
/* TODO: Add support for the multi-planar YUV formats */
- if (sun4i_backend_format_is_packed_yuv422(format))
+ if (format->num_planes == 1)
val |= SUN4I_BACKEND_IYUVCTL_FBFMT_PACKED_YUV422;
else
- DRM_DEBUG_DRIVER("Unsupported YUV format (0x%x)\n", format);
+ DRM_DEBUG_DRIVER("Unsupported YUV format (0x%x)\n", fmt);
/*
* Allwinner seems to list the pixel sequence from right to left, while
* DRM lists it from left to right.
*/
- switch (format) {
+ switch (fmt) {
case DRM_FORMAT_YUYV:
val |= SUN4I_BACKEND_IYUVCTL_FBPS_VYUY;
break;
@@ -257,7 +235,7 @@ static int sun4i_backend_update_yuv_format(struct sun4i_backend *backend,
break;
default:
DRM_DEBUG_DRIVER("Unsupported YUV pixel sequence (0x%x)\n",
- format);
+ fmt);
}
regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVCTL_REG, val);
@@ -457,12 +435,14 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
struct drm_crtc_state *crtc_state)
{
struct drm_plane_state *plane_states[SUN4I_BACKEND_NUM_LAYERS] = { 0 };
+ struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
struct drm_atomic_state *state = crtc_state->state;
struct drm_device *drm = state->dev;
struct drm_plane *plane;
unsigned int num_planes = 0;
unsigned int num_alpha_planes = 0;
unsigned int num_frontend_planes = 0;
+ unsigned int num_alpha_planes_max = 1;
unsigned int num_yuv_planes = 0;
unsigned int current_pipe = 0;
unsigned int i;
@@ -526,33 +506,40 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
* the layer with the highest priority.
*
* The second step is the actual alpha blending, that takes
- * the two pipes as input, and uses the eventual alpha
+ * the two pipes as input, and uses the potential alpha
* component to do the transparency between the two.
*
- * This two steps scenario makes us unable to guarantee a
+ * This two-step scenario makes us unable to guarantee a
* robust alpha blending between the 4 layers in all
* situations, since this means that we need to have one layer
* with alpha at the lowest position of our two pipes.
*
- * However, we cannot even do that, since the hardware has a
- * bug where the lowest plane of the lowest pipe (pipe 0,
- * priority 0), if it has any alpha, will discard the pixel
- * entirely and just display the pixels in the background
- * color (black by default).
+ * However, we cannot even do that on every platform, since
+ * the hardware has a bug where the lowest plane of the lowest
+ * pipe (pipe 0, priority 0), if it has any alpha, will
+ * discard the pixel data entirely and just display the pixels
+ * in the background color (black by default).
*
- * This means that we effectively have only three valid
- * configurations with alpha, all of them with the alpha being
- * on pipe1 with the lowest position, which can be 1, 2 or 3
- * depending on the number of planes and their zpos.
+ * This means that on the affected platforms, we effectively
+ * have only three valid configurations with alpha, all of
+ * them with the alpha being on pipe1 with the lowest
+ * position, which can be 1, 2 or 3 depending on the number of
+ * planes and their zpos.
*/
- if (num_alpha_planes > SUN4I_BACKEND_NUM_ALPHA_LAYERS) {
+
+	/* Platforms not affected by the issue above can take one more alpha plane */
+ if (backend->quirks->supports_lowest_plane_alpha)
+ num_alpha_planes_max++;
+
+ if (num_alpha_planes > num_alpha_planes_max) {
DRM_DEBUG_DRIVER("Too many planes with alpha, rejecting...\n");
return -EINVAL;
}
	/* Not all platforms can have an alpha plane at the lowest position */
- if (plane_states[0]->fb->format->has_alpha ||
- (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))
+ if (!backend->quirks->supports_lowest_plane_alpha &&
+ (plane_states[0]->fb->format->has_alpha ||
+ (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE)))
return -EINVAL;
for (i = 1; i < num_planes; i++) {
@@ -876,6 +863,8 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
: SUN4I_BACKEND_MODCTL_OUT_LCD0));
}
+ backend->quirks = quirks;
+
return 0;
err_disable_ram_clk:
@@ -935,9 +924,11 @@ static const struct sun4i_backend_quirks sun6i_backend_quirks = {
static const struct sun4i_backend_quirks sun7i_backend_quirks = {
.needs_output_muxing = true,
+ .supports_lowest_plane_alpha = true,
};
static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
+ .supports_lowest_plane_alpha = true,
};
static const struct sun4i_backend_quirks sun9i_backend_quirks = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.h b/drivers/gpu/drm/sun4i/sun4i_backend.h
index 4caee0392fa4..e3d4c6035eb2 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.h
@@ -167,7 +167,6 @@
#define SUN4I_BACKEND_PIPE_OFF(p) (0x5000 + (0x400 * (p)))
#define SUN4I_BACKEND_NUM_LAYERS 4
-#define SUN4I_BACKEND_NUM_ALPHA_LAYERS 1
#define SUN4I_BACKEND_NUM_FRONTEND_LAYERS 1
#define SUN4I_BACKEND_NUM_YUV_PLANES 1
@@ -187,6 +186,8 @@ struct sun4i_backend {
/* Protects against races in the frontend teardown */
spinlock_t frontend_lock;
bool frontend_teardown;
+
+ const struct sun4i_backend_quirks *quirks;
};
static inline struct sun4i_backend *
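
Editor's note: the supports_lowest_plane_alpha flag added above follows the usual per-SoC quirk pattern, where a const quirks structure is attached to the OF match entry, fetched at bind time, and consulted from atomic_check. A hedged, self-contained sketch of that pattern; the demo_* names are illustrative and not taken from the driver.

#include <linux/of_device.h>

/* Hypothetical per-SoC capability flags. */
struct demo_quirks {
	bool supports_lowest_plane_alpha;
};

static const struct demo_quirks demo_soc_a_quirks = {
	.supports_lowest_plane_alpha = true,
};

static const struct of_device_id demo_of_table[] = {
	{ .compatible = "vendor,soc-a", .data = &demo_soc_a_quirks },
	{ /* sentinel */ }
};

static unsigned int demo_max_alpha_planes(struct device *dev)
{
	const struct demo_quirks *quirks = of_device_get_match_data(dev);
	unsigned int max = 1;

	/* SoCs without the lowest-plane alpha bug may take one more. */
	if (quirks && quirks->supports_lowest_plane_alpha)
		max++;

	return max;
}
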
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index dd19d674055c..1e41c3f5fd6d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -61,22 +61,6 @@ static struct drm_driver sun4i_drv_driver = {
/* Frame Buffer Operations */
};
-static void sun4i_remove_framebuffers(void)
-{
- struct apertures_struct *ap;
-
- ap = alloc_apertures(1);
- if (!ap)
- return;
-
- /* The framebuffer can be located anywhere in RAM */
- ap->ranges[0].base = 0;
- ap->ranges[0].size = ~0;
-
- drm_fb_helper_remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false);
- kfree(ap);
-}
-
static int sun4i_drv_bind(struct device *dev)
{
struct drm_device *drm;
@@ -119,7 +103,7 @@ static int sun4i_drv_bind(struct device *dev)
drm->irq_enabled = true;
/* Remove early framebuffers (ie. simplefb) */
- sun4i_remove_framebuffers();
+ drm_fb_helper_remove_conflicting_framebuffers(NULL, "sun4i-drm-fb", false);
/* Create our framebuffer */
ret = sun4i_framebuffer_init(drm);
@@ -421,6 +405,7 @@ static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun8i-r40-display-engine" },
{ .compatible = "allwinner,sun8i-v3s-display-engine" },
{ .compatible = "allwinner,sun9i-a80-display-engine" },
+ { .compatible = "allwinner,sun50i-a64-display-engine" },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_drv_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index 750ad24de1d7..78f77af8805a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -35,9 +35,7 @@ static void sun4i_backend_layer_reset(struct drm_plane *plane)
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state) {
- plane->state = &state->state;
- plane->state->plane = plane;
- plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
+ __drm_atomic_helper_plane_reset(plane, &state->state);
plane->state->zpos = layer->id;
}
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 3fb084f802e2..c78cd35a1294 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -12,11 +12,13 @@
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder.h>
#include <drm/drm_modes.h>
#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
#include <uapi/drm/drm_mode.h>
@@ -35,6 +37,7 @@
#include "sun4i_rgb.h"
#include "sun4i_tcon.h"
#include "sun6i_mipi_dsi.h"
+#include "sun8i_tcon_top.h"
#include "sunxi_engine.h"
static struct drm_connector *sun4i_tcon_get_connector(const struct drm_encoder *encoder)
@@ -275,10 +278,64 @@ static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon,
SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay));
}
+static void sun4i_tcon0_mode_set_dithering(struct sun4i_tcon *tcon,
+ const struct drm_connector *connector)
+{
+ u32 bus_format = 0;
+ u32 val = 0;
+
+ /* XXX Would this ever happen? */
+ if (!connector)
+ return;
+
+ /*
+ * FIXME: Undocumented bits
+ *
+ * The whole dithering process and these parameters are not
+ * explained in the vendor documents or BSP kernel code.
+ */
+ regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_PR_REG, 0x11111111);
+ regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_PG_REG, 0x11111111);
+ regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_PB_REG, 0x11111111);
+ regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_LR_REG, 0x11111111);
+ regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_LG_REG, 0x11111111);
+ regmap_write(tcon->regs, SUN4I_TCON0_FRM_SEED_LB_REG, 0x11111111);
+ regmap_write(tcon->regs, SUN4I_TCON0_FRM_TBL0_REG, 0x01010000);
+ regmap_write(tcon->regs, SUN4I_TCON0_FRM_TBL1_REG, 0x15151111);
+ regmap_write(tcon->regs, SUN4I_TCON0_FRM_TBL2_REG, 0x57575555);
+ regmap_write(tcon->regs, SUN4I_TCON0_FRM_TBL3_REG, 0x7f7f7777);
+
+ /* Do dithering if panel only supports 6 bits per color */
+ if (connector->display_info.bpc == 6)
+ val |= SUN4I_TCON0_FRM_CTL_EN;
+
+ if (connector->display_info.num_bus_formats == 1)
+ bus_format = connector->display_info.bus_formats[0];
+
+ /* Check the connection format */
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ /* R and B components are only 5 bits deep */
+ val |= SUN4I_TCON0_FRM_CTL_MODE_R;
+ val |= SUN4I_TCON0_FRM_CTL_MODE_B;
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ /* Fall through: enable dithering */
+ val |= SUN4I_TCON0_FRM_CTL_EN;
+ break;
+ }
+
+ /* Write dithering settings */
+ regmap_write(tcon->regs, SUN4I_TCON_FRM_CTL_REG, val);
+}
+
static void sun4i_tcon0_mode_set_cpu(struct sun4i_tcon *tcon,
- struct mipi_dsi_device *device,
+ const struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
+ /* TODO support normal CPU interface modes */
+ struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder);
+ struct mipi_dsi_device *device = dsi->device;
u8 bpp = mipi_dsi_pixel_format_to_bpp(device->format);
u8 lanes = device->lanes;
u32 block_space, start_delay;
@@ -289,6 +346,9 @@ static void sun4i_tcon0_mode_set_cpu(struct sun4i_tcon *tcon,
sun4i_tcon0_mode_set_common(tcon, mode);
+ /* Set dithering if needed */
+ sun4i_tcon0_mode_set_dithering(tcon, sun4i_tcon_get_connector(encoder));
+
regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
SUN4I_TCON0_CTL_IF_MASK,
SUN4I_TCON0_CTL_IF_8080);
@@ -354,6 +414,9 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
tcon->dclk_max_div = 7;
sun4i_tcon0_mode_set_common(tcon, mode);
+ /* Set dithering if needed */
+ sun4i_tcon0_mode_set_dithering(tcon, sun4i_tcon_get_connector(encoder));
+
/* Adjust clock delay */
clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
@@ -427,6 +490,9 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
tcon->dclk_max_div = 127;
sun4i_tcon0_mode_set_common(tcon, mode);
+ /* Set dithering if needed */
+ sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector);
+
/* Adjust clock delay */
clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
@@ -474,6 +540,33 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
+ /*
+	 * On the A20 and similar SoCs, the only way to achieve a rising
+	 * (positive) edge is to set the dclk clock phase to 2/3 (240°).
+	 * By default the TCON works on the falling (negative) edge, which
+	 * is why the phase is set to 0 in that case.
+	 * Unfortunately there is no way to logically invert dclk through
+	 * the IO_POL register.
+	 * The only approach that works, triple-checked with a scope, is to
+	 * use a clock phase of 0° for the falling edge and 240° for the
+	 * rising edge.
+	 * On the A33 and similar SoCs there would be a 90° phase option,
+	 * but it also divides dclk by 2.
+	 * The following code avoids scattering quirks across the TCON and
+	 * DOTCLOCK drivers.
+ */
+ if (!IS_ERR(tcon->panel)) {
+ struct drm_panel *panel = tcon->panel;
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_info display_info = connector->display_info;
+
+ if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
+ clk_set_phase(tcon->dclk, 240);
+
+ if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ clk_set_phase(tcon->dclk, 0);
+ }
+
regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
val);
@@ -581,16 +674,10 @@ void sun4i_tcon_mode_set(struct sun4i_tcon *tcon,
const struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
- struct sun6i_dsi *dsi;
-
switch (encoder->encoder_type) {
case DRM_MODE_ENCODER_DSI:
- /*
- * This is not really elegant, but it's the "cleaner"
- * way I could think of...
- */
- dsi = encoder_to_sun6i_dsi(encoder);
- sun4i_tcon0_mode_set_cpu(tcon, dsi->device, mode);
+		/* DSI is a special case of the CPU interface */
+ sun4i_tcon0_mode_set_cpu(tcon, encoder, mode);
break;
case DRM_MODE_ENCODER_LVDS:
sun4i_tcon0_mode_set_lvds(tcon, encoder, mode);
@@ -880,6 +967,37 @@ static struct sunxi_engine *sun4i_tcon_get_engine_by_id(struct sun4i_drv *drv,
return ERR_PTR(-EINVAL);
}
+static bool sun4i_tcon_connected_to_tcon_top(struct device_node *node)
+{
+ struct device_node *remote;
+ bool ret = false;
+
+ remote = of_graph_get_remote_node(node, 0, -1);
+ if (remote) {
+ ret = !!(IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
+ of_match_node(sun8i_tcon_top_of_table, remote));
+ of_node_put(remote);
+ }
+
+ return ret;
+}
+
+static int sun4i_tcon_get_index(struct sun4i_drv *drv)
+{
+ struct list_head *pos;
+ int size = 0;
+
+ /*
+	 * Because the TCON is added to the list at the end of probe
+	 * (after this function is called), the index of the current TCON
+	 * is the same as the current TCON list size.
+ */
+ list_for_each(pos, &drv->tcon_list)
+ ++size;
+
+ return size;
+}
+
/*
* On SoCs with the old display pipeline design (Display Engine 1.0),
* we assumed the TCON was always tied to just one backend. However
@@ -928,8 +1046,24 @@ static struct sunxi_engine *sun4i_tcon_find_engine(struct sun4i_drv *drv,
* connections between the backend and TCON?
*/
if (of_get_child_count(port) > 1) {
- /* Get our ID directly from an upstream endpoint */
- int id = sun4i_tcon_of_get_id_from_port(port);
+ int id;
+
+ /*
+		 * When the pipeline has the same number of TCONs and engines,
+		 * which are represented by frontends/backends (DE1) or mixers
+		 * (DE2), we match them by their respective IDs. However, if
+		 * the pipeline contains a TCON TOP, chances are that there are
+		 * either more TCONs than engines (R40) or TCONs with
+		 * non-consecutive ids (H6). In that case it is easier to just
+		 * use the TCON index in the list as the id. That means that on
+		 * the R40, any 2 of the 4 TCONs can be enabled in the DT
+		 * (there are 2 mixers). Due to the design of TCON TOP, the
+		 * remaining 2 TCONs can't be connected to anything anyway.
+ */
+ if (sun4i_tcon_connected_to_tcon_top(node))
+ id = sun4i_tcon_get_index(drv);
+ else
+ id = sun4i_tcon_of_get_id_from_port(port);
/* Get our engine by matching our ID */
engine = sun4i_tcon_get_engine_by_id(drv, id);
@@ -1244,6 +1378,47 @@ static int sun6i_tcon_set_mux(struct sun4i_tcon *tcon,
return 0;
}
+static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder)
+{
+ struct device_node *port, *remote;
+ struct platform_device *pdev;
+ int id, ret;
+
+ /* find TCON TOP platform device and TCON id */
+
+ port = of_graph_get_port_by_id(tcon->dev->of_node, 0);
+ if (!port)
+ return -EINVAL;
+
+ id = sun4i_tcon_of_get_id_from_port(port);
+ of_node_put(port);
+
+ remote = of_graph_get_remote_node(tcon->dev->of_node, 0, -1);
+ if (!remote)
+ return -EINVAL;
+
+ pdev = of_find_device_by_node(remote);
+ of_node_put(remote);
+ if (!pdev)
+ return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
+ encoder->encoder_type == DRM_MODE_ENCODER_TMDS) {
+ ret = sun8i_tcon_top_set_hdmi_src(&pdev->dev, id);
+ if (ret)
+ return ret;
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP)) {
+ ret = sun8i_tcon_top_de_config(&pdev->dev, tcon->id, id);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct sun4i_tcon_quirks sun4i_a10_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
@@ -1291,6 +1466,11 @@ static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
.has_channel_1 = true,
};
+static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = {
+ .has_channel_1 = true,
+ .set_mux = sun8i_r40_tcon_tv_set_mux,
+};
+
static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
.has_channel_0 = true,
};
@@ -1315,6 +1495,7 @@ const struct of_device_id sun4i_tcon_of_table[] = {
{ .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks },
{ .compatible = "allwinner,sun8i-a83t-tcon-lcd", .data = &sun8i_a83t_lcd_quirks },
{ .compatible = "allwinner,sun8i-a83t-tcon-tv", .data = &sun8i_a83t_tv_quirks },
+ { .compatible = "allwinner,sun8i-r40-tcon-tv", .data = &sun8i_r40_tv_quirks },
{ .compatible = "allwinner,sun8i-v3s-tcon", .data = &sun8i_v3s_quirks },
{ .compatible = "allwinner,sun9i-a80-tcon-lcd", .data = &sun9i_a80_tcon_lcd_quirks },
{ .compatible = "allwinner,sun9i-a80-tcon-tv", .data = &sun9i_a80_tcon_tv_quirks },
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index f6a071cd5a6f..3d492c8be1fc 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -37,18 +37,21 @@
#define SUN4I_TCON_GINT1_REG 0x8
#define SUN4I_TCON_FRM_CTL_REG 0x10
-#define SUN4I_TCON_FRM_CTL_EN BIT(31)
-
-#define SUN4I_TCON_FRM_SEED_PR_REG 0x14
-#define SUN4I_TCON_FRM_SEED_PG_REG 0x18
-#define SUN4I_TCON_FRM_SEED_PB_REG 0x1c
-#define SUN4I_TCON_FRM_SEED_LR_REG 0x20
-#define SUN4I_TCON_FRM_SEED_LG_REG 0x24
-#define SUN4I_TCON_FRM_SEED_LB_REG 0x28
-#define SUN4I_TCON_FRM_TBL0_REG 0x2c
-#define SUN4I_TCON_FRM_TBL1_REG 0x30
-#define SUN4I_TCON_FRM_TBL2_REG 0x34
-#define SUN4I_TCON_FRM_TBL3_REG 0x38
+#define SUN4I_TCON0_FRM_CTL_EN BIT(31)
+#define SUN4I_TCON0_FRM_CTL_MODE_R BIT(6)
+#define SUN4I_TCON0_FRM_CTL_MODE_G BIT(5)
+#define SUN4I_TCON0_FRM_CTL_MODE_B BIT(4)
+
+#define SUN4I_TCON0_FRM_SEED_PR_REG 0x14
+#define SUN4I_TCON0_FRM_SEED_PG_REG 0x18
+#define SUN4I_TCON0_FRM_SEED_PB_REG 0x1c
+#define SUN4I_TCON0_FRM_SEED_LR_REG 0x20
+#define SUN4I_TCON0_FRM_SEED_LG_REG 0x24
+#define SUN4I_TCON0_FRM_SEED_LB_REG 0x28
+#define SUN4I_TCON0_FRM_TBL0_REG 0x2c
+#define SUN4I_TCON0_FRM_TBL1_REG 0x30
+#define SUN4I_TCON0_FRM_TBL2_REG 0x34
+#define SUN4I_TCON0_FRM_TBL3_REG 0x38
#define SUN4I_TCON0_CTL_REG 0x40
#define SUN4I_TCON0_CTL_TCON_ENABLE BIT(31)
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index 31875b636434..ed2983770e9c 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -125,10 +125,22 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
return PTR_ERR(hdmi->clk_tmds);
}
+ hdmi->regulator = devm_regulator_get(dev, "hvcc");
+ if (IS_ERR(hdmi->regulator)) {
+ dev_err(dev, "Couldn't get regulator\n");
+ return PTR_ERR(hdmi->regulator);
+ }
+
+ ret = regulator_enable(hdmi->regulator);
+ if (ret) {
+ dev_err(dev, "Failed to enable regulator\n");
+ return ret;
+ }
+
ret = reset_control_deassert(hdmi->rst_ctrl);
if (ret) {
dev_err(dev, "Could not deassert ctrl reset control\n");
- return ret;
+ goto err_disable_regulator;
}
ret = clk_prepare_enable(hdmi->clk_tmds);
@@ -183,6 +195,8 @@ err_disable_clk_tmds:
clk_disable_unprepare(hdmi->clk_tmds);
err_assert_ctrl_reset:
reset_control_assert(hdmi->rst_ctrl);
+err_disable_regulator:
+ regulator_disable(hdmi->regulator);
return ret;
}
@@ -196,6 +210,7 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
sun8i_hdmi_phy_remove(hdmi);
clk_disable_unprepare(hdmi->clk_tmds);
reset_control_assert(hdmi->rst_ctrl);
+ regulator_disable(hdmi->regulator);
}
static const struct component_ops sun8i_dw_hdmi_ops = {
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index aadbe0a10b0c..7fdc1ecd2892 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -10,6 +10,7 @@
#include <drm/drm_encoder.h>
#include <linux/clk.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#define SUN8I_HDMI_PHY_DBG_CTRL_REG 0x0000
@@ -176,6 +177,7 @@ struct sun8i_dw_hdmi {
struct drm_encoder encoder;
struct sun8i_hdmi_phy *phy;
struct dw_hdmi_plat_data plat_data;
+ struct regulator *regulator;
struct reset_control *rst_ctrl;
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 82502b351aec..856124806353 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -417,6 +417,14 @@ static const struct sun8i_hdmi_phy_variant sun8i_h3_hdmi_phy = {
.phy_config = &sun8i_hdmi_phy_config_h3,
};
+static const struct sun8i_hdmi_phy_variant sun8i_r40_hdmi_phy = {
+ .has_phy_clk = true,
+ .has_second_pll = true,
+ .phy_init = &sun8i_hdmi_phy_init_h3,
+ .phy_disable = &sun8i_hdmi_phy_disable_h3,
+ .phy_config = &sun8i_hdmi_phy_config_h3,
+};
+
static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
{
.compatible = "allwinner,sun50i-a64-hdmi-phy",
@@ -430,6 +438,10 @@ static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
.compatible = "allwinner,sun8i-h3-hdmi-phy",
.data = &sun8i_h3_hdmi_phy,
},
+ {
+ .compatible = "allwinner,sun8i-r40-hdmi-phy",
+ .data = &sun8i_r40_hdmi_phy,
+ },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index fc3713608f78..8b3d02b146b7 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -569,6 +569,22 @@ static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
.mod_rate = 150000000,
};
+static const struct sun8i_mixer_cfg sun50i_a64_mixer0_cfg = {
+ .ccsc = 0,
+ .mod_rate = 297000000,
+ .scaler_mask = 0xf,
+ .ui_num = 3,
+ .vi_num = 1,
+};
+
+static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = {
+ .ccsc = 1,
+ .mod_rate = 297000000,
+ .scaler_mask = 0x3,
+ .ui_num = 1,
+ .vi_num = 1,
+};
+
static const struct of_device_id sun8i_mixer_of_table[] = {
{
.compatible = "allwinner,sun8i-a83t-de2-mixer-0",
@@ -594,6 +610,14 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
.compatible = "allwinner,sun8i-v3s-de2-mixer",
.data = &sun8i_v3s_mixer_cfg,
},
+ {
+ .compatible = "allwinner,sun50i-a64-de2-mixer-0",
+ .data = &sun50i_a64_mixer0_cfg,
+ },
+ {
+ .compatible = "allwinner,sun50i-a64-de2-mixer-1",
+ .data = &sun50i_a64_mixer1_cfg,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sun8i_mixer_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index 55fe398d8290..3040a79f298f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -129,8 +129,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
if (!tcon_top)
return -ENOMEM;
- clk_data = devm_kzalloc(dev, sizeof(*clk_data) +
- sizeof(*clk_data->hws) * CLK_NUM,
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, CLK_NUM),
GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
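The struct_size() conversion above keeps the same allocation size as the open-coded expression it replaces, but adds overflow checking. A rough sketch of the semantics (illustrative only, not the exact macro from <linux/overflow.h>):

	size_t bytes;

	bytes = sizeof(*clk_data) + sizeof(*clk_data->hws) * CLK_NUM; /* can wrap on overflow */
	bytes = struct_size(clk_data, hws, CLK_NUM); /* saturates to SIZE_MAX instead,
	                                              * so devm_kzalloc() fails rather
	                                              * than under-allocating */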
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index a2bd5876c633..b424bc911b95 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1187,6 +1187,10 @@ static int host1x_drm_probe(struct host1x_device *dev)
dev_set_drvdata(&dev->dev, drm);
+ err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb", false);
+ if (err < 0)
+ goto unref;
+
err = drm_dev_register(drm, 0);
if (err < 0)
goto unref;
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
index 19c7f70adfa5..255341ee4eb9 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
@@ -135,7 +135,7 @@ static int tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
/*
* We don't embed drm_device, because that prevent us from using
* devm_kzalloc() to allocate tinydrm_device in the driver since
- * drm_dev_unref() frees the structure. The devm_ functions provide
+ * drm_dev_put() frees the structure. The devm_ functions provide
* for easy error handling.
*/
drm = drm_dev_alloc(driver, parent);
@@ -155,7 +155,7 @@ static void tinydrm_fini(struct tinydrm_device *tdev)
drm_mode_config_cleanup(tdev->drm);
mutex_destroy(&tdev->dirty_lock);
tdev->drm->dev_private = NULL;
- drm_dev_unref(tdev->drm);
+ drm_dev_put(tdev->drm);
}
static void devm_tinydrm_release(void *data)
@@ -172,7 +172,7 @@ static void devm_tinydrm_release(void *data)
*
* This function initializes @tdev, the underlying DRM device and it's
* mode_config. Resources will be automatically freed on driver detach (devres)
- * using drm_mode_config_cleanup() and drm_dev_unref().
+ * using drm_mode_config_cleanup() and drm_dev_put().
*
* Returns:
* Zero on success, negative error code on failure.
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 5ce24098a5fd..70c54774400b 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -521,12 +521,12 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
kref_init(&exec->refcount);
ret = drm_syncobj_find_fence(file_priv, args->in_sync_bcl,
- &exec->bin.in_fence);
+ 0, &exec->bin.in_fence);
if (ret == -EINVAL)
goto fail;
ret = drm_syncobj_find_fence(file_priv, args->in_sync_rcl,
- &exec->render.in_fence);
+ 0, &exec->render.in_fence);
if (ret == -EINVAL)
goto fail;
@@ -584,7 +584,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
/* Update the return sync object for the */
sync_out = drm_syncobj_find(file_priv, args->out_sync);
if (sync_out) {
- drm_syncobj_replace_fence(sync_out,
+ drm_syncobj_replace_fence(sync_out, 0,
&exec->render.base.s_fence->finished);
drm_syncobj_put(sync_out);
}
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 0e6a121858d1..3ce136ba8791 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -35,6 +35,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_uapi.h>
#include <linux/clk.h>
#include <drm/drm_fb_cma_helper.h>
#include <linux/component.h>
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 04270a14fcaa..e2a15c63a81f 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -248,24 +248,6 @@ static void vc4_match_add_drivers(struct device *dev,
}
}
-static void vc4_kick_out_firmware_fb(void)
-{
- struct apertures_struct *ap;
-
- ap = alloc_apertures(1);
- if (!ap)
- return;
-
- /* Since VC4 is a UMA device, the simplefb node may have been
- * located anywhere in memory.
- */
- ap->ranges[0].base = 0;
- ap->ranges[0].size = ~0;
-
- drm_fb_helper_remove_conflicting_framebuffers(ap, "vc4drmfb", false);
- kfree(ap);
-}
-
static int vc4_drm_bind(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -298,7 +280,7 @@ static int vc4_drm_bind(struct device *dev)
if (ret)
goto gem_destroy;
- vc4_kick_out_firmware_fb();
+ drm_fb_helper_remove_conflicting_framebuffers(NULL, "vc4drmfb", false);
ret = drm_dev_register(drm, 0);
if (ret < 0)
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 7910b9acedd6..5b22e996af6c 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -681,7 +681,7 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
exec->fence = &fence->base;
if (out_sync)
- drm_syncobj_replace_fence(out_sync, exec->fence);
+ drm_syncobj_replace_fence(out_sync, 0, exec->fence);
vc4_update_bo_seqnos(exec, seqno);
@@ -1173,7 +1173,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
if (args->in_sync) {
ret = drm_syncobj_find_fence(file_priv, args->in_sync,
- &in_fence);
+ 0, &in_fence);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index cfb50fedfa2b..f39ee212412d 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -22,6 +22,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_uapi.h>
#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
@@ -200,9 +201,7 @@ static void vc4_plane_reset(struct drm_plane *plane)
if (!vc4_state)
return;
- plane->state = &vc4_state->base;
- plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
- vc4_state->base.plane = plane;
+ __drm_atomic_helper_plane_reset(plane, &vc4_state->base);
}
static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 0e5620f76ee0..ec6af8b920da 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -504,7 +504,7 @@ out_free:
static void __exit vgem_exit(void)
{
drm_dev_unregister(&vgem_device->drm);
- drm_dev_unref(&vgem_device->drm);
+ drm_dev_put(&vgem_device->drm);
}
module_init(vgem_init);
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index b28876c222b4..e6ee71323a66 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -43,16 +43,6 @@ static const char *vgem_fence_get_timeline_name(struct dma_fence *fence)
return "unbound";
}
-static bool vgem_fence_signaled(struct dma_fence *fence)
-{
- return false;
-}
-
-static bool vgem_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static void vgem_fence_release(struct dma_fence *base)
{
struct vgem_fence *fence = container_of(base, typeof(*fence), base);
@@ -76,9 +66,6 @@ static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str,
static const struct dma_fence_ops vgem_fence_ops = {
.get_driver_name = vgem_fence_get_driver_name,
.get_timeline_name = vgem_fence_get_timeline_name,
- .enable_signaling = vgem_fence_enable_signaling,
- .signaled = vgem_fence_signaled,
- .wait = dma_fence_default_wait,
.release = vgem_fence_release,
.fence_value_str = vgem_fence_value_str,
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 25503b933599..0379d6897659 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -75,12 +75,9 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
struct drm_gem_object *obj)
{
int ret;
- struct virtio_gpu_object *bo;
vgfb->base.obj[0] = obj;
- bo = gem_to_virtio_gpu_obj(obj);
-
drm_helper_mode_fill_fb_struct(dev, &vgfb->base, mode_cmd);
ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
@@ -109,6 +106,9 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
+ struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
+
+ output->enabled = true;
}
static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -119,6 +119,7 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
+ output->enabled = false;
}
static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
index 7df8d0c9026a..757ca28ab93e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
@@ -28,26 +28,6 @@
#include "virtgpu_drv.h"
-static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
-{
- struct apertures_struct *ap;
- bool primary;
-
- ap = alloc_apertures(1);
- if (!ap)
- return;
-
- ap->ranges[0].base = pci_resource_start(pci_dev, 0);
- ap->ranges[0].size = pci_resource_len(pci_dev, 0);
-
- primary = pci_dev->resource[PCI_ROM_RESOURCE].flags
- & IORESOURCE_ROM_SHADOW;
-
- drm_fb_helper_remove_conflicting_framebuffers(ap, "virtiodrmfb", primary);
-
- kfree(ap);
-}
-
int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
{
struct drm_device *dev;
@@ -69,7 +49,9 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
pname);
dev->pdev = pdev;
if (vga)
- virtio_pci_kick_out_firmware_fb(pdev);
+ drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
+ 0,
+ "virtiodrmfb");
snprintf(unique, sizeof(unique), "pci:%s", pname);
ret = drm_dev_set_unique(dev, unique);
@@ -85,6 +67,6 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
return 0;
err_free:
- drm_dev_unref(dev);
+ drm_dev_put(dev);
return ret;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 65605e207bbe..a2d79e18bda7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -36,6 +36,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_fb_helper.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
@@ -57,6 +58,7 @@ struct virtio_gpu_object {
uint32_t hw_res_handle;
struct sg_table *pages;
+ uint32_t mapped;
void *vmap;
bool dumb;
struct ttm_place placement_code;
@@ -114,6 +116,7 @@ struct virtio_gpu_output {
struct virtio_gpu_update_cursor cursor;
int cur_x;
int cur_y;
+ bool enabled;
};
#define drm_crtc_to_virtio_gpu_output(x) \
container_of(x, struct virtio_gpu_output, crtc)
@@ -131,6 +134,13 @@ struct virtio_gpu_framebuffer {
#define to_virtio_gpu_framebuffer(x) \
container_of(x, struct virtio_gpu_framebuffer, base)
+struct virtio_gpu_fbdev {
+ struct drm_fb_helper helper;
+ struct virtio_gpu_framebuffer vgfb;
+ struct virtio_gpu_device *vgdev;
+ struct delayed_work work;
+};
+
struct virtio_gpu_mman {
struct ttm_bo_global_ref bo_global_ref;
struct drm_global_reference mem_global_ref;
@@ -276,13 +286,13 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
uint32_t resource_id,
struct virtio_gpu_fence **fence);
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj);
int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
struct virtio_gpu_output *output);
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
-void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
- uint32_t resource_id);
int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx);
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
int idx, int version,
@@ -372,7 +382,7 @@ int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
static inline struct virtio_gpu_object*
virtio_gpu_object_ref(struct virtio_gpu_object *bo)
{
- ttm_bo_reference(&bo->tbo);
+ ttm_bo_get(&bo->tbo);
return bo;
}
@@ -383,9 +393,8 @@ static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo)
if ((*bo) == NULL)
return;
tbo = &((*bo)->tbo);
- ttm_bo_unref(&tbo);
- if (tbo == NULL)
- *bo = NULL;
+ ttm_bo_put(tbo);
+ *bo = NULL;
}
static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index a121b1c79522..b9678c4082ac 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -29,13 +29,6 @@
#define VIRTIO_GPU_FBCON_POLL_PERIOD (HZ / 60)
-struct virtio_gpu_fbdev {
- struct drm_fb_helper helper;
- struct virtio_gpu_framebuffer vgfb;
- struct virtio_gpu_device *vgdev;
- struct delayed_work work;
-};
-
static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
bool store, int x, int y,
int width, int height)
@@ -291,7 +284,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
return 0;
err_fb_alloc:
- virtio_gpu_cmd_resource_inval_backing(vgdev, resid);
+ virtio_gpu_object_detach(vgdev, obj);
err_obj_attach:
err_obj_vmap:
virtio_gpu_gem_free_object(&obj->gem_base);
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index dc5b5b2b7aab..88f2fb8c61c4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -152,7 +152,7 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
if (WARN_ON(!output))
return;
- if (plane->state->fb) {
+ if (plane->state->fb && output->enabled) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
handle = bo->hw_res_handle;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 11f8ae5b5332..e3152d45c5f1 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -106,29 +106,6 @@ static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
}
}
-#if 0
-/*
- * Hmm, seems to not do anything useful. Leftover debug hack?
- * Something like printing pagefaults to kernel log?
- */
-static struct vm_operations_struct virtio_gpu_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops;
-
-static int virtio_gpu_ttm_fault(struct vm_fault *vmf)
-{
- struct ttm_buffer_object *bo;
- struct virtio_gpu_device *vgdev;
- int r;
-
- bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
- if (bo == NULL)
- return VM_FAULT_NOPAGE;
- vgdev = virtio_gpu_get_vgdev(bo->bdev);
- r = ttm_vm_ops->fault(vmf);
- return r;
-}
-#endif
-
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv;
@@ -143,19 +120,8 @@ int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
return -EINVAL;
}
r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev);
-#if 0
- if (unlikely(r != 0))
- return r;
- if (unlikely(ttm_vm_ops == NULL)) {
- ttm_vm_ops = vma->vm_ops;
- virtio_gpu_ttm_vm_ops = *ttm_vm_ops;
- virtio_gpu_ttm_vm_ops.fault = &virtio_gpu_ttm_fault;
- }
- vma->vm_ops = &virtio_gpu_ttm_vm_ops;
- return 0;
-#else
+
return r;
-#endif
}
static int virtio_gpu_invalidate_caches(struct ttm_bo_device *bdev,
@@ -377,8 +343,7 @@ static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
if (!new_mem || (new_mem->placement & TTM_PL_FLAG_SYSTEM)) {
if (bo->hw_res_handle)
- virtio_gpu_cmd_resource_inval_backing(vgdev,
- bo->hw_res_handle);
+ virtio_gpu_object_detach(vgdev, bo);
} else if (new_mem->placement & TTM_PL_FLAG_TT) {
if (bo->hw_res_handle) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 020070d483d3..df32811f2c3e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -423,8 +423,9 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
-void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
- uint32_t resource_id)
+static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ struct virtio_gpu_fence **fence)
{
struct virtio_gpu_resource_detach_backing *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -435,7 +436,7 @@ void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
cmd_p->resource_id = cpu_to_le32(resource_id);
- virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
@@ -489,6 +490,15 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
{
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
+ struct virtio_gpu_fbdev *vgfbdev = vgdev->vgfbdev;
+ struct virtio_gpu_framebuffer *fb = &vgfbdev->vgfb;
+ struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);
+ bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+
+ if (use_dma_api)
+ dma_sync_sg_for_device(vgdev->vdev->dev.parent,
+ obj->pages->sgl, obj->pages->nents,
+ DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
@@ -648,11 +658,11 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
{
struct virtio_gpu_get_capset *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
- int max_size = vgdev->capsets[idx].max_size;
+ int max_size;
struct virtio_gpu_drv_cap_cache *cache_ent;
void *resp_buf;
- if (idx > vgdev->num_capsets)
+ if (idx >= vgdev->num_capsets)
return -EINVAL;
if (version > vgdev->capsets[idx].max_version)
@@ -662,6 +672,7 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
if (!cache_ent)
return -ENOMEM;
+ max_size = vgdev->capsets[idx].max_size;
cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
if (!cache_ent->caps_cache) {
kfree(cache_ent);
@@ -787,6 +798,15 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
{
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
+ struct virtio_gpu_fbdev *vgfbdev = vgdev->vgfbdev;
+ struct virtio_gpu_framebuffer *fb = &vgfbdev->vgfb;
+ struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);
+ bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+
+ if (use_dma_api)
+ dma_sync_sg_for_device(vgdev->vdev->dev.parent,
+ obj->pages->sgl, obj->pages->nents,
+ DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
@@ -848,9 +868,10 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
struct virtio_gpu_fence **fence)
{
+ bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
struct virtio_gpu_mem_entry *ents;
struct scatterlist *sg;
- int si;
+ int si, nents;
if (!obj->pages) {
int ret;
@@ -860,28 +881,60 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
return ret;
}
+ if (use_dma_api) {
+ obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
+ obj->pages->sgl, obj->pages->nents,
+ DMA_TO_DEVICE);
+ nents = obj->mapped;
+ } else {
+ nents = obj->pages->nents;
+ }
+
/* gets freed when the ring has consumed it */
- ents = kmalloc_array(obj->pages->nents,
- sizeof(struct virtio_gpu_mem_entry),
+ ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
GFP_KERNEL);
if (!ents) {
DRM_ERROR("failed to allocate ent list\n");
return -ENOMEM;
}
- for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
- ents[si].addr = cpu_to_le64(sg_phys(sg));
+ for_each_sg(obj->pages->sgl, sg, nents, si) {
+ ents[si].addr = cpu_to_le64(use_dma_api
+ ? sg_dma_address(sg)
+ : sg_phys(sg));
ents[si].length = cpu_to_le32(sg->length);
ents[si].padding = 0;
}
virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
- ents, obj->pages->nents,
+ ents, nents,
fence);
obj->hw_res_handle = resource_id;
return 0;
}
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj)
+{
+ bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ struct virtio_gpu_fence *fence;
+
+ if (use_dma_api && obj->mapped) {
+ /* detach backing and wait for the host to process it ... */
+ virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
+ dma_fence_wait(&fence->f, true);
+ dma_fence_put(&fence->f);
+
+ /* ... then tear down iommu mappings */
+ dma_unmap_sg(vgdev->vdev->dev.parent,
+ obj->pages->sgl, obj->mapped,
+ DMA_TO_DEVICE);
+ obj->mapped = 0;
+ } else {
+ virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
+ }
+}
+
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
struct virtio_gpu_output *output)
{
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile
index 986297da51bf..37966914f70b 100644
--- a/drivers/gpu/drm/vkms/Makefile
+++ b/drivers/gpu/drm/vkms/Makefile
@@ -1,3 +1,3 @@
-vkms-y := vkms_drv.o vkms_plane.o vkms_output.o vkms_crtc.o vkms_gem.o
+vkms-y := vkms_drv.o vkms_plane.o vkms_output.o vkms_crtc.o vkms_gem.o vkms_crc.o
obj-$(CONFIG_DRM_VKMS) += vkms.o
diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c
new file mode 100644
index 000000000000..0a2745646dfa
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_crc.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vkms_drv.h"
+#include <linux/crc32.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+
+/**
+ * compute_crc - Compute CRC value on output frame
+ *
+ * @vaddr_out: address to final framebuffer
+ * @crc_out: framebuffer's metadata
+ *
+ * returns CRC value computed using crc32 on the visible portion of
+ * the final framebuffer at vaddr_out
+ */
+static uint32_t compute_crc(void *vaddr_out, struct vkms_crc_data *crc_out)
+{
+ int i, j, src_offset;
+ int x_src = crc_out->src.x1 >> 16;
+ int y_src = crc_out->src.y1 >> 16;
+ int h_src = drm_rect_height(&crc_out->src) >> 16;
+ int w_src = drm_rect_width(&crc_out->src) >> 16;
+ u32 crc = 0;
+
+ for (i = y_src; i < y_src + h_src; ++i) {
+ for (j = x_src; j < x_src + w_src; ++j) {
+ src_offset = crc_out->offset
+ + (i * crc_out->pitch)
+ + (j * crc_out->cpp);
+ /* XRGB format ignores Alpha channel */
+ memset(vaddr_out + src_offset + 24, 0, 8);
+ crc = crc32_le(crc, vaddr_out + src_offset,
+ sizeof(u32));
+ }
+ }
+
+ return crc;
+}
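compute_crc() above relies on the seeding behaviour of crc32_le() from <linux/crc32.h>: the running CRC is passed back in, so feeding the visible pixels one at a time gives the same result as a single contiguous pass. A minimal sketch of that property (the buffer here is illustrative):

	u8 buf[8] = { 0 };   /* two XRGB8888 pixels, contents illustrative */
	u32 crc = 0;

	crc = crc32_le(crc, buf, 4);      /* first pixel */
	crc = crc32_le(crc, buf + 4, 4);  /* second pixel */
	/* crc now equals crc32_le(0, buf, 8) */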
+
+/**
+ * blend - blend value at vaddr_src with value at vaddr_dst
+ * @vaddr_dst: destination address
+ * @vaddr_src: source address
+ * @crc_dst: destination framebuffer's metadata
+ * @crc_src: source framebuffer's metadata
+ *
+ * Blend value at vaddr_src with value at vaddr_dst.
+ * Currently, this function writes the value at vaddr_src over the value
+ * at vaddr_dst, using the buffers' metadata to locate the new values
+ * from vaddr_src and their destination at vaddr_dst.
+ *
+ * Todo: Use the alpha value to blend vaddr_src with vaddr_dst
+ * instead of overwriting it.
+ */
+static void blend(void *vaddr_dst, void *vaddr_src,
+ struct vkms_crc_data *crc_dst,
+ struct vkms_crc_data *crc_src)
+{
+ int i, j, j_dst, i_dst;
+ int offset_src, offset_dst;
+
+ int x_src = crc_src->src.x1 >> 16;
+ int y_src = crc_src->src.y1 >> 16;
+
+ int x_dst = crc_src->dst.x1;
+ int y_dst = crc_src->dst.y1;
+ int h_dst = drm_rect_height(&crc_src->dst);
+ int w_dst = drm_rect_width(&crc_src->dst);
+
+ int y_limit = y_src + h_dst;
+ int x_limit = x_src + w_dst;
+
+ for (i = y_src, i_dst = y_dst; i < y_limit; ++i) {
+ for (j = x_src, j_dst = x_dst; j < x_limit; ++j) {
+ offset_dst = crc_dst->offset
+ + (i_dst * crc_dst->pitch)
+ + (j_dst++ * crc_dst->cpp);
+ offset_src = crc_src->offset
+ + (i * crc_src->pitch)
+ + (j * crc_src->cpp);
+
+ memcpy(vaddr_dst + offset_dst,
+ vaddr_src + offset_src, sizeof(u32));
+ }
+ i_dst++;
+ }
+}
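The Todo in the comment above asks for real alpha blending instead of a plain copy. A minimal per-pixel "source over" sketch for an ARGB8888 cursor composited onto an XRGB8888 primary (illustrative only, not the driver's implementation):

	static u32 blend_argb_over_xrgb(u32 src, u32 dst)
	{
		u32 alpha = (src >> 24) & 0xff;
		u32 out = 0;
		int shift;

		/* blend B, G and R channels; the X byte of the result stays 0 */
		for (shift = 0; shift < 24; shift += 8) {
			u32 s = (src >> shift) & 0xff;
			u32 d = (dst >> shift) & 0xff;

			/* out = (s * a + d * (255 - a)) / 255, per channel */
			out |= (((s * alpha + d * (255 - alpha)) / 255) & 0xff)
			       << shift;
		}

		return out;
	}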
+
+static void compose_cursor(struct vkms_crc_data *cursor_crc,
+ struct vkms_crc_data *primary_crc, void *vaddr_out)
+{
+ struct drm_gem_object *cursor_obj;
+ struct vkms_gem_object *cursor_vkms_obj;
+
+ cursor_obj = drm_gem_fb_get_obj(&cursor_crc->fb, 0);
+ cursor_vkms_obj = drm_gem_to_vkms_gem(cursor_obj);
+
+ mutex_lock(&cursor_vkms_obj->pages_lock);
+ if (!cursor_vkms_obj->vaddr) {
+ DRM_WARN("cursor plane vaddr is NULL");
+ goto out;
+ }
+
+ blend(vaddr_out, cursor_vkms_obj->vaddr, primary_crc, cursor_crc);
+
+out:
+ mutex_unlock(&cursor_vkms_obj->pages_lock);
+}
+
+static uint32_t _vkms_get_crc(struct vkms_crc_data *primary_crc,
+ struct vkms_crc_data *cursor_crc)
+{
+ struct drm_framebuffer *fb = &primary_crc->fb;
+ struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
+ struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj);
+ void *vaddr_out = kzalloc(vkms_obj->gem.size, GFP_KERNEL);
+ u32 crc = 0;
+
+ if (!vaddr_out) {
+ DRM_ERROR("Failed to allocate memory for output frame.");
+ return 0;
+ }
+
+ mutex_lock(&vkms_obj->pages_lock);
+ if (WARN_ON(!vkms_obj->vaddr)) {
+ mutex_unlock(&vkms_obj->pages_lock);
+ return crc;
+ }
+
+ memcpy(vaddr_out, vkms_obj->vaddr, vkms_obj->gem.size);
+ mutex_unlock(&vkms_obj->pages_lock);
+
+ if (cursor_crc)
+ compose_cursor(cursor_crc, primary_crc, vaddr_out);
+
+ crc = compute_crc(vaddr_out, primary_crc);
+
+ kfree(vaddr_out);
+
+ return crc;
+}
+
+/**
+ * vkms_crc_work_handle - ordered work_struct to compute CRC
+ *
+ * @work: work_struct
+ *
+ * Work handler for computing CRCs. The work_struct is queued on an
+ * ordered workqueue and is periodically scheduled to run by
+ * _vblank_handle(); it is flushed at vkms_atomic_crtc_destroy_state().
+ */
+void vkms_crc_work_handle(struct work_struct *work)
+{
+ struct vkms_crtc_state *crtc_state = container_of(work,
+ struct vkms_crtc_state,
+ crc_work);
+ struct drm_crtc *crtc = crtc_state->base.crtc;
+ struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+ struct vkms_device *vdev = container_of(out, struct vkms_device,
+ output);
+ struct vkms_crc_data *primary_crc = NULL;
+ struct vkms_crc_data *cursor_crc = NULL;
+ struct drm_plane *plane;
+ u32 crc32 = 0;
+ u64 frame_start, frame_end;
+ unsigned long flags;
+
+ spin_lock_irqsave(&out->state_lock, flags);
+ frame_start = crtc_state->frame_start;
+ frame_end = crtc_state->frame_end;
+ spin_unlock_irqrestore(&out->state_lock, flags);
+
+ /* _vblank_handle() hasn't updated frame_start yet */
+ if (!frame_start || frame_start == frame_end)
+ goto out;
+
+ drm_for_each_plane(plane, &vdev->drm) {
+ struct vkms_plane_state *vplane_state;
+ struct vkms_crc_data *crc_data;
+
+ vplane_state = to_vkms_plane_state(plane->state);
+ crc_data = vplane_state->crc_data;
+
+ if (drm_framebuffer_read_refcount(&crc_data->fb) == 0)
+ continue;
+
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ primary_crc = crc_data;
+ else
+ cursor_crc = crc_data;
+ }
+
+ if (primary_crc)
+ crc32 = _vkms_get_crc(primary_crc, cursor_crc);
+
+ frame_end = drm_crtc_accurate_vblank_count(crtc);
+
+ /* queue_work can fail to schedule crc_work; add crc for
+ * missing frames
+ */
+ while (frame_start <= frame_end)
+ drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
+
+out:
+ /* to avoid using the same value for frame number again */
+ spin_lock_irqsave(&out->state_lock, flags);
+ crtc_state->frame_end = frame_end;
+ crtc_state->frame_start = 0;
+ spin_unlock_irqrestore(&out->state_lock, flags);
+}
+
+static int vkms_crc_parse_source(const char *src_name, bool *enabled)
+{
+ int ret = 0;
+
+ if (!src_name) {
+ *enabled = false;
+ } else if (strcmp(src_name, "auto") == 0) {
+ *enabled = true;
+ } else {
+ *enabled = false;
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
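For reference, the parser above only recognizes "auto"; a hypothetical caller would see the following behaviour:

	bool enabled;

	vkms_crc_parse_source("auto", &enabled);  /* returns 0, enabled == true */
	vkms_crc_parse_source("foo", &enabled);   /* returns -EINVAL, enabled == false */
	vkms_crc_parse_source(NULL, &enabled);    /* returns 0, enabled == false */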
+
+int vkms_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
+ size_t *values_cnt)
+{
+ bool enabled;
+
+ if (vkms_crc_parse_source(src_name, &enabled) < 0) {
+ DRM_DEBUG_DRIVER("unknown source %s\n", src_name);
+ return -EINVAL;
+ }
+
+ *values_cnt = 1;
+
+ return 0;
+}
+
+int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name)
+{
+ struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+ bool enabled = false;
+ unsigned long flags;
+ int ret = 0;
+
+ ret = vkms_crc_parse_source(src_name, &enabled);
+
+ /* make sure nothing is scheduled on crtc workq */
+ flush_workqueue(out->crc_workq);
+
+ spin_lock_irqsave(&out->lock, flags);
+ out->crc_enabled = enabled;
+ spin_unlock_irqrestore(&out->lock, flags);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 875fca662ac0..177bbcb38306 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -10,18 +10,44 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
-static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
+static void _vblank_handle(struct vkms_output *output)
{
- struct vkms_output *output = container_of(timer, struct vkms_output,
- vblank_hrtimer);
struct drm_crtc *crtc = &output->crtc;
- int ret_overrun;
+ struct vkms_crtc_state *state = to_vkms_crtc_state(crtc->state);
bool ret;
+ spin_lock(&output->lock);
ret = drm_crtc_handle_vblank(crtc);
if (!ret)
DRM_ERROR("vkms failure on handling vblank");
+ if (state && output->crc_enabled) {
+ u64 frame = drm_crtc_accurate_vblank_count(crtc);
+
+ /* update frame_start only if a queued vkms_crc_work_handle()
+ * has read the data
+ */
+ spin_lock(&output->state_lock);
+ if (!state->frame_start)
+ state->frame_start = frame;
+ spin_unlock(&output->state_lock);
+
+ ret = queue_work(output->crc_workq, &state->crc_work);
+ if (!ret)
+ DRM_WARN("failed to queue vkms_crc_work_handle");
+ }
+
+ spin_unlock(&output->lock);
+}
+
+static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
+{
+ struct vkms_output *output = container_of(timer, struct vkms_output,
+ vblank_hrtimer);
+ int ret_overrun;
+
+ _vblank_handle(output);
+
ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
output->period_ns);
@@ -64,15 +90,68 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
return true;
}
+static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
+{
+ struct vkms_crtc_state *vkms_state = NULL;
+
+ if (crtc->state) {
+ vkms_state = to_vkms_crtc_state(crtc->state);
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+ kfree(vkms_state);
+ crtc->state = NULL;
+ }
+
+ vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
+ if (!vkms_state)
+ return;
+
+ crtc->state = &vkms_state->base;
+ crtc->state->crtc = crtc;
+}
+
+static struct drm_crtc_state *
+vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct vkms_crtc_state *vkms_state;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
+ if (!vkms_state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &vkms_state->base);
+
+ INIT_WORK(&vkms_state->crc_work, vkms_crc_work_handle);
+
+ return &vkms_state->base;
+}
+
+static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+ if (vkms_state) {
+ flush_work(&vkms_state->crc_work);
+ kfree(vkms_state);
+ }
+}
+
static const struct drm_crtc_funcs vkms_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = drm_crtc_cleanup,
.page_flip = drm_atomic_helper_page_flip,
- .reset = drm_atomic_helper_crtc_reset,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .reset = vkms_atomic_crtc_reset,
+ .atomic_duplicate_state = vkms_atomic_crtc_duplicate_state,
+ .atomic_destroy_state = vkms_atomic_crtc_destroy_state,
.enable_vblank = vkms_enable_vblank,
.disable_vblank = vkms_disable_vblank,
+ .set_crc_source = vkms_set_crc_source,
+ .verify_crc_source = vkms_verify_crc_source,
};
static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -87,9 +166,21 @@ static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
}
+static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
+
+ /* This lock is held across the atomic commit to block the vblank timer
+ * from scheduling vkms_crc_work_handle() until crc_data is updated
+ */
+ spin_lock_irq(&vkms_output->lock);
+}
+
static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
+ struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
unsigned long flags;
if (crtc->state->event) {
@@ -104,9 +195,12 @@ static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
crtc->state->event = NULL;
}
+
+ spin_unlock_irq(&vkms_output->lock);
}
static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
+ .atomic_begin = vkms_crtc_atomic_begin,
.atomic_flush = vkms_crtc_atomic_flush,
.atomic_enable = vkms_crtc_atomic_enable,
.atomic_disable = vkms_crtc_atomic_disable,
@@ -115,6 +209,7 @@ static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary, struct drm_plane *cursor)
{
+ struct vkms_output *vkms_out = drm_crtc_to_vkms_output(crtc);
int ret;
ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
@@ -126,5 +221,10 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
+ spin_lock_init(&vkms_out->lock);
+ spin_lock_init(&vkms_out->state_lock);
+
+ vkms_out->crc_workq = alloc_ordered_workqueue("vkms_crc_workq", 0);
+
return ret;
}
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 6e728b825259..07cfde1b4132 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -5,6 +5,15 @@
* (at your option) any later version.
*/
+/**
+ * DOC: vkms (Virtual Kernel Modesetting)
+ *
+ * vkms is a software-only model of a kms driver that is useful for testing,
+ * or for running X (or similar) on headless machines while still being able
+ * to use the GPU. vkms aims to enable a virtual display without the need for
+ * a hardware display capability.
+ */
+
#include <linux/module.h>
#include <drm/drm_gem.h>
#include <drm/drm_crtc_helper.h>
@@ -21,6 +30,10 @@
static struct vkms_device *vkms_device;
+bool enable_cursor;
+module_param_named(enable_cursor, enable_cursor, bool, 0444);
+MODULE_PARM_DESC(enable_cursor, "Enable/Disable cursor support");
+
static const struct file_operations vkms_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -47,6 +60,7 @@ static void vkms_release(struct drm_device *dev)
drm_atomic_helper_shutdown(&vkms->drm);
drm_mode_config_cleanup(&vkms->drm);
drm_dev_fini(&vkms->drm);
+ destroy_workqueue(vkms->output.crc_workq);
}
static struct drm_driver vkms_driver = {
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 07be29f2dc44..1c93990693e3 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -7,8 +7,8 @@
#include <drm/drm_encoder.h>
#include <linux/hrtimer.h>
-#define XRES_MIN 32
-#define YRES_MIN 32
+#define XRES_MIN 20
+#define YRES_MIN 20
#define XRES_DEF 1024
#define YRES_DEF 768
@@ -16,10 +16,48 @@
#define XRES_MAX 8192
#define YRES_MAX 8192
+extern bool enable_cursor;
+
static const u32 vkms_formats[] = {
DRM_FORMAT_XRGB8888,
};
+static const u32 vkms_cursor_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+struct vkms_crc_data {
+ struct drm_framebuffer fb;
+ struct drm_rect src, dst;
+ unsigned int offset;
+ unsigned int pitch;
+ unsigned int cpp;
+};
+
+/**
+ * vkms_plane_state - Driver specific plane state
+ * @base: base plane state
+ * @crc_data: data required for CRC computation
+ */
+struct vkms_plane_state {
+ struct drm_plane_state base;
+ struct vkms_crc_data *crc_data;
+};
+
+/**
+ * vkms_crtc_state - Driver specific CRTC state
+ * @base: base CRTC state
+ * @crc_work: work struct to compute and add CRC entries
+ * @frame_start: start frame number for computed CRC
+ * @frame_end: end frame number for computed CRC
+ */
+struct vkms_crtc_state {
+ struct drm_crtc_state base;
+ struct work_struct crc_work;
+ u64 frame_start;
+ u64 frame_end;
+};
+
struct vkms_output {
struct drm_crtc crtc;
struct drm_encoder encoder;
@@ -27,6 +65,13 @@ struct vkms_output {
struct hrtimer vblank_hrtimer;
ktime_t period_ns;
struct drm_pending_vblank_event *event;
+ bool crc_enabled;
+ /* ordered wq for crc_work */
+ struct workqueue_struct *crc_workq;
+ /* protects concurrent access to crc_data */
+ spinlock_t lock;
+ /* protects concurrent access to crtc_state */
+ spinlock_t state_lock;
};
struct vkms_device {
@@ -39,6 +84,8 @@ struct vkms_gem_object {
struct drm_gem_object gem;
struct mutex pages_lock; /* Page lock used in page fault handler */
struct page **pages;
+ unsigned int vmap_count;
+ void *vaddr;
};
#define drm_crtc_to_vkms_output(target) \
@@ -47,6 +94,15 @@ struct vkms_gem_object {
#define drm_device_to_vkms_device(target) \
container_of(target, struct vkms_device, drm)
+#define drm_gem_to_vkms_gem(target)\
+ container_of(target, struct vkms_gem_object, gem)
+
+#define to_vkms_crtc_state(target)\
+ container_of(target, struct vkms_crtc_state, base)
+
+#define to_vkms_plane_state(target)\
+ container_of(target, struct vkms_plane_state, base)
+
/* CRTC */
int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary, struct drm_plane *cursor);
@@ -57,7 +113,8 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
int vkms_output_init(struct vkms_device *vkmsdev);
-struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev);
+struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
+ enum drm_plane_type type);
/* Gem stuff */
struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
@@ -65,7 +122,7 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
u32 *handle,
u64 size);
-int vkms_gem_fault(struct vm_fault *vmf);
+vm_fault_t vkms_gem_fault(struct vm_fault *vmf);
int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -75,4 +132,14 @@ int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
void vkms_gem_free_object(struct drm_gem_object *obj);
+int vkms_gem_vmap(struct drm_gem_object *obj);
+
+void vkms_gem_vunmap(struct drm_gem_object *obj);
+
+/* CRC Support */
+int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name);
+int vkms_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
+ size_t *values_cnt);
+void vkms_crc_work_handle(struct work_struct *work);
+
#endif /* _VKMS_DRV_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index c7e38368602b..d04e988b4cbe 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -37,20 +37,22 @@ void vkms_gem_free_object(struct drm_gem_object *obj)
struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
gem);
- kvfree(gem->pages);
+ WARN_ON(gem->pages);
+ WARN_ON(gem->vaddr);
+
mutex_destroy(&gem->pages_lock);
drm_gem_object_release(obj);
kfree(gem);
}
-int vkms_gem_fault(struct vm_fault *vmf)
+vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct vkms_gem_object *obj = vma->vm_private_data;
unsigned long vaddr = vmf->address;
pgoff_t page_offset;
loff_t num_pages;
- int ret;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);
@@ -58,7 +60,6 @@ int vkms_gem_fault(struct vm_fault *vmf)
if (page_offset > num_pages)
return VM_FAULT_SIGBUS;
- ret = -ENOENT;
mutex_lock(&obj->pages_lock);
if (obj->pages) {
get_page(obj->pages[page_offset]);
@@ -177,3 +178,77 @@ unref:
return ret;
}
+
+static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
+{
+ struct drm_gem_object *gem_obj = &vkms_obj->gem;
+
+ if (!vkms_obj->pages) {
+ struct page **pages = drm_gem_get_pages(gem_obj);
+
+ if (IS_ERR(pages))
+ return pages;
+
+ if (cmpxchg(&vkms_obj->pages, NULL, pages))
+ drm_gem_put_pages(gem_obj, pages, false, true);
+ }
+
+ return vkms_obj->pages;
+}
+
+void vkms_gem_vunmap(struct drm_gem_object *obj)
+{
+ struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
+
+ mutex_lock(&vkms_obj->pages_lock);
+ if (vkms_obj->vmap_count < 1) {
+ WARN_ON(vkms_obj->vaddr);
+ WARN_ON(vkms_obj->pages);
+ mutex_unlock(&vkms_obj->pages_lock);
+ return;
+ }
+
+ vkms_obj->vmap_count--;
+
+ if (vkms_obj->vmap_count == 0) {
+ vunmap(vkms_obj->vaddr);
+ vkms_obj->vaddr = NULL;
+ drm_gem_put_pages(obj, vkms_obj->pages, false, true);
+ vkms_obj->pages = NULL;
+ }
+
+ mutex_unlock(&vkms_obj->pages_lock);
+}
+
+int vkms_gem_vmap(struct drm_gem_object *obj)
+{
+ struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
+ int ret = 0;
+
+ mutex_lock(&vkms_obj->pages_lock);
+
+ if (!vkms_obj->vaddr) {
+ unsigned int n_pages = obj->size >> PAGE_SHIFT;
+ struct page **pages = _get_pages(vkms_obj);
+
+ if (IS_ERR(pages)) {
+ ret = PTR_ERR(pages);
+ goto out;
+ }
+
+ vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
+ if (!vkms_obj->vaddr)
+ goto err_vmap;
+ }
+
+ vkms_obj->vmap_count++;
+ goto out;
+
+err_vmap:
+ ret = -ENOMEM;
+ drm_gem_put_pages(obj, vkms_obj->pages, false, true);
+ vkms_obj->pages = NULL;
+out:
+ mutex_unlock(&vkms_obj->pages_lock);
+ return ret;
+}
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 901012cb1af1..271a0eb9042c 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -49,14 +49,22 @@ int vkms_output_init(struct vkms_device *vkmsdev)
struct drm_connector *connector = &output->connector;
struct drm_encoder *encoder = &output->encoder;
struct drm_crtc *crtc = &output->crtc;
- struct drm_plane *primary;
+ struct drm_plane *primary, *cursor = NULL;
int ret;
- primary = vkms_plane_init(vkmsdev);
+ primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY);
if (IS_ERR(primary))
return PTR_ERR(primary);
- ret = vkms_crtc_init(dev, crtc, primary, NULL);
+ if (enable_cursor) {
+ cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR);
+ if (IS_ERR(cursor)) {
+ ret = PTR_ERR(cursor);
+ goto err_cursor;
+ }
+ }
+
+ ret = vkms_crtc_init(dev, crtc, primary, cursor);
if (ret)
goto err_crtc;
@@ -106,6 +114,11 @@ err_connector:
drm_crtc_cleanup(crtc);
err_crtc:
+ if (enable_cursor)
+ drm_plane_cleanup(cursor);
+
+err_cursor:
drm_plane_cleanup(primary);
+
return ret;
}
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 9f75b1e2c1c4..7041007396ae 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -8,29 +8,175 @@
#include "vkms_drv.h"
#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+
+static struct drm_plane_state *
+vkms_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct vkms_plane_state *vkms_state;
+ struct vkms_crc_data *crc_data;
+
+ vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
+ if (!vkms_state)
+ return NULL;
+
+ crc_data = kzalloc(sizeof(*crc_data), GFP_KERNEL);
+ if (WARN_ON(!crc_data))
+ DRM_INFO("Couldn't allocate crc_data");
+
+ vkms_state->crc_data = crc_data;
+
+ __drm_atomic_helper_plane_duplicate_state(plane,
+ &vkms_state->base);
+
+ return &vkms_state->base;
+}
+
+static void vkms_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct vkms_plane_state *vkms_state = to_vkms_plane_state(old_state);
+ struct drm_crtc *crtc = vkms_state->base.crtc;
+
+ if (crtc) {
+ /* dropping the reference we acquired in
+ * vkms_plane_atomic_update()
+ */
+ if (drm_framebuffer_read_refcount(&vkms_state->crc_data->fb))
+ drm_framebuffer_put(&vkms_state->crc_data->fb);
+ }
+
+ kfree(vkms_state->crc_data);
+ vkms_state->crc_data = NULL;
+
+ __drm_atomic_helper_plane_destroy_state(old_state);
+ kfree(vkms_state);
+}
+
+static void vkms_plane_reset(struct drm_plane *plane)
+{
+ struct vkms_plane_state *vkms_state;
+
+ if (plane->state)
+ vkms_plane_destroy_state(plane, plane->state);
+
+ vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
+ if (!vkms_state) {
+ DRM_ERROR("Cannot allocate vkms_plane_state\n");
+ return;
+ }
+
+ plane->state = &vkms_state->base;
+ plane->state->plane = plane;
+}
static const struct drm_plane_funcs vkms_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .reset = vkms_plane_reset,
+ .atomic_duplicate_state = vkms_plane_duplicate_state,
+ .atomic_destroy_state = vkms_plane_destroy_state,
};
-static void vkms_primary_plane_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+static void vkms_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct vkms_plane_state *vkms_plane_state;
+ struct drm_framebuffer *fb = plane->state->fb;
+ struct vkms_crc_data *crc_data;
+
+ if (!plane->state->crtc || !fb)
+ return;
+
+ vkms_plane_state = to_vkms_plane_state(plane->state);
+
+ crc_data = vkms_plane_state->crc_data;
+ memcpy(&crc_data->src, &plane->state->src, sizeof(struct drm_rect));
+ memcpy(&crc_data->dst, &plane->state->dst, sizeof(struct drm_rect));
+ memcpy(&crc_data->fb, fb, sizeof(struct drm_framebuffer));
+ drm_framebuffer_get(&crc_data->fb);
+ crc_data->offset = fb->offsets[0];
+ crc_data->pitch = fb->pitches[0];
+ crc_data->cpp = fb->format->cpp[0];
+}
+
+static int vkms_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
+ struct drm_crtc_state *crtc_state;
+ bool can_position = false;
+ int ret;
+
+ if (!state->fb || !state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ can_position = true;
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ can_position, true);
+ if (ret != 0)
+ return ret;
+
+ /* for now primary plane must be visible and full screen */
+ if (!state->visible && !can_position)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vkms_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_gem_object *gem_obj;
+ struct vkms_gem_object *vkms_obj;
+ int ret;
+
+ if (!state->fb)
+ return 0;
+
+ gem_obj = drm_gem_fb_get_obj(state->fb, 0);
+ vkms_obj = drm_gem_to_vkms_gem(gem_obj);
+ ret = vkms_gem_vmap(gem_obj);
+ if (ret)
+ DRM_ERROR("vmap failed: %d\n", ret);
+
+ return drm_gem_fb_prepare_fb(plane, state);
+}
+
+static void vkms_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_gem_object *gem_obj;
+
+ if (!old_state->fb)
+ return;
+
+ gem_obj = drm_gem_fb_get_obj(old_state->fb, 0);
+ vkms_gem_vunmap(gem_obj);
}
static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
- .atomic_update = vkms_primary_plane_update,
+ .atomic_update = vkms_plane_atomic_update,
+ .atomic_check = vkms_plane_atomic_check,
+ .prepare_fb = vkms_prepare_fb,
+ .cleanup_fb = vkms_cleanup_fb,
};
-struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev)
+struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
+ enum drm_plane_type type)
{
struct drm_device *dev = &vkmsdev->drm;
+ const struct drm_plane_helper_funcs *funcs;
struct drm_plane *plane;
const u32 *formats;
int ret, nformats;
@@ -39,19 +185,26 @@ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev)
if (!plane)
return ERR_PTR(-ENOMEM);
- formats = vkms_formats;
- nformats = ARRAY_SIZE(vkms_formats);
+ if (type == DRM_PLANE_TYPE_CURSOR) {
+ formats = vkms_cursor_formats;
+ nformats = ARRAY_SIZE(vkms_cursor_formats);
+ funcs = &vkms_primary_helper_funcs;
+ } else {
+ formats = vkms_formats;
+ nformats = ARRAY_SIZE(vkms_formats);
+ funcs = &vkms_primary_helper_funcs;
+ }
ret = drm_universal_plane_init(dev, plane, 0,
&vkms_plane_funcs,
formats, nformats,
- NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ NULL, type, NULL);
if (ret) {
kfree(plane);
return ERR_PTR(ret);
}
- drm_plane_helper_add(plane, &vkms_primary_helper_funcs);
+ drm_plane_helper_add(plane, funcs);
return plane;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 23beff5d8e3c..0c25bb8faf80 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -720,9 +720,7 @@ void vmw_du_plane_reset(struct drm_plane *plane)
return;
}
- plane->state = &vps->base;
- plane->state->plane = plane;
- plane->state->rotation = DRM_MODE_ROTATE_0;
+ __drm_atomic_helper_plane_reset(plane, &vps->base);
}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index c85bfe7571cb..47ff019d3aef 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -179,7 +179,7 @@ struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
if (!xen_obj->pages)
- return NULL;
+ return ERR_PTR(-ENOMEM);
return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
}
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 90837f7c7d0f..f4c7516eb989 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -302,14 +302,18 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
return clamp_val(reg, 0, 1023) & (0xff << 2);
}
-static u16 adt7475_read_word(struct i2c_client *client, int reg)
+static int adt7475_read_word(struct i2c_client *client, int reg)
{
- u16 val;
+ int val1, val2;
- val = i2c_smbus_read_byte_data(client, reg);
- val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8);
+ val1 = i2c_smbus_read_byte_data(client, reg);
+ if (val1 < 0)
+ return val1;
+ val2 = i2c_smbus_read_byte_data(client, reg + 1);
+ if (val2 < 0)
+ return val2;
- return val;
+ return val1 | (val2 << 8);
}
static void adt7475_write_word(struct i2c_client *client, int reg, u16 val)
@@ -962,13 +966,14 @@ static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr,
{
struct adt7475_data *data = adt7475_update_device(dev);
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- int i = clamp_val(data->range[sattr->index] & 0xf, 0,
- ARRAY_SIZE(pwmfreq_table) - 1);
+ int idx;
if (IS_ERR(data))
return PTR_ERR(data);
+ idx = clamp_val(data->range[sattr->index] & 0xf, 0,
+ ARRAY_SIZE(pwmfreq_table) - 1);
- return sprintf(buf, "%d\n", pwmfreq_table[i]);
+ return sprintf(buf, "%d\n", pwmfreq_table[idx]);
}
static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr,
@@ -1004,6 +1009,10 @@ static ssize_t pwm_use_point2_pwm_at_crit_show(struct device *dev,
char *buf)
{
struct adt7475_data *data = adt7475_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY));
}
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index e9e6aeabbf84..71d3445ba869 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -17,7 +17,7 @@
* Bi-directional Current/Power Monitor with I2C Interface
* Datasheet: http://www.ti.com/product/ina230
*
- * Copyright (C) 2012 Lothar Felten <[email protected]>
+ * Copyright (C) 2012 Lothar Felten <[email protected]>
* Thanks to Jan Volkering
*
* This program is free software; you can redistribute it and/or modify
@@ -329,6 +329,15 @@ static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
return 0;
}
+static ssize_t ina2xx_show_shunt(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt);
+}
+
static ssize_t ina2xx_store_shunt(struct device *dev,
struct device_attribute *da,
const char *buf, size_t count)
@@ -403,7 +412,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
/* shunt resistance */
static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
- ina2xx_show_value, ina2xx_store_shunt,
+ ina2xx_show_shunt, ina2xx_store_shunt,
INA2XX_CALIBRATION);
/* update interval (ina226 only) */
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index c6bd61e4695a..944f5b63aecd 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -63,6 +63,7 @@
#include <linux/bitops.h>
#include <linux/dmi.h>
#include <linux/io.h>
+#include <linux/nospec.h>
#include "lm75.h"
#define USE_ALTERNATE
@@ -2689,6 +2690,7 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr,
return err;
if (val > NUM_TEMP)
return -EINVAL;
+ val = array_index_nospec(val, NUM_TEMP + 1);
if (val && (!(data->have_temp & BIT(val - 1)) ||
!data->temp_src[val - 1]))
return -EINVAL;
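
    [Editor's sketch, not part of the patch] The nct6775 hunk above is the standard Spectre-v1 hardening pattern: a user-supplied index is bounds-checked and then clamped with array_index_nospec() so it cannot be used speculatively out of range. A minimal generic sketch with illustrative names:

    #include <linux/errno.h>
    #include <linux/nospec.h>

    static int example_lookup(unsigned long idx, const int *table, unsigned long n)
    {
            if (idx >= n)
                    return -EINVAL;

            /* Clamp to [0, n) even under speculative execution. */
            idx = array_index_nospec(idx, n);
            return table[idx];
    }
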
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 6ec65adaba49..c33dcfb87993 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -110,8 +110,8 @@ static int sclhi(struct i2c_algo_bit_data *adap)
}
#ifdef DEBUG
if (jiffies != start && i2c_debug >= 3)
- pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go "
- "high\n", jiffies - start);
+ pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go high\n",
+ jiffies - start);
#endif
done:
@@ -171,8 +171,9 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c)
setsda(adap, sb);
udelay((adap->udelay + 1) / 2);
if (sclhi(adap) < 0) { /* timed out */
- bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
- "timeout at bit #%d\n", (int)c, i);
+ bit_dbg(1, &i2c_adap->dev,
+ "i2c_outb: 0x%02x, timeout at bit #%d\n",
+ (int)c, i);
return -ETIMEDOUT;
}
/* FIXME do arbitration here:
@@ -185,8 +186,8 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c)
}
sdahi(adap);
if (sclhi(adap) < 0) { /* timeout */
- bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
- "timeout at ack\n", (int)c);
+ bit_dbg(1, &i2c_adap->dev,
+ "i2c_outb: 0x%02x, timeout at ack\n", (int)c);
return -ETIMEDOUT;
}
@@ -215,8 +216,9 @@ static int i2c_inb(struct i2c_adapter *i2c_adap)
sdahi(adap);
for (i = 0; i < 8; i++) {
if (sclhi(adap) < 0) { /* timeout */
- bit_dbg(1, &i2c_adap->dev, "i2c_inb: timeout at bit "
- "#%d\n", 7 - i);
+ bit_dbg(1, &i2c_adap->dev,
+ "i2c_inb: timeout at bit #%d\n",
+ 7 - i);
return -ETIMEDOUT;
}
indata *= 2;
@@ -265,8 +267,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
goto bailout;
}
if (!scl) {
- printk(KERN_WARNING "%s: SCL unexpected low "
- "while pulling SDA low!\n", name);
+ printk(KERN_WARNING
+ "%s: SCL unexpected low while pulling SDA low!\n",
+ name);
goto bailout;
}
@@ -278,8 +281,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
goto bailout;
}
if (!scl) {
- printk(KERN_WARNING "%s: SCL unexpected low "
- "while pulling SDA high!\n", name);
+ printk(KERN_WARNING
+ "%s: SCL unexpected low while pulling SDA high!\n",
+ name);
goto bailout;
}
@@ -291,8 +295,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
goto bailout;
}
if (!sda) {
- printk(KERN_WARNING "%s: SDA unexpected low "
- "while pulling SCL low!\n", name);
+ printk(KERN_WARNING
+ "%s: SDA unexpected low while pulling SCL low!\n",
+ name);
goto bailout;
}
@@ -304,8 +309,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
goto bailout;
}
if (!sda) {
- printk(KERN_WARNING "%s: SDA unexpected low "
- "while pulling SCL high!\n", name);
+ printk(KERN_WARNING
+ "%s: SDA unexpected low while pulling SCL high!\n",
+ name);
goto bailout;
}
@@ -352,8 +358,8 @@ static int try_address(struct i2c_adapter *i2c_adap,
i2c_start(adap);
}
if (i && ret)
- bit_dbg(1, &i2c_adap->dev, "Used %d tries to %s client at "
- "0x%02x: %s\n", i + 1,
+ bit_dbg(1, &i2c_adap->dev,
+ "Used %d tries to %s client at 0x%02x: %s\n", i + 1,
addr & 1 ? "read from" : "write to", addr >> 1,
ret == 1 ? "success" : "failed, timeout?");
return ret;
@@ -442,8 +448,9 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) {
if (!(flags & I2C_M_NO_RD_ACK))
acknak(i2c_adap, 0);
- dev_err(&i2c_adap->dev, "readbytes: invalid "
- "block length (%d)\n", inval);
+ dev_err(&i2c_adap->dev,
+ "readbytes: invalid block length (%d)\n",
+ inval);
return -EPROTO;
}
/* The original count value accounts for the extra
@@ -506,8 +513,8 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
return -ENXIO;
}
if (flags & I2C_M_RD) {
- bit_dbg(3, &i2c_adap->dev, "emitting repeated "
- "start condition\n");
+ bit_dbg(3, &i2c_adap->dev,
+ "emitting repeated start condition\n");
i2c_repstart(adap);
/* okay, now switch into reading mode */
addr |= 0x01;
@@ -564,8 +571,8 @@ static int bit_xfer(struct i2c_adapter *i2c_adap,
}
ret = bit_doAddress(i2c_adap, pmsg);
if ((ret != 0) && !nak_ok) {
- bit_dbg(1, &i2c_adap->dev, "NAK from "
- "device addr 0x%02x msg #%d\n",
+ bit_dbg(1, &i2c_adap->dev,
+ "NAK from device addr 0x%02x msg #%d\n",
msgs[i].addr, i);
goto bailout;
}
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index e18442b9973a..94d94b4a9a0d 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -708,7 +708,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
i2c_set_adapdata(adap, dev);
if (dev->pm_disabled) {
- dev_pm_syscore_device(dev->dev, true);
irq_flags = IRQF_NO_SUSPEND;
} else {
irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 1a8d2da5b000..b5750fd85125 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -434,6 +434,9 @@ static int dw_i2c_plat_suspend(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+ if (i_dev->pm_disabled)
+ return 0;
+
i_dev->disable(i_dev);
i2c_dw_prepare_clk(i_dev, false);
@@ -444,7 +447,9 @@ static int dw_i2c_plat_resume(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
- i2c_dw_prepare_clk(i_dev, true);
+ if (!i_dev->pm_disabled)
+ i2c_dw_prepare_clk(i_dev, true);
+
i_dev->init(i_dev);
return 0;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 941c223f6491..04b60a349d7e 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1415,6 +1415,13 @@ static void i801_add_tco(struct i801_priv *priv)
}
#ifdef CONFIG_ACPI
+static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv,
+ acpi_physical_address address)
+{
+ return address >= priv->smba &&
+ address <= pci_resource_end(priv->pci_dev, SMBBAR);
+}
+
static acpi_status
i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
u64 *value, void *handler_context, void *region_context)
@@ -1430,7 +1437,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
*/
mutex_lock(&priv->acpi_lock);
- if (!priv->acpi_reserved) {
+ if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
priv->acpi_reserved = true;
dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 439e8778f849..818cab14e87c 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -507,8 +507,6 @@ static void sh_mobile_i2c_dma_callback(void *data)
pd->pos = pd->msg->len;
pd->stop_after_dma = true;
- i2c_release_dma_safe_msg_buf(pd->msg, pd->dma_buf);
-
iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE);
}
@@ -602,8 +600,8 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
dma_async_issue_pending(chan);
}
-static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
- bool do_init)
+static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
+ bool do_init)
{
if (do_init) {
/* Initialize channel registers */
@@ -627,7 +625,6 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
/* Enable all interrupts to begin with */
iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
- return 0;
}
static int poll_dte(struct sh_mobile_i2c_data *pd)
@@ -698,9 +695,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP;
pd->stop_after_dma = false;
- err = start_ch(pd, msg, do_start);
- if (err)
- break;
+ start_ch(pd, msg, do_start);
if (do_start)
i2c_op(pd, OP_START, 0);
@@ -709,6 +704,10 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
timeout = wait_event_timeout(pd->wait,
pd->sr & (ICSR_TACK | SW_DONE),
adapter->timeout);
+
+ /* 'stop_after_dma' tells if DMA transfer was complete */
+ i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma);
+
if (!timeout) {
dev_err(pd->dev, "Transfer request timed out\n");
if (pd->dma_direction != DMA_NONE)
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index f15737763608..9ee9a15e7134 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -2293,21 +2293,22 @@ u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold)
EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf);
/**
- * i2c_release_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg
- * @msg: the message to be synced with
+ * i2c_put_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg
* @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL.
+ * @msg: the message which the buffer corresponds to
+ * @xferred: bool saying if the message was transferred
*/
-void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf)
+void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred)
{
if (!buf || buf == msg->buf)
return;
- if (msg->flags & I2C_M_RD)
+ if (xferred && msg->flags & I2C_M_RD)
memcpy(msg->buf, buf, msg->len);
kfree(buf);
}
-EXPORT_SYMBOL_GPL(i2c_release_dma_safe_msg_buf);
+EXPORT_SYMBOL_GPL(i2c_put_dma_safe_msg_buf);
MODULE_AUTHOR("Simon G. Vogl <[email protected]>");
MODULE_DESCRIPTION("I2C-Bus main module");
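
    [Editor's sketch, not part of the patch] A hedged illustration of how a bus driver typically pairs the renamed helper with i2c_get_dma_safe_msg_buf(): the new third argument tells the core whether the transfer actually happened, so read data is only copied back into msg->buf on success. The hardware-submit and PIO-fallback functions below are hypothetical.

    #include <linux/i2c.h>

    static int example_xfer_pio(struct i2c_adapter *adap, struct i2c_msg *msg);  /* hypothetical */
    static int example_xfer_dma(struct i2c_adapter *adap, u8 *buf, u16 len);     /* hypothetical */

    static int example_xfer_one(struct i2c_adapter *adap, struct i2c_msg *msg)
    {
            bool xferred;
            u8 *buf;
            int ret;

            /* NULL means "message below the DMA threshold" or allocation failure. */
            buf = i2c_get_dma_safe_msg_buf(msg, 8 /* byte threshold */);
            if (!buf)
                    return example_xfer_pio(adap, msg);

            ret = example_xfer_dma(adap, buf, msg->len);
            xferred = (ret == 0);

            /* Copies back into msg->buf only for reads that actually completed. */
            i2c_put_dma_safe_msg_buf(buf, msg, xferred);

            return ret;
    }
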
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 648eb6743ed5..6edffeed9953 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -238,10 +238,6 @@ static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
mmc_exit_request(mq->queue, req);
}
-/*
- * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests
- * will not be dispatched in parallel.
- */
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -264,7 +260,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
spin_lock_irq(q->queue_lock);
- if (mq->recovery_needed) {
+ if (mq->recovery_needed || mq->busy) {
spin_unlock_irq(q->queue_lock);
return BLK_STS_RESOURCE;
}
@@ -291,6 +287,9 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
break;
}
+ /* Parallel dispatch of requests is not supported at the moment */
+ mq->busy = true;
+
mq->in_flight[issue_type] += 1;
get_card = (mmc_tot_in_flight(mq) == 1);
cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
@@ -333,9 +332,12 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
mq->in_flight[issue_type] -= 1;
if (mmc_tot_in_flight(mq) == 0)
put_card = true;
+ mq->busy = false;
spin_unlock_irq(q->queue_lock);
if (put_card)
mmc_put_card(card, &mq->ctx);
+ } else {
+ WRITE_ONCE(mq->busy, false);
}
return ret;
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 17e59d50b496..9bf3c9245075 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -81,6 +81,7 @@ struct mmc_queue {
unsigned int cqe_busy;
#define MMC_CQE_DCMD_BUSY BIT(0)
#define MMC_CQE_QUEUE_FULL BIT(1)
+ bool busy;
bool use_cqe;
bool recovery_needed;
bool in_recovery;
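
    [Editor's sketch, not part of the patch] The queue.c/queue.h hunks above serialize ->queue_rq() with a "busy" flag: a concurrent dispatch gets BLK_STS_RESOURCE and blk-mq re-runs the queue later. A stripped-down sketch of that pattern with illustrative names; the real driver also handles recovery and in-flight accounting.

    #include <linux/blk-mq.h>
    #include <linux/spinlock.h>

    struct example_queue {
            spinlock_t lock;
            bool busy;
    };

    static blk_status_t example_issue(struct example_queue *q, struct request *req); /* hypothetical */

    static blk_status_t example_queue_rq(struct example_queue *q, struct request *req)
    {
            blk_status_t ret;

            spin_lock_irq(&q->lock);
            if (q->busy) {                          /* another dispatch in flight */
                    spin_unlock_irq(&q->lock);
                    return BLK_STS_RESOURCE;        /* blk-mq retries the request */
            }
            q->busy = true;
            spin_unlock_irq(&q->lock);

            ret = example_issue(q, req);

            spin_lock_irq(&q->lock);
            q->busy = false;
            spin_unlock_irq(&q->lock);

            return ret;
    }
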
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c
index 294de177632c..61e4e2a213c9 100644
--- a/drivers/mmc/host/android-goldfish.c
+++ b/drivers/mmc/host/android-goldfish.c
@@ -217,7 +217,7 @@ static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host,
* We don't really have DMA, so we need
* to copy from our platform driver buffer
*/
- sg_copy_to_buffer(data->sg, 1, host->virt_base,
+ sg_copy_from_buffer(data->sg, 1, host->virt_base,
data->sg->length);
}
host->data->bytes_xfered += data->sg->length;
@@ -393,7 +393,7 @@ static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host,
* We don't really have DMA, so we need to copy to our
* platform driver buffer
*/
- sg_copy_from_buffer(data->sg, 1, host->virt_base,
+ sg_copy_to_buffer(data->sg, 1, host->virt_base,
data->sg->length);
}
}
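
    [Editor's sketch, not part of the patch] The two goldfish hunks above swap helpers whose names describe the copy direction relative to the linear buffer: sg_copy_from_buffer() moves bytes from a flat buffer into a scatterlist, while sg_copy_to_buffer() moves them out of a scatterlist into a flat buffer. A small sketch of the read-completion direction, with illustrative names:

    #include <linux/bug.h>
    #include <linux/scatterlist.h>

    static void example_read_complete(struct scatterlist *sg, unsigned int nents,
                                      void *dev_buf, size_t len)
    {
            /* Received data sits in dev_buf; push it into the request's sg list. */
            size_t copied = sg_copy_from_buffer(sg, nents, dev_buf, len);

            WARN_ON(copied != len);
    }
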
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 5aa2c9404e92..be53044086c7 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1976,7 +1976,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
do {
value = atmci_readl(host, ATMCI_RDR);
if (likely(offset + 4 <= sg->length)) {
- sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset);
+ sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset);
offset += 4;
nbytes += 4;
@@ -1993,7 +1993,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
} else {
unsigned int remaining = sg->length - offset;
- sg_pcopy_to_buffer(sg, 1, &value, remaining, offset);
+ sg_pcopy_from_buffer(sg, 1, &value, remaining, offset);
nbytes += remaining;
flush_dcache_page(sg_page(sg));
@@ -2003,7 +2003,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
goto done;
offset = 4 - remaining;
- sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining,
+ sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining,
offset, 0);
nbytes += offset;
}
@@ -2042,7 +2042,7 @@ static void atmci_write_data_pio(struct atmel_mci *host)
do {
if (likely(offset + 4 <= sg->length)) {
- sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset);
+ sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset);
atmci_writel(host, ATMCI_TDR, value);
offset += 4;
@@ -2059,7 +2059,7 @@ static void atmci_write_data_pio(struct atmel_mci *host)
unsigned int remaining = sg->length - offset;
value = 0;
- sg_pcopy_from_buffer(sg, 1, &value, remaining, offset);
+ sg_pcopy_to_buffer(sg, 1, &value, remaining, offset);
nbytes += remaining;
host->sg = sg = sg_next(sg);
@@ -2070,7 +2070,7 @@ static void atmci_write_data_pio(struct atmel_mci *host)
}
offset = 4 - remaining;
- sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining,
+ sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining,
offset, 0);
atmci_writel(host, ATMCI_TDR, value);
nbytes += offset;
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 35cc0de6be67..ca0b43973769 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -45,14 +45,16 @@
/* DM_CM_RST */
#define RST_DTRANRST1 BIT(9)
#define RST_DTRANRST0 BIT(8)
-#define RST_RESERVED_BITS GENMASK_ULL(32, 0)
+#define RST_RESERVED_BITS GENMASK_ULL(31, 0)
/* DM_CM_INFO1 and DM_CM_INFO1_MASK */
#define INFO1_CLEAR 0
+#define INFO1_MASK_CLEAR GENMASK_ULL(31, 0)
#define INFO1_DTRANEND1 BIT(17)
#define INFO1_DTRANEND0 BIT(16)
/* DM_CM_INFO2 and DM_CM_INFO2_MASK */
+#define INFO2_MASK_CLEAR GENMASK_ULL(31, 0)
#define INFO2_DTRANERR1 BIT(17)
#define INFO2_DTRANERR0 BIT(16)
@@ -252,6 +254,12 @@ renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host,
{
struct renesas_sdhi *priv = host_to_priv(host);
+ /* Disable DMAC interrupts, we don't use them */
+ renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1_MASK,
+ INFO1_MASK_CLEAR);
+ renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO2_MASK,
+ INFO2_MASK_CLEAR);
+
/* Each value is set to non-zero to assume "enabling" each DMA */
host->chan_rx = host->chan_tx = (void *)0xdeadbeaf;
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index ca18612c4201..67b2065e7a19 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -1338,6 +1338,11 @@ int denali_init(struct denali_nand_info *denali)
denali_enable_irq(denali);
denali_reset_banks(denali);
+ if (!denali->max_banks) {
+ /* Error out earlier if no chip is found for some reasons. */
+ ret = -ENODEV;
+ goto disable_irq;
+ }
denali->active_bank = DENALI_INVALID_BANK;
diff --git a/drivers/mtd/nand/raw/docg4.c b/drivers/mtd/nand/raw/docg4.c
index a3f04315c05c..427fcbc1b71c 100644
--- a/drivers/mtd/nand/raw/docg4.c
+++ b/drivers/mtd/nand/raw/docg4.c
@@ -1218,7 +1218,7 @@ static int docg4_resume(struct platform_device *pdev)
return 0;
}
-static void __init init_mtd_structs(struct mtd_info *mtd)
+static void init_mtd_structs(struct mtd_info *mtd)
{
/* initialize mtd and nand data structures */
@@ -1290,7 +1290,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
}
-static int __init read_id_reg(struct mtd_info *mtd)
+static int read_id_reg(struct mtd_info *mtd)
{
struct nand_chip *nand = mtd_to_nand(mtd);
struct docg4_priv *doc = nand_get_controller_data(nand);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 139d96c5a023..092c817f8f11 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -110,16 +110,14 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
struct tcf_exts *tc_exts)
{
const struct tc_action *tc_act;
- LIST_HEAD(tc_actions);
- int rc;
+ int i, rc;
if (!tcf_exts_has_actions(tc_exts)) {
netdev_info(bp->dev, "no actions");
return -EINVAL;
}
- tcf_exts_to_list(tc_exts, &tc_actions);
- list_for_each_entry(tc_act, &tc_actions, list) {
+ tcf_exts_for_each_action(i, tc_act, tc_exts) {
/* Drop action */
if (is_tcf_gact_shot(tc_act)) {
actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
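
    [Editor's sketch, not part of the patch] The bnxt hunk above (and the cxgb4 hunks below) move from the removed per-action list_head to index-based iteration. A minimal sketch of the new style, assuming the usual classifier headers:

    #include <linux/errno.h>
    #include <net/pkt_cls.h>
    #include <net/tc_act/tc_gact.h>

    static int example_parse_actions(struct tcf_exts *exts)
    {
            const struct tc_action *act;
            int i;

            if (!tcf_exts_has_actions(exts))
                    return -EINVAL;

            tcf_exts_for_each_action(i, act, exts) {
                    if (is_tcf_gact_shot(act))      /* "drop" action */
                            return 0;
            }
            return -EOPNOTSUPP;
    }
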
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index dc09f9a8a49b..c6707ea2d751 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -482,11 +482,6 @@ static int macb_mii_probe(struct net_device *dev)
if (np) {
if (of_phy_is_fixed_link(np)) {
- if (of_phy_register_fixed_link(np) < 0) {
- dev_err(&bp->pdev->dev,
- "broken fixed-link specification\n");
- return -ENODEV;
- }
bp->phy_node = of_node_get(np);
} else {
bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
@@ -569,7 +564,7 @@ static int macb_mii_init(struct macb *bp)
{
struct macb_platform_data *pdata;
struct device_node *np;
- int err;
+ int err = -ENXIO;
/* Enable management port */
macb_writel(bp, NCR, MACB_BIT(MPE));
@@ -592,12 +587,23 @@ static int macb_mii_init(struct macb *bp)
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
np = bp->pdev->dev.of_node;
- if (pdata)
- bp->mii_bus->phy_mask = pdata->phy_mask;
+ if (np && of_phy_is_fixed_link(np)) {
+ if (of_phy_register_fixed_link(np) < 0) {
+ dev_err(&bp->pdev->dev,
+ "broken fixed-link specification %pOF\n", np);
+ goto err_out_free_mdiobus;
+ }
+
+ err = mdiobus_register(bp->mii_bus);
+ } else {
+ if (pdata)
+ bp->mii_bus->phy_mask = pdata->phy_mask;
+
+ err = of_mdiobus_register(bp->mii_bus, np);
+ }
- err = of_mdiobus_register(bp->mii_bus, np);
if (err)
- goto err_out_free_mdiobus;
+ goto err_out_free_fixed_link;
err = macb_mii_probe(bp->dev);
if (err)
@@ -607,6 +613,7 @@ static int macb_mii_init(struct macb *bp)
err_out_unregister_bus:
mdiobus_unregister(bp->mii_bus);
+err_out_free_fixed_link:
if (np && of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
err_out_free_mdiobus:
@@ -2028,14 +2035,17 @@ static void macb_reset_hw(struct macb *bp)
{
struct macb_queue *queue;
unsigned int q;
+ u32 ctrl = macb_readl(bp, NCR);
/* Disable RX and TX (XXX: Should we halt the transmission
* more gracefully?)
*/
- macb_writel(bp, NCR, 0);
+ ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
/* Clear the stats registers (XXX: Update stats first?) */
- macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
+ ctrl |= MACB_BIT(CLRSTAT);
+
+ macb_writel(bp, NCR, ctrl);
/* Clear all status flags */
macb_writel(bp, TSR, -1);
@@ -2223,7 +2233,7 @@ static void macb_init_hw(struct macb *bp)
}
/* Enable TX and RX */
- macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
}
/* The hash address register is 64 bits long and takes up two
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 623f73dd7738..c116f96956fe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -417,10 +417,9 @@ static void cxgb4_process_flow_actions(struct net_device *in,
struct ch_filter_specification *fs)
{
const struct tc_action *a;
- LIST_HEAD(actions);
+ int i;
- tcf_exts_to_list(cls->exts, &actions);
- list_for_each_entry(a, &actions, list) {
+ tcf_exts_for_each_action(i, a, cls->exts) {
if (is_tcf_gact_ok(a)) {
fs->action = FILTER_PASS;
} else if (is_tcf_gact_shot(a)) {
@@ -591,10 +590,9 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
bool act_redir = false;
bool act_pedit = false;
bool act_vlan = false;
- LIST_HEAD(actions);
+ int i;
- tcf_exts_to_list(cls->exts, &actions);
- list_for_each_entry(a, &actions, list) {
+ tcf_exts_for_each_action(i, a, cls->exts) {
if (is_tcf_gact_ok(a)) {
/* Do nothing */
} else if (is_tcf_gact_shot(a)) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index 18eb2aedd4cb..c7d2b4dc7568 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -93,14 +93,13 @@ static int fill_action_fields(struct adapter *adap,
unsigned int num_actions = 0;
const struct tc_action *a;
struct tcf_exts *exts;
- LIST_HEAD(actions);
+ int i;
exts = cls->knode.exts;
if (!tcf_exts_has_actions(exts))
return -EINVAL;
- tcf_exts_to_list(exts, &actions);
- list_for_each_entry(a, &actions, list) {
+ tcf_exts_for_each_action(i, a, exts) {
/* Don't allow more than one action per rule. */
if (num_actions)
return -EINVAL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index fa5b30f547f6..cad52bd331f7 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -220,10 +220,10 @@ struct hnae_desc_cb {
/* priv data for the desc, e.g. skb when use with ip stack*/
void *priv;
- u16 page_offset;
- u16 reuse_flag;
+ u32 page_offset;
+ u32 length; /* length of the buffer */
- u16 length; /* length of the buffer */
+ u16 reuse_flag;
/* desc type, used by the ring user to mark the type of the priv data */
u16 type;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 9f2b552aee33..02a0ba20fad5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -406,113 +406,13 @@ out_net_tx_busy:
return NETDEV_TX_BUSY;
}
-/**
- * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
- * @data: pointer to the start of the headers
- * @max: total length of section to find headers in
- *
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO, GRO, and RSC offloads. The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
- **/
-static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
- unsigned int max_size)
-{
- unsigned char *network;
- u8 hlen;
-
- /* this should never happen, but better safe than sorry */
- if (max_size < ETH_HLEN)
- return max_size;
-
- /* initialize network frame pointer */
- network = data;
-
- /* set first protocol and move network header forward */
- network += ETH_HLEN;
-
- /* handle any vlan tag if present */
- if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
- == HNS_RX_FLAG_VLAN_PRESENT) {
- if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
- return max_size;
-
- network += VLAN_HLEN;
- }
-
- /* handle L3 protocols */
- if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
- == HNS_RX_FLAG_L3ID_IPV4) {
- if ((typeof(max_size))(network - data) >
- (max_size - sizeof(struct iphdr)))
- return max_size;
-
- /* access ihl as a u8 to avoid unaligned access on ia64 */
- hlen = (network[0] & 0x0F) << 2;
-
- /* verify hlen meets minimum size requirements */
- if (hlen < sizeof(struct iphdr))
- return network - data;
-
- /* record next protocol if header is present */
- } else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
- == HNS_RX_FLAG_L3ID_IPV6) {
- if ((typeof(max_size))(network - data) >
- (max_size - sizeof(struct ipv6hdr)))
- return max_size;
-
- /* record next protocol */
- hlen = sizeof(struct ipv6hdr);
- } else {
- return network - data;
- }
-
- /* relocate pointer to start of L4 header */
- network += hlen;
-
- /* finally sort out TCP/UDP */
- if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
- == HNS_RX_FLAG_L4ID_TCP) {
- if ((typeof(max_size))(network - data) >
- (max_size - sizeof(struct tcphdr)))
- return max_size;
-
- /* access doff as a u8 to avoid unaligned access on ia64 */
- hlen = (network[12] & 0xF0) >> 2;
-
- /* verify hlen meets minimum size requirements */
- if (hlen < sizeof(struct tcphdr))
- return network - data;
-
- network += hlen;
- } else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
- == HNS_RX_FLAG_L4ID_UDP) {
- if ((typeof(max_size))(network - data) >
- (max_size - sizeof(struct udphdr)))
- return max_size;
-
- network += sizeof(struct udphdr);
- }
-
- /* If everything has gone correctly network should be the
- * data section of the packet and will be the end of the header.
- * If not then it probably represents the end of the last recognized
- * header.
- */
- if ((typeof(max_size))(network - data) < max_size)
- return network - data;
- else
- return max_size;
-}
-
static void hns_nic_reuse_page(struct sk_buff *skb, int i,
struct hnae_ring *ring, int pull_len,
struct hnae_desc_cb *desc_cb)
{
struct hnae_desc *desc;
- int truesize, size;
+ u32 truesize;
+ int size;
int last_offset;
bool twobufs;
@@ -530,7 +430,7 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i,
}
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
- size - pull_len, truesize - pull_len);
+ size - pull_len, truesize);
/* avoid re-using remote pages,flag default unreuse */
if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
@@ -695,7 +595,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
} else {
ring->stats.seg_pkt_cnt++;
- pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
+ pull_len = eth_get_headlen(va, HNS_RX_HEAD_SIZE);
memcpy(__skb_put(skb, pull_len), va,
ALIGN(pull_len, sizeof(long)));
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 3554dca7a680..955c4ab18b03 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2019,7 +2019,8 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
struct hns3_desc_cb *desc_cb)
{
struct hns3_desc *desc;
- int truesize, size;
+ u32 truesize;
+ int size;
int last_offset;
bool twobufs;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index a02a96aee2a2..cb450d7ec8c1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -284,11 +284,11 @@ struct hns3_desc_cb {
/* priv data for the desc, e.g. skb when use with ip stack*/
void *priv;
- u16 page_offset;
- u16 reuse_flag;
-
+ u32 page_offset;
u32 length; /* length of the buffer */
+ u16 reuse_flag;
+
/* desc type, used by the ring user to mark the type of the priv data */
u16 type;
};
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index bdb3f8e65ed4..2569a168334c 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -624,14 +624,14 @@ static int e1000_set_ringparam(struct net_device *netdev,
adapter->tx_ring = tx_old;
e1000_free_all_rx_resources(adapter);
e1000_free_all_tx_resources(adapter);
- kfree(tx_old);
- kfree(rx_old);
adapter->rx_ring = rxdr;
adapter->tx_ring = txdr;
err = e1000_up(adapter);
if (err)
goto err_setup;
}
+ kfree(tx_old);
+ kfree(rx_old);
clear_bit(__E1000_RESETTING, &adapter->flags);
return 0;
@@ -644,7 +644,8 @@ err_setup_rx:
err_alloc_rx:
kfree(txdr);
err_alloc_tx:
- e1000_up(adapter);
+ if (netif_running(adapter->netdev))
+ e1000_up(adapter);
err_setup:
clear_bit(__E1000_RESETTING, &adapter->flags);
return err;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index abcd096ede14..5ff6caa83948 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2013,7 +2013,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i);
- WARN_ONCE(p - data != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
+ WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
"stat strings count mismatch!");
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f2c622e78802..ac685ad4d877 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -5122,15 +5122,17 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
u8 *bw_share)
{
struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+ struct i40e_pf *pf = vsi->back;
i40e_status ret;
int i;
- if (vsi->back->flags & I40E_FLAG_TC_MQPRIO)
+ /* There is no need to reset BW when mqprio mode is on. */
+ if (pf->flags & I40E_FLAG_TC_MQPRIO)
return 0;
- if (!vsi->mqprio_qopt.qopt.hw) {
+ if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
if (ret)
- dev_info(&vsi->back->pdev->dev,
+ dev_info(&pf->pdev->dev,
"Failed to reset tx rate for vsi->seid %u\n",
vsi->seid);
return ret;
@@ -5139,12 +5141,11 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
bw_data.tc_bw_credits[i] = bw_share[i];
- ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
- NULL);
+ ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
if (ret) {
- dev_info(&vsi->back->pdev->dev,
+ dev_info(&pf->pdev->dev,
"AQ command Config VSI BW allocation per TC failed = %d\n",
- vsi->back->hw.aq.asq_last_status);
+ pf->hw.aq.asq_last_status);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index d8b5fff581e7..868f4a1d0f72 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -89,6 +89,13 @@ extern const char ice_drv_ver[];
#define ice_for_each_rxq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
+/* Macros for each allocated tx/rx ring whether used or not in a VSI */
+#define ice_for_each_alloc_txq(vsi, i) \
+ for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
+
+#define ice_for_each_alloc_rxq(vsi, i) \
+ for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)
+
struct ice_tc_info {
u16 qoffset;
u16 qcount;
@@ -189,9 +196,9 @@ struct ice_vsi {
struct list_head tmp_sync_list; /* MAC filters to be synced */
struct list_head tmp_unsync_list; /* MAC filters to be unsynced */
- bool irqs_ready;
- bool current_isup; /* Sync 'link up' logging */
- bool stat_offsets_loaded;
+ u8 irqs_ready;
+ u8 current_isup; /* Sync 'link up' logging */
+ u8 stat_offsets_loaded;
/* queue information */
u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -262,7 +269,7 @@ struct ice_pf {
struct ice_hw_port_stats stats;
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
- bool stat_prev_loaded; /* has previous stats been loaded */
+ u8 stat_prev_loaded; /* has previous stats been loaded */
char int_name[ICE_INT_NAME_STR_LEN];
};
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 7541ec2270b3..a0614f472658 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -329,19 +329,19 @@ struct ice_aqc_vsi_props {
/* VLAN section */
__le16 pvid; /* VLANS include priority bits */
u8 pvlan_reserved[2];
- u8 port_vlan_flags;
-#define ICE_AQ_VSI_PVLAN_MODE_S 0
-#define ICE_AQ_VSI_PVLAN_MODE_M (0x3 << ICE_AQ_VSI_PVLAN_MODE_S)
-#define ICE_AQ_VSI_PVLAN_MODE_UNTAGGED 0x1
-#define ICE_AQ_VSI_PVLAN_MODE_TAGGED 0x2
-#define ICE_AQ_VSI_PVLAN_MODE_ALL 0x3
+ u8 vlan_flags;
+#define ICE_AQ_VSI_VLAN_MODE_S 0
+#define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S)
+#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED 0x1
+#define ICE_AQ_VSI_VLAN_MODE_TAGGED 0x2
+#define ICE_AQ_VSI_VLAN_MODE_ALL 0x3
#define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2)
-#define ICE_AQ_VSI_PVLAN_EMOD_S 3
-#define ICE_AQ_VSI_PVLAN_EMOD_M (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR (0x2 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_S 3
+#define ICE_AQ_VSI_VLAN_EMOD_M (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR (0x2 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
u8 pvlan_reserved2[3];
/* ingress egress up sections */
__le32 ingress_table; /* bitmap, 3 bits per up */
@@ -594,6 +594,7 @@ struct ice_sw_rule_lg_act {
#define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S)
#define ICE_LG_ACT_GENERIC_PRIORITY_S 22
#define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S)
+#define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX 7
/* Action = 7 - Set Stat count */
#define ICE_LG_ACT_STAT_COUNT 0x7
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 71d032cc5fa7..661beea6af79 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -45,6 +45,9 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
+ *
+ * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
+ * configuration, flow director filters, etc.).
*/
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
@@ -1483,7 +1486,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
struct ice_phy_info *phy_info;
enum ice_status status = 0;
- if (!pi)
+ if (!pi || !link_up)
return ICE_ERR_PARAM;
phy_info = &pi->phy;
@@ -1619,20 +1622,23 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
}
/* LUT size is only valid for Global and PF table types */
- if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) {
- flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
- ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
- ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
- } else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) {
+ switch (lut_size) {
+ case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
+ break;
+ case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
- } else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) &&
- (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) {
- flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
- ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
- ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
- } else {
+ break;
+ case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
+ if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
+ flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+ break;
+ }
+ /* fall-through */
+ default:
status = ICE_ERR_PARAM;
goto ice_aq_get_set_rss_lut_exit;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 7c511f144ed6..62be72fdc8f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -597,10 +597,14 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
return 0;
init_ctrlq_free_rq:
- ice_shutdown_rq(hw, cq);
- ice_shutdown_sq(hw, cq);
- mutex_destroy(&cq->sq_lock);
- mutex_destroy(&cq->rq_lock);
+ if (cq->rq.head) {
+ ice_shutdown_rq(hw, cq);
+ mutex_destroy(&cq->rq_lock);
+ }
+ if (cq->sq.head) {
+ ice_shutdown_sq(hw, cq);
+ mutex_destroy(&cq->sq_lock);
+ }
return status;
}
@@ -706,10 +710,14 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
return;
}
- ice_shutdown_sq(hw, cq);
- ice_shutdown_rq(hw, cq);
- mutex_destroy(&cq->sq_lock);
- mutex_destroy(&cq->rq_lock);
+ if (cq->sq.head) {
+ ice_shutdown_sq(hw, cq);
+ mutex_destroy(&cq->sq_lock);
+ }
+ if (cq->rq.head) {
+ ice_shutdown_rq(hw, cq);
+ mutex_destroy(&cq->rq_lock);
+ }
}
/**
@@ -1057,8 +1065,11 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
clean_rq_elem_out:
/* Set pending if needed, unlock and return */
- if (pending)
+ if (pending) {
+ /* re-read HW head to calculate actual pending messages */
+ ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
+ }
clean_rq_elem_err:
mutex_unlock(&cq->rq_lock);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 1db304c01d10..c71a9b528d6d 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -26,7 +26,7 @@ static int ice_q_stats_len(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- return ((np->vsi->num_txq + np->vsi->num_rxq) *
+ return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
(sizeof(struct ice_q_stats) / sizeof(u64)));
}
@@ -218,7 +218,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
}
- ice_for_each_txq(vsi, i) {
+ ice_for_each_alloc_txq(vsi, i) {
snprintf(p, ETH_GSTRING_LEN,
"tx-queue-%u.tx_packets", i);
p += ETH_GSTRING_LEN;
@@ -226,7 +226,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
}
- ice_for_each_rxq(vsi, i) {
+ ice_for_each_alloc_rxq(vsi, i) {
snprintf(p, ETH_GSTRING_LEN,
"rx-queue-%u.rx_packets", i);
p += ETH_GSTRING_LEN;
@@ -253,6 +253,24 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
+ /* The number (and order) of strings reported *must* remain
+ * constant for a given netdevice. This function must not
+ * report a different number based on run time parameters
+ * (such as the number of queues in use, or the setting of
+ * a private ethtool flag). This is due to the nature of the
+ * ethtool stats API.
+ *
+ * User space programs such as ethtool must make 3 separate
+ * ioctl requests, one for size, one for the strings, and
+ * finally one for the stats. Since these cross into
+ * user space, changes to the number or size could result in
+ * undefined memory access or incorrect string<->value
+ * correlations for statistics.
+ *
+ * Even if it appears to be safe, changes to the size or
+ * order of strings will suffer from race conditions and are
+ * not safe.
+ */
return ICE_ALL_STATS_LEN(netdev);
default:
return -EOPNOTSUPP;
@@ -280,18 +298,26 @@ ice_get_ethtool_stats(struct net_device *netdev,
/* populate per queue stats */
rcu_read_lock();
- ice_for_each_txq(vsi, j) {
+ ice_for_each_alloc_txq(vsi, j) {
ring = READ_ONCE(vsi->tx_rings[j]);
- if (!ring)
- continue;
- data[i++] = ring->stats.pkts;
- data[i++] = ring->stats.bytes;
+ if (ring) {
+ data[i++] = ring->stats.pkts;
+ data[i++] = ring->stats.bytes;
+ } else {
+ data[i++] = 0;
+ data[i++] = 0;
+ }
}
- ice_for_each_rxq(vsi, j) {
+ ice_for_each_alloc_rxq(vsi, j) {
ring = READ_ONCE(vsi->rx_rings[j]);
- data[i++] = ring->stats.pkts;
- data[i++] = ring->stats.bytes;
+ if (ring) {
+ data[i++] = ring->stats.pkts;
+ data[i++] = ring->stats.bytes;
+ } else {
+ data[i++] = 0;
+ data[i++] = 0;
+ }
}
rcu_read_unlock();
@@ -519,7 +545,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
goto done;
}
- for (i = 0; i < vsi->num_txq; i++) {
+ for (i = 0; i < vsi->alloc_txq; i++) {
/* clone ring and setup updated count */
tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_cnt;
@@ -551,7 +577,7 @@ process_rx:
goto done;
}
- for (i = 0; i < vsi->num_rxq; i++) {
+ for (i = 0; i < vsi->alloc_rxq; i++) {
/* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_cnt;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 499904874b3f..6076fc87df9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -121,10 +121,6 @@
#define PFINT_FW_CTL_CAUSE_ENA_S 30
#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S)
#define PFINT_OICR 0x0016CA00
-#define PFINT_OICR_HLP_RDY_S 14
-#define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S)
-#define PFINT_OICR_CPM_RDY_S 15
-#define PFINT_OICR_CPM_RDY_M BIT(PFINT_OICR_CPM_RDY_S)
#define PFINT_OICR_ECC_ERR_S 16
#define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S)
#define PFINT_OICR_MAL_DETECT_S 19
@@ -133,10 +129,6 @@
#define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S)
#define PFINT_OICR_PCI_EXCEPTION_S 21
#define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S)
-#define PFINT_OICR_GPIO_S 22
-#define PFINT_OICR_GPIO_M BIT(PFINT_OICR_GPIO_S)
-#define PFINT_OICR_STORM_DETECT_S 24
-#define PFINT_OICR_STORM_DETECT_M BIT(PFINT_OICR_STORM_DETECT_S)
#define PFINT_OICR_HMC_ERR_S 26
#define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S)
#define PFINT_OICR_PE_CRITERR_S 28
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index d23a91665b46..068dbc740b76 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -265,6 +265,7 @@ enum ice_rx_flex_desc_status_error_0_bits {
struct ice_rlan_ctx {
u16 head;
u16 cpuid; /* bigger than needed, see above for reason */
+#define ICE_RLAN_BASE_S 7
u64 base;
u16 qlen;
#define ICE_RLAN_CTX_DBUF_S 7
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 5299caf55a7f..f1e80eed2fd6 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -901,7 +901,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
case ice_aqc_opc_get_link_status:
if (ice_handle_link_event(pf))
dev_err(&pf->pdev->dev,
- "Could not handle link event");
+ "Could not handle link event\n");
break;
default:
dev_dbg(&pf->pdev->dev,
@@ -917,13 +917,27 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
}
/**
+ * ice_ctrlq_pending - check if there is a difference between ntc and ntu
+ * @hw: pointer to hardware info
+ * @cq: control queue information
+ *
+ * returns true if there are pending messages in a queue, false if there aren't
+ */
+static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ u16 ntu;
+
+ ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
+ return cq->rq.next_to_clean != ntu;
+}
+
+/**
* ice_clean_adminq_subtask - clean the AdminQ rings
* @pf: board private structure
*/
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- u32 val;
if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
return;
@@ -933,9 +947,13 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
- /* re-enable Admin queue interrupt causes */
- val = rd32(hw, PFINT_FW_CTL);
- wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M));
+ /* There might be a situation where new messages arrive to a control
+ * queue between processing the last message and clearing the
+ * EVENT_PENDING bit. So before exiting, check queue head again (using
+ * ice_ctrlq_pending) and process new messages if any.
+ */
+ if (ice_ctrlq_pending(hw, &hw->adminq))
+ __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
ice_flush(hw);
}
@@ -1295,11 +1313,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
qcount = numq_tc;
}
- /* find higher power-of-2 of qcount */
- pow = ilog2(qcount);
-
- if (!is_power_of_2(qcount))
- pow++;
+ /* find the (rounded up) power-of-2 of qcount */
+ pow = order_base_2(qcount);
for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
@@ -1352,14 +1367,15 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
/* Traffic from VSI can be sent to LAN */
ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
- /* Allow all packets untagged/tagged */
- ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL &
- ICE_AQ_VSI_PVLAN_MODE_M) >>
- ICE_AQ_VSI_PVLAN_MODE_S);
- /* Show VLAN/UP from packets in Rx descriptors */
- ctxt->info.port_vlan_flags |= ((ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH &
- ICE_AQ_VSI_PVLAN_EMOD_M) >>
- ICE_AQ_VSI_PVLAN_EMOD_S);
+
+ /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
+ * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
+ * packets untagged/tagged.
+ */
+ ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
+ ICE_AQ_VSI_VLAN_MODE_M) >>
+ ICE_AQ_VSI_VLAN_MODE_S);
+
/* Have 1:1 UP mapping for both ingress/egress tables */
table |= ICE_UP_TABLE_TRANSLATE(0, 0);
table |= ICE_UP_TABLE_TRANSLATE(1, 1);
@@ -1688,15 +1704,12 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
rd32(hw, PFINT_OICR); /* read to clear */
- val = (PFINT_OICR_HLP_RDY_M |
- PFINT_OICR_CPM_RDY_M |
- PFINT_OICR_ECC_ERR_M |
+ val = (PFINT_OICR_ECC_ERR_M |
PFINT_OICR_MAL_DETECT_M |
PFINT_OICR_GRST_M |
PFINT_OICR_PCI_EXCEPTION_M |
- PFINT_OICR_GPIO_M |
- PFINT_OICR_STORM_DETECT_M |
- PFINT_OICR_HMC_ERR_M);
+ PFINT_OICR_HMC_ERR_M |
+ PFINT_OICR_PE_CRITERR_M);
wr32(hw, PFINT_OICR_ENA, val);
@@ -2058,15 +2071,13 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
skip_req_irq:
ice_ena_misc_vector(pf);
- val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
- (ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) |
- PFINT_OICR_CTL_CAUSE_ENA_M;
+ val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
+ PFINT_OICR_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_OICR_CTL, val);
/* This enables Admin queue Interrupt causes */
- val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
- (ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) |
- PFINT_FW_CTL_CAUSE_ENA_M;
+ val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
+ PFINT_FW_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_FW_CTL, val);
itr_gran = hw->itr_gran_200;
@@ -3246,8 +3257,10 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf)
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
ice_dis_msix(pf);
- devm_kfree(&pf->pdev->dev, pf->irq_tracker);
- pf->irq_tracker = NULL;
+ if (pf->irq_tracker) {
+ devm_kfree(&pf->pdev->dev, pf->irq_tracker);
+ pf->irq_tracker = NULL;
+ }
}
/**
@@ -3271,7 +3284,7 @@ static int ice_probe(struct pci_dev *pdev,
err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
if (err) {
- dev_err(&pdev->dev, "I/O map error %d\n", err);
+ dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
return err;
}
@@ -3720,10 +3733,10 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
enum ice_status status;
/* Here we are configuring the VSI to let the driver add VLAN tags by
- * setting port_vlan_flags to ICE_AQ_VSI_PVLAN_MODE_ALL. The actual VLAN
- * tag insertion happens in the Tx hot path, in ice_tx_map.
+ * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
+ * insertion happens in the Tx hot path, in ice_tx_map.
*/
- ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_MODE_ALL;
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
ctxt.vsi_num = vsi->vsi_num;
@@ -3735,7 +3748,7 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
return -EIO;
}
- vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+ vsi->info.vlan_flags = ctxt.info.vlan_flags;
return 0;
}
@@ -3757,12 +3770,15 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
*/
if (ena) {
/* Strip VLAN tag from Rx packet and put it in the desc */
- ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
} else {
/* Disable stripping. Leave tag in packet */
- ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_NOTHING;
+ ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
}
+ /* Allow all packets untagged/tagged */
+ ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
+
ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
ctxt.vsi_num = vsi->vsi_num;
@@ -3773,7 +3789,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
return -EIO;
}
- vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+ vsi->info.vlan_flags = ctxt.info.vlan_flags;
return 0;
}
@@ -3986,7 +4002,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
/* clear the context structure first */
memset(&rlan_ctx, 0, sizeof(rlan_ctx));
- rlan_ctx.base = ring->dma >> 7;
+ rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
rlan_ctx.qlen = ring->count;
@@ -4098,11 +4114,12 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
{
int err;
- ice_set_rx_mode(vsi->netdev);
-
- err = ice_restore_vlan(vsi);
- if (err)
- return err;
+ if (vsi->netdev) {
+ ice_set_rx_mode(vsi->netdev);
+ err = ice_restore_vlan(vsi);
+ if (err)
+ return err;
+ }
err = ice_vsi_cfg_txqs(vsi);
if (!err)
@@ -4868,7 +4885,7 @@ int ice_down(struct ice_vsi *vsi)
*/
static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
{
- int i, err;
+ int i, err = 0;
if (!vsi->num_txq) {
dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
@@ -4893,7 +4910,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
*/
static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
{
- int i, err;
+ int i, err = 0;
if (!vsi->num_rxq) {
dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
@@ -5235,7 +5252,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
u8 count = 0;
if (new_mtu == netdev->mtu) {
- netdev_warn(netdev, "mtu is already %d\n", netdev->mtu);
+ netdev_warn(netdev, "mtu is already %u\n", netdev->mtu);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 92da0a626ce0..295a8cd87fc1 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -131,9 +131,8 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
*
* This function will request NVM ownership.
*/
-static enum
-ice_status ice_acquire_nvm(struct ice_hw *hw,
- enum ice_aq_res_access_type access)
+static enum ice_status
+ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
if (hw->nvm.blank_nvm_mode)
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 2e6c1d92cc88..eeae199469b6 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1576,8 +1576,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
return status;
}
- if (owner == ICE_SCHED_NODE_OWNER_LAN)
- vsi->max_lanq[tc] = new_numqs;
+ vsi->max_lanq[tc] = new_numqs;
return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 723d15f1e90b..6b7ec2ae5ad6 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -645,14 +645,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
- act = (7 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;
+ act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
+ ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
/* Third action Marker value */
act |= ICE_LG_ACT_GENERIC;
act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
ICE_LG_ACT_GENERIC_VALUE_M;
- act |= (0 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;
lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
/* call the fill switch rule to fill the lookup tx rx structure */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 6f4a0d159dbf..9b8ec128ee31 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -17,7 +17,7 @@ struct ice_vsi_ctx {
u16 vsis_unallocated;
u16 flags;
struct ice_aqc_vsi_props info;
- bool alloc_from_pool;
+ u8 alloc_from_pool;
};
enum ice_sw_fwd_act_type {
@@ -94,8 +94,8 @@ struct ice_fltr_info {
u8 qgrp_size;
/* Rule creations populate these indicators basing on the switch type */
- bool lb_en; /* Indicate if packet can be looped back */
- bool lan_en; /* Indicate if packet can be forwarded to the uplink */
+ u8 lb_en; /* Indicate if packet can be looped back */
+ u8 lan_en; /* Indicate if packet can be forwarded to the uplink */
};
/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 567067b650c4..31bc998fe200 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -143,7 +143,7 @@ struct ice_ring {
u16 next_to_use;
u16 next_to_clean;
- bool ring_active; /* is ring online or not */
+ u8 ring_active; /* is ring online or not */
/* stats structs */
struct ice_q_stats stats;
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 99c8a9a71b5e..97c366e0ca59 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -83,7 +83,7 @@ struct ice_link_status {
u64 phy_type_low;
u16 max_frame_size;
u16 link_speed;
- bool lse_ena; /* Link Status Event notification */
+ u8 lse_ena; /* Link Status Event notification */
u8 link_info;
u8 an_info;
u8 ext_info;
@@ -101,7 +101,7 @@ struct ice_phy_info {
struct ice_link_status link_info_old;
u64 phy_type_low;
enum ice_media_type media_type;
- bool get_link_info;
+ u8 get_link_info;
};
/* Common HW capabilities for SW use */
@@ -167,7 +167,7 @@ struct ice_nvm_info {
u32 oem_ver; /* OEM version info */
u16 sr_words; /* Shadow RAM size in words */
u16 ver; /* NVM package version */
- bool blank_nvm_mode; /* is NVM empty (no FW present) */
+ u8 blank_nvm_mode; /* is NVM empty (no FW present) */
};
/* Max number of port to queue branches w.r.t topology */
@@ -181,7 +181,7 @@ struct ice_sched_node {
struct ice_aqc_txsched_elem_data info;
u32 agg_id; /* aggregator group id */
u16 vsi_id;
- bool in_use; /* suspended or in use */
+ u8 in_use; /* suspended or in use */
u8 tx_sched_layer; /* Logical Layer (1-9) */
u8 num_children;
u8 tc_num;
@@ -218,7 +218,7 @@ struct ice_sched_vsi_info {
struct ice_sched_tx_policy {
u16 max_num_vsis;
u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS];
- bool rdma_ena;
+ u8 rdma_ena;
};
struct ice_port_info {
@@ -243,7 +243,7 @@ struct ice_port_info {
struct list_head agg_list; /* lists all aggregator */
u8 lport;
#define ICE_LPORT_MASK 0xff
- bool is_vf;
+ u8 is_vf;
};
struct ice_switch_info {
@@ -287,7 +287,7 @@ struct ice_hw {
u8 max_cgds;
u8 sw_entry_point_layer;
- bool evb_veb; /* true for VEB, false for VEPA */
+ u8 evb_veb; /* true for VEB, false for VEPA */
struct ice_bus_info bus;
struct ice_nvm_info nvm;
struct ice_hw_dev_caps dev_caps; /* device capabilities */
@@ -318,7 +318,7 @@ struct ice_hw {
u8 itr_gran_100;
u8 itr_gran_50;
u8 itr_gran_25;
- bool ucast_shared; /* true if VSIs can share unicast addr */
+ u8 ucast_shared; /* true if VSIs can share unicast addr */
};
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index f92f7918112d..5acf3b743876 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1649,7 +1649,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
if (hw->phy.type == e1000_phy_m88)
igb_phy_disable_receiver(adapter);
- mdelay(500);
+ msleep(500);
return 0;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index d03c2f0d7592..a32c576c1e65 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3873,7 +3873,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
sizeof(struct igb_mac_addr),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!adapter->mac_table)
return -ENOMEM;
@@ -3883,7 +3883,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
/* Setup and initialize a copy of the hw vlan table array */
adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!adapter->shadow_vfta)
return -ENOMEM;
@@ -5816,7 +5816,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
- if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
+ if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
+ !tx_ring->launchtime_enable)
return;
goto no_csum;
}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 43664adf7a3c..d3e72d0f66ef 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -771,14 +771,13 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
rxdr->size = ALIGN(rxdr->size, 4096);
- rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
- GFP_KERNEL);
+ rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
vfree(rxdr->buffer_info);
return -ENOMEM;
}
- memset(rxdr->desc, 0, rxdr->size);
rxdr->next_to_clean = 0;
rxdr->next_to_use = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 94b3165ff543..ccd852ad62a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
}
/* alloc the udl from per cpu ddp pool */
- ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
+ ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp);
if (!ddp->udl) {
e_err(drv, "failed allocated ddp context\n");
goto out_noddp_unmap;
@@ -760,7 +760,7 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
return 0;
/* Extra buffer to be shared by all DDPs for HW work around */
- buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+ buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 447098005490..9a23d33a47ed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6201,7 +6201,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
sizeof(struct ixgbe_mac_addr),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!adapter->mac_table)
return -ENOMEM;
@@ -6620,8 +6620,18 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
if (adapter->xdp_prog) {
- e_warn(probe, "MTU cannot be changed while XDP program is loaded\n");
- return -EPERM;
+ int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
+ VLAN_HLEN;
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+ if (new_frame_size > ixgbe_rx_bufsz(ring)) {
+ e_warn(probe, "Requested MTU size is not supported with XDP\n");
+ return -EINVAL;
+ }
+ }
}
/*
@@ -8983,6 +8993,15 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
#ifdef CONFIG_IXGBE_DCB
if (tc) {
+ if (adapter->xdp_prog) {
+ e_warn(probe, "DCB is not supported with XDP\n");
+
+ ixgbe_init_interrupt_scheme(adapter);
+ if (netif_running(dev))
+ ixgbe_open(dev);
+ return -EINVAL;
+ }
+
netdev_set_num_tc(dev, tc);
ixgbe_set_prio_tc_map(adapter);
@@ -9171,14 +9190,12 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
struct tcf_exts *exts, u64 *action, u8 *queue)
{
const struct tc_action *a;
- LIST_HEAD(actions);
+ int i;
if (!tcf_exts_has_actions(exts))
return -EINVAL;
- tcf_exts_to_list(exts, &actions);
- list_for_each_entry(a, &actions, list) {
-
+ tcf_exts_for_each_action(i, a, exts) {
/* Drop action */
if (is_tcf_gact_shot(a)) {
*action = IXGBE_FDIR_DROP_QUEUE;
@@ -9936,6 +9953,11 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
int tcs = adapter->hw_tcs ? : 1;
int pool, err;
+ if (adapter->xdp_prog) {
+ e_warn(probe, "L2FW offload is not supported with XDP\n");
+ return ERR_PTR(-EINVAL);
+ }
+
/* The hardware supported by ixgbe only filters on the destination MAC
* address. In order to avoid issues we only support offloading modes
* where the hardware can actually provide the functionality.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 6f59933cdff7..3c6f01c41b78 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -53,6 +53,11 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
int i;
+ if (adapter->xdp_prog) {
+ e_warn(probe, "SRIOV is not supported with XDP\n");
+ return -EINVAL;
+ }
+
/* Enable VMDq flag so device will be set in VM mode */
adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
IXGBE_FLAG_VMDQ_ENABLED;
@@ -688,8 +693,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+ u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
u8 num_tcs = adapter->hw_tcs;
+ u32 reg_val;
+ u32 queue;
+ u32 word;
/* remove VLAN filters beloning to this VF */
ixgbe_clear_vf_vlans(adapter, vf);
@@ -726,6 +736,27 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
/* reset VF api back to unknown */
adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
+
+ /* Restart each queue for given VF */
+ for (queue = 0; queue < q_per_pool; queue++) {
+ unsigned int reg_idx = (vf * q_per_pool) + queue;
+
+ reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx));
+
+ /* Re-enabling only configured queues */
+ if (reg_val) {
+ reg_val |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
+ reg_val &= ~IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
+ }
+ }
+
+ /* Clear VF's mailbox memory */
+ for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);
+
+ IXGBE_WRITE_FLUSH(hw);
}
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 44cfb2021145..41bcbb337e83 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2518,6 +2518,7 @@ enum {
/* Translated register #defines */
#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P)))
#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P)))
+#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P)))
#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9131a1376e7d..9fed54017659 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1982,14 +1982,15 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
goto out_ok;
modify_ip_header = false;
- tcf_exts_to_list(exts, &actions);
- list_for_each_entry(a, &actions, list) {
+ tcf_exts_for_each_action(i, a, exts) {
+ int k;
+
if (!is_tcf_pedit(a))
continue;
nkeys = tcf_pedit_nkeys(a);
- for (i = 0; i < nkeys; i++) {
- htype = tcf_pedit_htype(a, i);
+ for (k = 0; k < nkeys; k++) {
+ htype = tcf_pedit_htype(a, k);
if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
modify_ip_header = true;
@@ -2053,15 +2054,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
const struct tc_action *a;
LIST_HEAD(actions);
u32 action = 0;
- int err;
+ int err, i;
if (!tcf_exts_has_actions(exts))
return -EINVAL;
attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
- tcf_exts_to_list(exts, &actions);
- list_for_each_entry(a, &actions, list) {
+ tcf_exts_for_each_action(i, a, exts) {
if (is_tcf_gact_shot(a)) {
action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
if (MLX5_CAP_FLOWTABLE(priv->mdev,
@@ -2666,7 +2666,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
LIST_HEAD(actions);
bool encap = false;
u32 action = 0;
- int err;
+ int err, i;
if (!tcf_exts_has_actions(exts))
return -EINVAL;
@@ -2674,8 +2674,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
attr->in_rep = rpriv->rep;
attr->in_mdev = priv->mdev;
- tcf_exts_to_list(exts, &actions);
- list_for_each_entry(a, &actions, list) {
+ tcf_exts_for_each_action(i, a, exts) {
if (is_tcf_gact_shot(a)) {
action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
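The hunk above, together with the ixgbe hunk earlier and the mlxsw, nfp, qede and stmmac hunks below, replaces the temporary action list with the tcf_exts_for_each_action() iterator. A minimal caller-side sketch of the new pattern, with an illustrative drop-action check only:

	const struct tc_action *a;
	int i;

	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	/* Iterate the actions in place; no tcf_exts_to_list()/LIST_HEAD() needed. */
	tcf_exts_for_each_action(i, a, exts) {
		if (is_tcf_gact_shot(a)) {
			/* handle the drop action */
		}
	}
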
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 6070d1591d1e..930700413b1d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1346,8 +1346,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
return -ENOMEM;
mall_tc_entry->cookie = f->cookie;
- tcf_exts_to_list(f->exts, &actions);
- a = list_first_entry(&actions, struct tc_action, list);
+ a = tcf_exts_first_action(f->exts);
if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 3ae930196741..3cdb7aca90b7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -414,6 +414,8 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
+void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev);
/* spectrum_kvdl.c */
enum mlxsw_sp_kvdl_entry_type {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index ebd1b24ebaa5..8d211972c5e9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -21,8 +21,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack)
{
const struct tc_action *a;
- LIST_HEAD(actions);
- int err;
+ int err, i;
if (!tcf_exts_has_actions(exts))
return 0;
@@ -32,8 +31,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- tcf_exts_to_list(exts, &actions);
- list_for_each_entry(a, &actions, list) {
+ tcf_exts_for_each_action(i, a, exts) {
if (is_tcf_gact_ok(a)) {
err = mlxsw_sp_acl_rulei_act_terminate(rulei);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 3a96307f51b0..2ab9cf25a08a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -6234,6 +6234,17 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
mlxsw_sp_vr_put(mlxsw_sp, vr);
}
+void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev)
+{
+ struct mlxsw_sp_rif *rif;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!rif)
+ return;
+ mlxsw_sp_rif_destroy(rif);
+}
+
static void
mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 0d8444aaba01..db715da7bab7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -127,6 +127,24 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
}
+static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
+ void *data)
+{
+ struct mlxsw_sp *mlxsw_sp = data;
+
+ mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
+ return 0;
+}
+
+static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev)
+{
+ mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
+ netdev_walk_all_upper_dev_rcu(dev,
+ mlxsw_sp_bridge_device_upper_rif_destroy,
+ mlxsw_sp);
+}
+
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
struct net_device *br_dev)
@@ -165,6 +183,8 @@ static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
struct mlxsw_sp_bridge_device *bridge_device)
{
+ mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
+ bridge_device->dev);
list_del(&bridge_device->list);
if (bridge_device->vlan_enabled)
bridge->vlan_enabled_exists = false;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 0ba0356ec4e6..9044496803e6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -796,11 +796,10 @@ int nfp_flower_compile_action(struct nfp_app *app,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow)
{
- int act_len, act_cnt, err, tun_out_cnt, out_cnt;
+ int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
enum nfp_flower_tun_type tun_type;
const struct tc_action *a;
u32 csum_updated = 0;
- LIST_HEAD(actions);
memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
nfp_flow->meta.act_len = 0;
@@ -810,8 +809,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
tun_out_cnt = 0;
out_cnt = 0;
- tcf_exts_to_list(flow->exts, &actions);
- list_for_each_entry(a, &actions, list) {
+ tcf_exts_for_each_action(i, a, flow->exts) {
err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
netdev, &tun_type, &tun_out_cnt,
&out_cnt, &csum_updated);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index d9ab5add27a8..34193c2f1699 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -407,7 +407,7 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
if (i == QED_INIT_MAX_POLL_COUNT) {
DP_ERR(p_hwfn,
- "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
+ "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
addr, le32_to_cpu(cmd->expected_val),
val, le32_to_cpu(cmd->op_data));
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index d89a0e22f6e4..5d37ec7e9b0b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -48,7 +48,7 @@
#include "qed_reg_addr.h"
#include "qed_sriov.h"
-#define CHIP_MCP_RESP_ITER_US 10
+#define QED_MCP_RESP_ITER_US 10
#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
#define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
@@ -183,18 +183,57 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn)
return 0;
}
+/* Maximum of 1 sec to wait for the SHMEM ready indication */
+#define QED_MCP_SHMEM_RDY_MAX_RETRIES 20
+#define QED_MCP_SHMEM_RDY_ITER_MS 50
+
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_info *p_info = p_hwfn->mcp_info;
+ u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
+ u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
u32 drv_mb_offsize, mfw_mb_offsize;
u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
- if (!p_info->public_base)
- return 0;
+ if (!p_info->public_base) {
+ DP_NOTICE(p_hwfn,
+ "The address of the MCP scratch-pad is not configured\n");
+ return -EINVAL;
+ }
p_info->public_base |= GRCBASE_MCP;
+ /* Get the MFW MB address and number of supported messages */
+ mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_MFW_MB));
+ p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+ p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
+ p_info->mfw_mb_addr +
+ offsetof(struct public_mfw_mb,
+ sup_msgs));
+
+ /* The driver can notify that there was an MCP reset, and might read the
+ * SHMEM values before the MFW has completed initializing them.
+ * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
+ * data ready indication.
+ */
+ while (!p_info->mfw_mb_length && --cnt) {
+ msleep(msec);
+ p_info->mfw_mb_length =
+ (u16)qed_rd(p_hwfn, p_ptt,
+ p_info->mfw_mb_addr +
+ offsetof(struct public_mfw_mb, sup_msgs));
+ }
+
+ if (!cnt) {
+ DP_NOTICE(p_hwfn,
+ "Failed to get the SHMEM ready notification after %d msec\n",
+ QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
+ return -EBUSY;
+ }
+
/* Calculate the driver and MFW mailbox address */
drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
SECTION_OFFSIZE_ADDR(p_info->public_base,
@@ -204,13 +243,6 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
"drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
- /* Set the MFW MB address */
- mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
- SECTION_OFFSIZE_ADDR(p_info->public_base,
- PUBLIC_MFW_MB));
- p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
- p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
-
/* Get the current driver mailbox sequence before sending
* the first command
*/
@@ -285,9 +317,15 @@ static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
+ u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
int rc = 0;
+ if (p_hwfn->mcp_info->b_block_cmd) {
+ DP_NOTICE(p_hwfn,
+ "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
+ return -EBUSY;
+ }
+
/* Ensure that only a single thread is accessing the mailbox */
spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
@@ -413,14 +451,41 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
(p_mb_params->cmd | seq_num), p_mb_params->param);
}
+static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
+{
+ p_hwfn->mcp_info->b_block_cmd = block_cmd;
+
+ DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
+ block_cmd ? "Block" : "Unblock");
+}
+
+static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
+ u32 delay = QED_MCP_RESP_ITER_US;
+
+ cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+ cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+ udelay(delay);
+ cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+ udelay(delay);
+ cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+
+ DP_NOTICE(p_hwfn,
+ "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
+ cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
+}
+
static int
_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_mcp_mb_params *p_mb_params,
- u32 max_retries, u32 delay)
+ u32 max_retries, u32 usecs)
{
+ u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
struct qed_mcp_cmd_elem *p_cmd_elem;
- u32 cnt = 0;
u16 seq_num;
int rc = 0;
@@ -443,7 +508,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
goto err;
spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
- udelay(delay);
+
+ if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+ msleep(msecs);
+ else
+ udelay(usecs);
} while (++cnt < max_retries);
if (cnt >= max_retries) {
@@ -472,7 +541,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
* The spinlock stays locked until the list element is removed.
*/
- udelay(delay);
+ if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+ msleep(msecs);
+ else
+ udelay(usecs);
+
spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
if (p_cmd_elem->b_is_completed)
@@ -491,11 +564,15 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn,
"The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
p_mb_params->cmd, p_mb_params->param);
+ qed_mcp_print_cpu_info(p_hwfn, p_ptt);
spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+ if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
+ qed_mcp_cmd_set_blocking(p_hwfn, true);
+
return -EAGAIN;
}
@@ -507,7 +584,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
"MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
p_mb_params->mcp_resp,
p_mb_params->mcp_param,
- (cnt * delay) / 1000, (cnt * delay) % 1000);
+ (cnt * usecs) / 1000, (cnt * usecs) % 1000);
/* Clear the sequence number from the MFW response */
p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
@@ -525,7 +602,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
{
size_t union_data_size = sizeof(union drv_union_data);
u32 max_retries = QED_DRV_MB_MAX_RETRIES;
- u32 delay = CHIP_MCP_RESP_ITER_US;
+ u32 usecs = QED_MCP_RESP_ITER_US;
/* MCP not initialized */
if (!qed_mcp_is_init(p_hwfn)) {
@@ -533,6 +610,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
return -EBUSY;
}
+ if (p_hwfn->mcp_info->b_block_cmd) {
+ DP_NOTICE(p_hwfn,
+ "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ return -EBUSY;
+ }
+
if (p_mb_params->data_src_size > union_data_size ||
p_mb_params->data_dst_size > union_data_size) {
DP_ERR(p_hwfn,
@@ -542,8 +626,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
+ if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
+ max_retries = DIV_ROUND_UP(max_retries, 1000);
+ usecs *= 1000;
+ }
+
return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
- delay);
+ usecs);
}
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
@@ -761,6 +850,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hwfn,
mb_params.data_src_size = sizeof(load_req);
mb_params.p_data_dst = &load_rsp;
mb_params.data_dst_size = sizeof(load_rsp);
+ mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
@@ -982,7 +1072,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 wol_param, mcp_resp, mcp_param;
+ struct qed_mcp_mb_params mb_params;
+ u32 wol_param;
switch (p_hwfn->cdev->wol_config) {
case QED_OV_WOL_DISABLED:
@@ -1000,8 +1091,12 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
}
- return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
- &mcp_resp, &mcp_param);
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
+ mb_params.param = wol_param;
+ mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
+
+ return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -2077,31 +2172,65 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
return rc;
}
+/* A maximum of 100 msec to wait for the MCP to halt */
+#define QED_MCP_HALT_SLEEP_MS 10
+#define QED_MCP_HALT_MAX_RETRIES 10
+
int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 resp = 0, param = 0;
+ u32 resp = 0, param = 0, cpu_state, cnt = 0;
int rc;
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
&param);
- if (rc)
+ if (rc) {
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
- return rc;
+ do {
+ msleep(QED_MCP_HALT_SLEEP_MS);
+ cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
+ break;
+ } while (++cnt < QED_MCP_HALT_MAX_RETRIES);
+
+ if (cnt == QED_MCP_HALT_MAX_RETRIES) {
+ DP_NOTICE(p_hwfn,
+ "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
+ return -EBUSY;
+ }
+
+ qed_mcp_cmd_set_blocking(p_hwfn, true);
+
+ return 0;
}
+#define QED_MCP_RESUME_SLEEP_MS 10
+
int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 value, cpu_mode;
+ u32 cpu_mode, cpu_state;
qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
- value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
- value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
- qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+ qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
+ msleep(QED_MCP_RESUME_SLEEP_MS);
+ cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
- return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
+ DP_NOTICE(p_hwfn,
+ "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ cpu_mode, cpu_state);
+ return -EBUSY;
+ }
+
+ qed_mcp_cmd_set_blocking(p_hwfn, false);
+
+ return 0;
}
int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 047976d5c6e9..85e6b3989e7a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -635,11 +635,14 @@ struct qed_mcp_info {
*/
spinlock_t cmd_lock;
+ /* Flag to indicate whether sending a MFW mailbox command is blocked */
+ bool b_block_cmd;
+
/* Spinlock used for syncing SW link-changes and link-changes
* originating from attention context.
*/
spinlock_t link_lock;
- bool block_mb_sending;
+
u32 public_base;
u32 drv_mb_addr;
u32 mfw_mb_addr;
@@ -660,14 +663,20 @@ struct qed_mcp_info {
};
struct qed_mcp_mb_params {
- u32 cmd;
- u32 param;
- void *p_data_src;
- u8 data_src_size;
- void *p_data_dst;
- u8 data_dst_size;
- u32 mcp_resp;
- u32 mcp_param;
+ u32 cmd;
+ u32 param;
+ void *p_data_src;
+ void *p_data_dst;
+ u8 data_src_size;
+ u8 data_dst_size;
+ u32 mcp_resp;
+ u32 mcp_param;
+ u32 flags;
+#define QED_MB_FLAG_CAN_SLEEP (0x1 << 0)
+#define QED_MB_FLAG_AVOID_BLOCK (0x1 << 1)
+#define QED_MB_FLAGS_IS_SET(params, flag) \
+ ({ typeof(params) __params = (params); \
+ (__params && (__params->flags & QED_MB_FLAG_ ## flag)); })
};
struct qed_drv_tlv_hdr {
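The new flags let a caller mark a mailbox command as sleepable (QED_MB_FLAG_CAN_SLEEP) and exempt it from the blocking policy applied after an MFW timeout (QED_MB_FLAG_AVOID_BLOCK). A minimal sketch of the caller side, mirroring the unload-request hunk above; the surrounding variables are illustrative:

	struct qed_mcp_mb_params mb_params;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
	mb_params.param = wol_param;
	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

Internally the command path tests the flag with QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP) to choose between msleep() and udelay() while polling for the MFW response.
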
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index d8ad2dcad8d5..f736f70956fd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -562,8 +562,10 @@
0
#define MCP_REG_CPU_STATE \
0xe05004UL
+#define MCP_REG_CPU_STATE_SOFT_HALTED (0x1UL << 10)
#define MCP_REG_CPU_EVENT_MASK \
0xe05008UL
+#define MCP_REG_CPU_PROGRAM_COUNTER 0xe0501cUL
#define PGLUE_B_REG_PF_BAR0_SIZE \
0x2aae60UL
#define PGLUE_B_REG_PF_BAR1_SIZE \
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 9673d19308e6..b16ce7d93caf 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -2006,18 +2006,16 @@ unlock:
static int qede_parse_actions(struct qede_dev *edev,
struct tcf_exts *exts)
{
- int rc = -EINVAL, num_act = 0;
+ int rc = -EINVAL, num_act = 0, i;
const struct tc_action *a;
bool is_drop = false;
- LIST_HEAD(actions);
if (!tcf_exts_has_actions(exts)) {
DP_NOTICE(edev, "No tc actions received\n");
return rc;
}
- tcf_exts_to_list(exts, &actions);
- list_for_each_entry(a, &actions, list) {
+ tcf_exts_for_each_action(i, a, exts) {
num_act++;
if (is_tcf_gact_shot(a))
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 353f1c129af1..059ba9429e51 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2384,26 +2384,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
return status;
}
-static netdev_features_t qlge_fix_features(struct net_device *ndev,
- netdev_features_t features)
-{
- int err;
-
- /* Update the behavior of vlan accel in the adapter */
- err = qlge_update_hw_vlan_features(ndev, features);
- if (err)
- return err;
-
- return features;
-}
-
static int qlge_set_features(struct net_device *ndev,
netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
+ int err;
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
+ /* Update the behavior of vlan accel in the adapter */
+ err = qlge_update_hw_vlan_features(ndev, features);
+ if (err)
+ return err;
- if (changed & NETIF_F_HW_VLAN_CTAG_RX)
qlge_vlan_mode(ndev, features);
+ }
return 0;
}
@@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = {
.ndo_set_mac_address = qlge_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = qlge_tx_timeout,
- .ndo_fix_features = qlge_fix_features,
.ndo_set_features = qlge_set_features,
.ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index b81f4faf7b10..1470fc12282b 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Renesas Ethernet AVB device driver
*
* Copyright (C) 2014-2015 Renesas Electronics Corporation
@@ -5,10 +6,6 @@
* Copyright (C) 2015-2016 Cogent Embedded, Inc. <[email protected]>
*
* Based on the SuperH Ethernet driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
*/
#ifndef __RAVB_H__
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index c06f2df895c2..aff5516b781e 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet AVB device driver
*
* Copyright (C) 2014-2015 Renesas Electronics Corporation
@@ -5,10 +6,6 @@
* Copyright (C) 2015-2016 Cogent Embedded, Inc. <[email protected]>
*
* Based on the SuperH Ethernet driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
*/
#include <linux/cache.h>
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 5573199c4536..ad4433d59237 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* SuperH Ethernet device driver
*
* Copyright (C) 2014 Renesas Electronics Corporation
@@ -5,18 +6,6 @@
* Copyright (C) 2008-2014 Renesas Solutions Corp.
* Copyright (C) 2013-2017 Cogent Embedded, Inc.
* Copyright (C) 2014 Codethink Limited
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index f94be99cf400..0c18650bbfe6 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* SuperH Ethernet device driver
*
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
* Copyright (C) 2008-2012 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
*/
#ifndef __SH_ETH_H__
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index edf20361ea5f..bf4acebb6bcd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -33,7 +33,7 @@ config DWMAC_DWC_QOS_ETH
select PHYLIB
select CRC32
select MII
- depends on OF && COMMON_CLK && HAS_DMA
+ depends on OF && HAS_DMA
help
Support for chips using the snps,dwc-qos-ethernet.txt DT binding.
@@ -57,7 +57,7 @@ config DWMAC_ANARION
config DWMAC_IPQ806X
tristate "QCA IPQ806x DWMAC support"
default ARCH_QCOM
- depends on OF && COMMON_CLK && (ARCH_QCOM || COMPILE_TEST)
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
select MFD_SYSCON
help
Support for QCA IPQ806X DWMAC Ethernet.
@@ -100,7 +100,7 @@ config DWMAC_OXNAS
config DWMAC_ROCKCHIP
tristate "Rockchip dwmac support"
default ARCH_ROCKCHIP
- depends on OF && COMMON_CLK && (ARCH_ROCKCHIP || COMPILE_TEST)
+ depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST)
select MFD_SYSCON
help
Support for Ethernet controller on Rockchip RK3288 SoC.
@@ -123,7 +123,7 @@ config DWMAC_SOCFPGA
config DWMAC_STI
tristate "STi GMAC support"
default ARCH_STI
- depends on OF && COMMON_CLK && (ARCH_STI || COMPILE_TEST)
+ depends on OF && (ARCH_STI || COMPILE_TEST)
select MFD_SYSCON
---help---
Support for ethernet controller on STi SOCs.
@@ -147,7 +147,7 @@ config DWMAC_STM32
config DWMAC_SUNXI
tristate "Allwinner GMAC support"
default ARCH_SUNXI
- depends on OF && COMMON_CLK && (ARCH_SUNXI || COMPILE_TEST)
+ depends on OF && (ARCH_SUNXI || COMPILE_TEST)
---help---
Support for Allwinner A20/A31 GMAC ethernet controllers.
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 1a96dd9c1091..531294f4978b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -61,7 +61,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry,
struct stmmac_tc_entry *action_entry = entry;
const struct tc_action *act;
struct tcf_exts *exts;
- LIST_HEAD(actions);
+ int i;
exts = cls->knode.exts;
if (!tcf_exts_has_actions(exts))
@@ -69,8 +69,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry,
if (frag)
action_entry = frag;
- tcf_exts_to_list(exts, &actions);
- list_for_each_entry(act, &actions, list) {
+ tcf_exts_for_each_action(i, act, exts) {
/* Accept */
if (is_tcf_gact_ok(act)) {
action_entry->val.af = 1;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 507f68190cb1..1121a1ec407c 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -29,6 +29,7 @@
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
+#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
@@ -2039,12 +2040,16 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
{
struct net_device *ndev;
struct net_device_context *net_device_ctx;
+ struct device *pdev = vf_netdev->dev.parent;
struct netvsc_device *netvsc_dev;
int ret;
if (vf_netdev->addr_len != ETH_ALEN)
return NOTIFY_DONE;
+ if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev))
+ return NOTIFY_DONE;
+
/*
* We will use the MAC address to locate the synthetic interface to
* associate with the VF interface. If we don't find a matching
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 97742708460b..2cd71bdb6484 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -5217,8 +5217,8 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->hw_features &= ~NETIF_F_RXCSUM;
}
- if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 &&
- udev->serial && !strcmp(udev->serial, "000001000000")) {
+ if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
+ (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) {
dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 1b9951d2067e..d668682f91df 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -316,6 +316,14 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
old_value = *dbbuf_db;
*dbbuf_db = value;
+ /*
+ * Ensure that the doorbell is updated before reading the event
+ * index from memory. The controller needs to provide similar
+ * ordering to ensure the event index is updated before reading
+ * the doorbell.
+ */
+ mb();
+
if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
return false;
}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index ebf3e7a6c49e..b5ec96abd048 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1210,7 +1210,7 @@ static int __init nvmet_init(void)
error = nvmet_init_discovery();
if (error)
- goto out;
+ goto out_free_work_queue;
error = nvmet_init_configfs();
if (error)
@@ -1219,6 +1219,8 @@ static int __init nvmet_init(void)
out_exit_discovery:
nvmet_exit_discovery();
+out_free_work_queue:
+ destroy_workqueue(buffered_io_wq);
out:
return error;
}
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 34712def81b1..5251689a1d9a 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -311,7 +311,7 @@ fcloop_tgt_lsrqst_done_work(struct work_struct *work)
struct fcloop_tport *tport = tls_req->tport;
struct nvmefc_ls_req *lsreq = tls_req->lsreq;
- if (tport->remoteport)
+ if (!tport || tport->remoteport)
lsreq->done(lsreq, tls_req->status);
}
@@ -329,6 +329,7 @@ fcloop_ls_req(struct nvme_fc_local_port *localport,
if (!rport->targetport) {
tls_req->status = -ECONNREFUSED;
+ tls_req->tport = NULL;
schedule_work(&tls_req->work);
return ret;
}
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 466e3c8582f0..9095b8290150 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -54,6 +54,28 @@ DEFINE_MUTEX(of_mutex);
*/
DEFINE_RAW_SPINLOCK(devtree_lock);
+bool of_node_name_eq(const struct device_node *np, const char *name)
+{
+ const char *node_name;
+ size_t len;
+
+ if (!np)
+ return false;
+
+ node_name = kbasename(np->full_name);
+ len = strchrnul(node_name, '@') - node_name;
+
+ return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
+}
+
+bool of_node_name_prefix(const struct device_node *np, const char *prefix)
+{
+ if (!np)
+ return false;
+
+ return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
+}
+
int of_n_addr_cells(struct device_node *np)
{
u32 cells;
@@ -720,6 +742,31 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
EXPORT_SYMBOL(of_get_next_available_child);
/**
+ * of_get_compatible_child - Find compatible child node
+ * @parent: parent node
+ * @compatible: compatible string
+ *
+ * Lookup child node whose compatible property contains the given compatible
+ * string.
+ *
+ * Returns a node pointer with refcount incremented, use of_node_put() on it
+ * when done; or NULL if not found.
+ */
+struct device_node *of_get_compatible_child(const struct device_node *parent,
+ const char *compatible)
+{
+ struct device_node *child;
+
+ for_each_child_of_node(parent, child) {
+ if (of_device_is_compatible(child, compatible))
+ break;
+ }
+
+ return child;
+}
+EXPORT_SYMBOL(of_get_compatible_child);
+
+/**
* of_get_child_by_name - Find the child node by name for a given parent
* @node: parent node
* @name: child name to look for.
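A minimal usage sketch for the new of_get_compatible_child() helper; the compatible string and error code are placeholders:

	struct device_node *child;

	child = of_get_compatible_child(parent, "vendor,sub-block");
	if (!child)
		return -ENODEV;

	/* ... read properties of the matching child ... */

	/* The node is returned with its refcount raised; drop it when done. */
	of_node_put(child);
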
diff --git a/drivers/staging/vboxvideo/vbox_fb.c b/drivers/staging/vboxvideo/vbox_fb.c
index 43c39eca4ae1..034f8ffa8f20 100644
--- a/drivers/staging/vboxvideo/vbox_fb.c
+++ b/drivers/staging/vboxvideo/vbox_fb.c
@@ -155,8 +155,7 @@ static int vboxfb_create(struct drm_fb_helper *helper,
* The last flag forces a mode set on VT switches even if the kernel
* does not think it is needed.
*/
- info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT |
- FBINFO_MISC_ALWAYS_SETPAR;
+ info->flags = FBINFO_DEFAULT | FBINFO_MISC_ALWAYS_SETPAR;
info->fbops = &vboxfb_ops;
/*
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 977a8307fbb1..4f2816559205 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -260,10 +260,13 @@ static int of_thermal_set_mode(struct thermal_zone_device *tz,
mutex_lock(&tz->lock);
- if (mode == THERMAL_DEVICE_ENABLED)
+ if (mode == THERMAL_DEVICE_ENABLED) {
tz->polling_delay = data->polling_delay;
- else
+ tz->passive_delay = data->passive_delay;
+ } else {
tz->polling_delay = 0;
+ tz->passive_delay = 0;
+ }
mutex_unlock(&tz->lock);
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index c866cc165960..450ed66edf58 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -1,16 +1,6 @@
-/*
- * Copyright 2016 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright 2016 Freescale Semiconductor, Inc.
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -197,7 +187,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
int ret;
struct qoriq_tmu_data *data;
struct device_node *np = pdev->dev.of_node;
- u32 site = 0;
+ u32 site;
if (!np) {
dev_err(&pdev->dev, "Device OF-Node is NULL");
@@ -233,8 +223,9 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
if (ret < 0)
goto err_tmu;
- data->tz = thermal_zone_of_sensor_register(&pdev->dev, data->sensor_id,
- data, &tmu_tz_ops);
+ data->tz = devm_thermal_zone_of_sensor_register(&pdev->dev,
+ data->sensor_id,
+ data, &tmu_tz_ops);
if (IS_ERR(data->tz)) {
ret = PTR_ERR(data->tz);
dev_err(&pdev->dev,
@@ -243,7 +234,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
}
/* Enable monitoring */
- site |= 0x1 << (15 - data->sensor_id);
+ site = 0x1 << (15 - data->sensor_id);
tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr);
return 0;
@@ -261,8 +252,6 @@ static int qoriq_tmu_remove(struct platform_device *pdev)
{
struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
- thermal_zone_of_sensor_unregister(&pdev->dev, data->tz);
-
/* Disable monitoring */
tmu_write(data, TMR_DISABLE, &data->regs->tmr);
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 766521eb7071..7aed5337bdd3 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R-Car Gen3 THS thermal sensor driver
* Based on rcar_thermal.c and work from Hien Dang and Khiem Nguyen.
*
* Copyright (C) 2016 Renesas Electronics Corporation.
* Copyright (C) 2016 Sang Engineering
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
*/
#include <linux/delay.h>
#include <linux/err.h>
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index e77e63070e99..78f932822d38 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R-Car THS/TSC thermal sensor driver
*
* Copyright (C) 2012 Renesas Solutions Corp.
* Kuninori Morimoto <[email protected]>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*/
#include <linux/delay.h>
#include <linux/err.h>
@@ -660,6 +648,6 @@ static struct platform_driver rcar_thermal_driver = {
};
module_platform_driver(rcar_thermal_driver);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver");
MODULE_AUTHOR("Kuninori Morimoto <[email protected]>");
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 5f1183b0b89d..55370e651db3 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1004,9 +1004,7 @@ void redraw_screen(struct vc_data *vc, int is_switch)
clear_buffer_attributes(vc);
}
- /* Forcibly update if we're panicing */
- if ((update && vc->vc_mode != KD_GRAPHICS) ||
- vt_force_oops_output(vc))
+ if (update && vc->vc_mode != KD_GRAPHICS)
do_update_region(vc, vc->vc_origin, vc->vc_screenbuf_size / 2);
}
set_cursor(vc);
@@ -1046,7 +1044,6 @@ static void visual_init(struct vc_data *vc, int num, int init)
vc->vc_hi_font_mask = 0;
vc->vc_complement_mask = 0;
vc->vc_can_do_color = 0;
- vc->vc_panic_force_write = false;
vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS;
vc->vc_sw->con_init(vc, init);
if (!vc->vc_complement_mask)
@@ -2911,7 +2908,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
goto quit;
}
- if (vc->vc_mode != KD_TEXT && !vt_force_oops_output(vc))
+ if (vc->vc_mode != KD_TEXT)
goto quit;
/* undraw cursor first */
@@ -4229,8 +4226,7 @@ void do_unblank_screen(int leaving_gfx)
return;
}
vc = vc_cons[fg_console].d;
- /* Try to unblank in oops case too */
- if (vc->vc_mode != KD_TEXT && !vt_force_oops_output(vc))
+ if (vc->vc_mode != KD_TEXT)
return; /* but leave console_blanked != 0 */
if (blankinterval) {
@@ -4239,7 +4235,7 @@ void do_unblank_screen(int leaving_gfx)
}
console_blanked = 0;
- if (vc->vc_sw->con_blank(vc, 0, leaving_gfx) || vt_force_oops_output(vc))
+ if (vc->vc_sw->con_blank(vc, 0, leaving_gfx))
/* Low-level driver cannot restore -> do it ourselves */
update_screen(vc);
if (console_blank_hook)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 96c1d8400822..b13c6b4b2c66 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -952,7 +952,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
list_for_each_entry_safe(node, n, &d->pending_list, node) {
struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
if (msg->iova <= vq_msg->iova &&
- msg->iova + msg->size - 1 > vq_msg->iova &&
+ msg->iova + msg->size - 1 >= vq_msg->iova &&
vq_msg->type == VHOST_IOTLB_MISS) {
vhost_poll_queue(&node->vq->poll);
list_del(&node->node);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 75ebbbf0a1fb..8958ccc8b1ac 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -284,8 +284,7 @@ static inline int fbcon_is_inactive(struct vc_data *vc, struct fb_info *info)
struct fbcon_ops *ops = info->fbcon_par;
return (info->state != FBINFO_STATE_RUNNING ||
- vc->vc_mode != KD_TEXT || ops->graphics) &&
- !vt_force_oops_output(vc);
+ vc->vc_mode != KD_TEXT || ops->graphics);
}
static int get_color(struct vc_data *vc, struct fb_info *info,
@@ -1104,7 +1103,6 @@ static void fbcon_init(struct vc_data *vc, int init)
if (p->userfont)
charcnt = FNTCHARCNT(p->fontdata);
- vc->vc_panic_force_write = !!(info->flags & FBINFO_CAN_FORCE_OUTPUT);
vc->vc_can_do_color = (fb_get_color_depth(&info->var, &info->fix)!=1);
vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
if (charcnt == 256) {
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 20405421a5ed..861bf8081619 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -34,6 +34,7 @@
#include <linux/fb.h>
#include <linux/fbcon.h>
#include <linux/mem_encrypt.h>
+#include <linux/pci.h>
#include <asm/fb.h>
@@ -1116,6 +1117,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
if (!lock_fb_info(info))
return -ENODEV;
fix = info->fix;
+ if (info->flags & FBINFO_HIDE_SMEM_START)
+ fix.smem_start = 0;
unlock_fb_info(info);
ret = copy_to_user(argp, &fix, sizeof(fix)) ? -EFAULT : 0;
@@ -1326,6 +1329,8 @@ static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd,
if (!lock_fb_info(info))
return -ENODEV;
fix = info->fix;
+ if (info->flags & FBINFO_HIDE_SMEM_START)
+ fix.smem_start = 0;
unlock_fb_info(info);
return do_fscreeninfo_to_user(&fix, compat_ptr(arg));
}
@@ -1605,8 +1610,8 @@ static int do_remove_conflicting_framebuffers(struct apertures_struct *a,
(primary && gen_aper && gen_aper->count &&
gen_aper->ranges[0].base == VGA_FB_PHYS)) {
- printk(KERN_INFO "fb: switching to %s from %s\n",
- name, registered_fb[i]->fix.id);
+ printk(KERN_INFO "fb%d: switching to %s from %s\n",
+ i, name, registered_fb[i]->fix.id);
ret = do_unregister_framebuffer(registered_fb[i]);
if (ret)
return ret;
@@ -1793,20 +1798,78 @@ int unlink_framebuffer(struct fb_info *fb_info)
}
EXPORT_SYMBOL(unlink_framebuffer);
+/**
+ * remove_conflicting_framebuffers - remove firmware-configured framebuffers
+ * @a: memory range, users of which are to be removed
+ * @name: requesting driver name
+ * @primary: also kick vga16fb if present
+ *
+ * This function removes framebuffer devices (initialized by firmware/bootloader)
+ * which use memory range described by @a. If @a is NULL all such devices are
+ * removed.
+ */
int remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary)
{
int ret;
+ bool do_free = false;
+
+ if (!a) {
+ a = alloc_apertures(1);
+ if (!a)
+ return -ENOMEM;
+
+ a->ranges[0].base = 0;
+ a->ranges[0].size = ~0;
+ do_free = true;
+ }
mutex_lock(&registration_lock);
ret = do_remove_conflicting_framebuffers(a, name, primary);
mutex_unlock(&registration_lock);
+ if (do_free)
+ kfree(a);
+
return ret;
}
EXPORT_SYMBOL(remove_conflicting_framebuffers);
/**
+ * remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices
+ * @pdev: PCI device
+ * @res_id: index of PCI BAR configuring framebuffer memory
+ * @name: requesting driver name
+ *
+ * This function removes framebuffer devices (e.g. initialized by firmware)
+ * using memory range configured for @pdev's BAR @res_id.
+ *
+ * The function assumes that PCI device with shadowed ROM drives a primary
+ * display and so kicks out vga16fb.
+ */
+int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id, const char *name)
+{
+ struct apertures_struct *ap;
+ bool primary = false;
+ int err;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return -ENOMEM;
+
+ ap->ranges[0].base = pci_resource_start(pdev, res_id);
+ ap->ranges[0].size = pci_resource_len(pdev, res_id);
+#ifdef CONFIG_X86
+ primary = pdev->resource[PCI_ROM_RESOURCE].flags &
+ IORESOURCE_ROM_SHADOW;
+#endif
+ err = remove_conflicting_framebuffers(ap, name, primary);
+ kfree(ap);
+ return err;
+}
+EXPORT_SYMBOL(remove_conflicting_pci_framebuffers);
+
+/**
* register_framebuffer - registers a frame buffer device
* @fb_info: frame buffer info structure
*
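A sketch of how a PCI display driver might use the new remove_conflicting_pci_framebuffers() helper instead of building an apertures_struct by hand; the driver name, BAR index and probe function are hypothetical:

static int exampledrm_pci_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	int ret;

	/* Assume BAR 0 backs the firmware-initialized framebuffer. */
	ret = remove_conflicting_pci_framebuffers(pdev, 0, "exampledrmfb");
	if (ret)
		return ret;

	/* ... continue with the normal probe sequence ... */
	return 0;
}
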
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index f2088838f690..5b471889d723 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -402,10 +402,19 @@ static ssize_t modalias_show(struct device *dev,
}
static DEVICE_ATTR_RO(modalias);
+static ssize_t state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n",
+ xenbus_strstate(to_xenbus_device(dev)->state));
+}
+static DEVICE_ATTR_RO(state);
+
static struct attribute *xenbus_dev_attrs[] = {
&dev_attr_nodename.attr,
&dev_attr_devtype.attr,
&dev_attr_modalias.attr,
+ &dev_attr_state.attr,
NULL,
};
diff --git a/fs/buffer.c b/fs/buffer.c
index 4cc679d5bf58..6f1ae3ac9789 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -39,7 +39,6 @@
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
-#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index ec3fba7d492f..488a9e7f8f66 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -24,6 +24,7 @@
#include <linux/mpage.h>
#include <linux/user_namespace.h>
#include <linux/seq_file.h>
+#include <linux/blkdev.h>
#include "isofs.h"
#include "zisofs.h"
@@ -653,6 +654,12 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
/*
* What if bugger tells us to go beyond page size?
*/
+ if (bdev_logical_block_size(s->s_bdev) > 2048) {
+ printk(KERN_WARNING
+ "ISOFS: unsupported/invalid hardware sector size %d\n",
+ bdev_logical_block_size(s->s_bdev));
+ goto out_freesbi;
+ }
opt.blocksize = sb_min_blocksize(s, opt.blocksize);
sbi->s_high_sierra = 0; /* default is iso9660 */
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 05506d60131c..59cdb27826de 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -132,13 +132,13 @@ static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
struct fsnotify_mark *mark;
assert_spin_locked(&conn->lock);
+ /* We can get detached connector here when inode is getting unlinked. */
+ if (!fsnotify_valid_obj_type(conn->type))
+ return;
hlist_for_each_entry(mark, &conn->list, obj_list) {
if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)
new_mask |= mark->mask;
}
- if (WARN_ON(!fsnotify_valid_obj_type(conn->type)))
- return;
-
*fsnotify_conn_mask_p(conn) = new_mask;
}
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 860bfbe7a07a..f0cbf58ad4da 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -18,6 +18,7 @@
#include <linux/quotaops.h>
#include <linux/types.h>
#include <linux/writeback.h>
+#include <linux/nospec.h>
static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
qid_t id)
@@ -120,8 +121,6 @@ static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
struct if_dqinfo uinfo;
int ret;
- /* This checks whether qc_state has enough entries... */
- BUILD_BUG_ON(MAXQUOTAS > XQM_MAXQUOTAS);
if (!sb->s_qcop->get_state)
return -ENOSYS;
ret = sb->s_qcop->get_state(sb, &state);
@@ -354,10 +353,10 @@ static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs)
* GETXSTATE quotactl has space for just one set of time limits so
* report them for the first enabled quota type
*/
- for (type = 0; type < XQM_MAXQUOTAS; type++)
+ for (type = 0; type < MAXQUOTAS; type++)
if (state.s_state[type].flags & QCI_ACCT_ENABLED)
break;
- BUG_ON(type == XQM_MAXQUOTAS);
+ BUG_ON(type == MAXQUOTAS);
fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
@@ -427,10 +426,10 @@ static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs)
* GETXSTATV quotactl has space for just one set of time limits so
* report them for the first enabled quota type
*/
- for (type = 0; type < XQM_MAXQUOTAS; type++)
+ for (type = 0; type < MAXQUOTAS; type++)
if (state.s_state[type].flags & QCI_ACCT_ENABLED)
break;
- BUG_ON(type == XQM_MAXQUOTAS);
+ BUG_ON(type == MAXQUOTAS);
fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
@@ -701,8 +700,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
{
int ret;
- if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
+ if (type >= MAXQUOTAS)
return -EINVAL;
+ type = array_index_nospec(type, MAXQUOTAS);
/*
* Quota not supported on this fs? Check this before s_quota_types
* since they needn't be set if quota is not supported at all.
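The bounds check followed by array_index_nospec() above is the standard Spectre-v1 clamp: validate the index first, then sanitize it before it selects an array element. A minimal sketch of the same pattern, with a hypothetical helper name:

	#include <linux/nospec.h>

	/* Hypothetical helper showing the check-then-clamp pattern used above. */
	static int demo_get_type_flags(const struct qc_state *state, int type)
	{
		if (type >= MAXQUOTAS)
			return -EINVAL;
		/* Clamp so a mispredicted branch cannot index out of bounds. */
		type = array_index_nospec(type, MAXQUOTAS);
		return state->s_state[type].flags;
	}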
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 3040dc2a32f6..6f515651a2c2 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -764,9 +764,7 @@ static int udf_find_fileset(struct super_block *sb,
struct kernel_lb_addr *root)
{
struct buffer_head *bh = NULL;
- long lastblock;
uint16_t ident;
- struct udf_sb_info *sbi;
if (fileset->logicalBlockNum != 0xFFFFFFFF ||
fileset->partitionReferenceNum != 0xFFFF) {
@@ -779,69 +777,11 @@ static int udf_find_fileset(struct super_block *sb,
return 1;
}
- }
-
- sbi = UDF_SB(sb);
- if (!bh) {
- /* Search backwards through the partitions */
- struct kernel_lb_addr newfileset;
-
-/* --> cvg: FIXME - is it reasonable? */
- return 1;
-
- for (newfileset.partitionReferenceNum = sbi->s_partitions - 1;
- (newfileset.partitionReferenceNum != 0xFFFF &&
- fileset->logicalBlockNum == 0xFFFFFFFF &&
- fileset->partitionReferenceNum == 0xFFFF);
- newfileset.partitionReferenceNum--) {
- lastblock = sbi->s_partmaps
- [newfileset.partitionReferenceNum]
- .s_partition_len;
- newfileset.logicalBlockNum = 0;
-
- do {
- bh = udf_read_ptagged(sb, &newfileset, 0,
- &ident);
- if (!bh) {
- newfileset.logicalBlockNum++;
- continue;
- }
-
- switch (ident) {
- case TAG_IDENT_SBD:
- {
- struct spaceBitmapDesc *sp;
- sp = (struct spaceBitmapDesc *)
- bh->b_data;
- newfileset.logicalBlockNum += 1 +
- ((le32_to_cpu(sp->numOfBytes) +
- sizeof(struct spaceBitmapDesc)
- - 1) >> sb->s_blocksize_bits);
- brelse(bh);
- break;
- }
- case TAG_IDENT_FSD:
- *fileset = newfileset;
- break;
- default:
- newfileset.logicalBlockNum++;
- brelse(bh);
- bh = NULL;
- break;
- }
- } while (newfileset.logicalBlockNum < lastblock &&
- fileset->logicalBlockNum == 0xFFFFFFFF &&
- fileset->partitionReferenceNum == 0xFFFF);
- }
- }
-
- if ((fileset->logicalBlockNum != 0xFFFFFFFF ||
- fileset->partitionReferenceNum != 0xFFFF) && bh) {
udf_debug("Fileset at block=%u, partition=%u\n",
fileset->logicalBlockNum,
fileset->partitionReferenceNum);
- sbi->s_partition = fileset->partitionReferenceNum;
+ UDF_SB(sb)->s_partition = fileset->partitionReferenceNum;
udf_load_fileset(sb, bh, root);
brelse(bh);
return 0;
@@ -1570,10 +1510,16 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
*/
#define PART_DESC_ALLOC_STEP 32
+struct part_desc_seq_scan_data {
+ struct udf_vds_record rec;
+ u32 partnum;
+};
+
struct desc_seq_scan_data {
struct udf_vds_record vds[VDS_POS_LENGTH];
unsigned int size_part_descs;
- struct udf_vds_record *part_descs_loc;
+ unsigned int num_part_descs;
+ struct part_desc_seq_scan_data *part_descs_loc;
};
static struct udf_vds_record *handle_partition_descriptor(
@@ -1582,10 +1528,14 @@ static struct udf_vds_record *handle_partition_descriptor(
{
struct partitionDesc *desc = (struct partitionDesc *)bh->b_data;
int partnum;
+ int i;
partnum = le16_to_cpu(desc->partitionNumber);
- if (partnum >= data->size_part_descs) {
- struct udf_vds_record *new_loc;
+ for (i = 0; i < data->num_part_descs; i++)
+ if (partnum == data->part_descs_loc[i].partnum)
+ return &(data->part_descs_loc[i].rec);
+ if (data->num_part_descs >= data->size_part_descs) {
+ struct part_desc_seq_scan_data *new_loc;
unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP);
new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL);
@@ -1597,7 +1547,7 @@ static struct udf_vds_record *handle_partition_descriptor(
data->part_descs_loc = new_loc;
data->size_part_descs = new_size;
}
- return &(data->part_descs_loc[partnum]);
+ return &(data->part_descs_loc[data->num_part_descs++].rec);
}
@@ -1647,6 +1597,7 @@ static noinline int udf_process_sequence(
memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
data.size_part_descs = PART_DESC_ALLOC_STEP;
+ data.num_part_descs = 0;
data.part_descs_loc = kcalloc(data.size_part_descs,
sizeof(*data.part_descs_loc),
GFP_KERNEL);
@@ -1658,7 +1609,6 @@ static noinline int udf_process_sequence(
* are in it.
*/
for (; (!done && block <= lastblock); block++) {
-
bh = udf_read_tagged(sb, block, block, &ident);
if (!bh)
break;
@@ -1730,13 +1680,10 @@ static noinline int udf_process_sequence(
}
/* Now handle prevailing Partition Descriptors */
- for (i = 0; i < data.size_part_descs; i++) {
- if (data.part_descs_loc[i].block) {
- ret = udf_load_partdesc(sb,
- data.part_descs_loc[i].block);
- if (ret < 0)
- return ret;
- }
+ for (i = 0; i < data.num_part_descs; i++) {
+ ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
+ if (ret < 0)
+ return ret;
}
return 0;
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index f7a19c2a7a80..05350424a4d3 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -110,7 +110,4 @@ static inline bool drm_can_sleep(void)
return true;
}
-/* helper for handling conditionals in various for_each macros */
-#define for_each_if(condition) if (!(condition)) {} else
-
#endif
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index da9d95a19580..d6adebcd6ea4 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -29,6 +29,7 @@
#define DRM_ATOMIC_H_
#include <drm/drm_crtc.h>
+#include <drm/drm_util.h>
/**
* struct drm_crtc_commit - track modeset commits on a CRTC
@@ -373,9 +374,6 @@ void drm_atomic_state_default_release(struct drm_atomic_state *state);
struct drm_crtc_state * __must_check
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
struct drm_crtc *crtc);
-int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
- struct drm_crtc_state *state, struct drm_property *property,
- uint64_t val);
struct drm_plane_state * __must_check
drm_atomic_get_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane);
@@ -587,25 +585,6 @@ __drm_atomic_get_current_plane_state(struct drm_atomic_state *state,
}
int __must_check
-drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
- const struct drm_display_mode *mode);
-int __must_check
-drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
- struct drm_property_blob *blob);
-int __must_check
-drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
- struct drm_crtc *crtc);
-void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
- struct drm_framebuffer *fb);
-void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
- struct dma_fence *fence);
-int __must_check
-drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
- struct drm_crtc *crtc);
-int drm_atomic_set_writeback_fb_for_connector(
- struct drm_connector_state *conn_state,
- struct drm_framebuffer *fb);
-int __must_check
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
struct drm_crtc *crtc);
int __must_check
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 99e2a5297c69..657af7b39379 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -31,6 +31,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_util.h>
struct drm_atomic_state;
struct drm_private_obj;
@@ -156,6 +157,8 @@ void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state);
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
+void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
+ struct drm_plane_state *state);
void drm_atomic_helper_plane_reset(struct drm_plane *plane);
void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
struct drm_plane_state *state);
diff --git a/include/drm/drm_atomic_uapi.h b/include/drm/drm_atomic_uapi.h
new file mode 100644
index 000000000000..8cec52ad1277
--- /dev/null
+++ b/include/drm/drm_atomic_uapi.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <[email protected]>
+ * Daniel Vetter <[email protected]>
+ */
+
+#ifndef DRM_ATOMIC_UAPI_H_
+#define DRM_ATOMIC_UAPI_H_
+
+struct drm_crtc_state;
+struct drm_display_mode;
+struct drm_property_blob;
+struct drm_plane_state;
+struct drm_crtc;
+struct drm_connector_state;
+struct dma_fence;
+struct drm_framebuffer;
+
+int __must_check
+drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
+ const struct drm_display_mode *mode);
+int __must_check
+drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
+ struct drm_property_blob *blob);
+int __must_check
+drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
+ struct drm_crtc *crtc);
+void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
+ struct drm_framebuffer *fb);
+void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
+ struct dma_fence *fence);
+int __must_check
+drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
+ struct drm_crtc *crtc);
+
+#endif
diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h
index 330c561c4c11..88bdfec3bd88 100644
--- a/include/drm/drm_blend.h
+++ b/include/drm/drm_blend.h
@@ -27,6 +27,10 @@
#include <linux/ctype.h>
#include <drm/drm_mode.h>
+#define DRM_MODE_BLEND_PREMULTI 0
+#define DRM_MODE_BLEND_COVERAGE 1
+#define DRM_MODE_BLEND_PIXEL_NONE 2
+
struct drm_device;
struct drm_atomic_state;
struct drm_plane;
@@ -52,4 +56,6 @@ int drm_plane_create_zpos_immutable_property(struct drm_plane *plane,
unsigned int zpos);
int drm_atomic_normalize_zpos(struct drm_device *dev,
struct drm_atomic_state *state);
+int drm_plane_create_blend_mode_property(struct drm_plane *plane,
+ unsigned int supported_modes);
#endif
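Drivers that support the new blend modes are expected to advertise them when initialising a plane. A plausible call site (the surrounding init code is hypothetical) passes a bitmask of the supported modes:

	/* In a hypothetical plane init path: expose premultiplied and coverage. */
	ret = drm_plane_create_blend_mode_property(plane,
						   BIT(DRM_MODE_BLEND_PREMULTI) |
						   BIT(DRM_MODE_BLEND_COVERAGE));
	if (ret)
		return ret;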
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
index 44f04233e3db..90ef9996d9a4 100644
--- a/include/drm/drm_color_mgmt.h
+++ b/include/drm/drm_color_mgmt.h
@@ -24,6 +24,7 @@
#define __DRM_COLOR_MGMT_H__
#include <linux/ctype.h>
+#include <drm/drm_property.h>
struct drm_crtc;
struct drm_plane;
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 97ea41dc678f..91a877fa00cb 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -28,6 +28,7 @@
#include <linux/ctype.h>
#include <linux/hdmi.h>
#include <drm/drm_mode_object.h>
+#include <drm/drm_util.h>
#include <uapi/drm/drm_mode.h>
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 92e7fc7f05a4..b21437bc95bf 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -744,8 +744,45 @@ struct drm_crtc_funcs {
*
* 0 on success or a negative error code on failure.
*/
- int (*set_crc_source)(struct drm_crtc *crtc, const char *source,
- size_t *values_cnt);
+ int (*set_crc_source)(struct drm_crtc *crtc, const char *source);
+ /**
+ * @verify_crc_source:
+ *
+ * Verifies the source of frame CRC checksums before it is set as the
+ * CRC source and again when the CRC file is opened. The source
+ * parameter can be NULL when the CRC source is being disabled.
+ *
+ * This callback is optional if the driver does not support any CRC
+ * generation functionality.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+ int (*verify_crc_source)(struct drm_crtc *crtc, const char *source,
+ size_t *values_cnt);
+ /**
+ * @get_crc_sources:
+ *
+ * Driver callback for getting a list of all the available sources for
+ * CRC generation. This callback depends upon verify_crc_source, so
+ * the verify_crc_source callback should be implemented before this
+ * one. The driver can pass the full list of available CRC sources;
+ * this callback verifies each source before it is exposed to
+ * userspace.
+ *
+ * This callback is optional if the driver does not support exporting
+ * the list of possible CRC sources.
+ *
+ * RETURNS:
+ *
+ * A constant character pointer to the list of all the available CRC
+ * sources. On failure the driver should return NULL. @count should be
+ * updated with the number of sources in the list; if it is zero, no
+ * source from the list is processed.
+ */
+ const char *const *(*get_crc_sources)(struct drm_crtc *crtc,
+ size_t *count);
/**
* @atomic_print_state:
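A hypothetical driver implementation of the two new callbacks could look like the sketch below; the source names and function names are made up, only the callback signatures come from the structure above.

	static const char * const demo_crc_sources[] = { "auto" };

	static int demo_verify_crc_source(struct drm_crtc *crtc, const char *source,
					  size_t *values_cnt)
	{
		/* A NULL source means "disable CRC generation". */
		if (!source)
			return 0;
		if (strcmp(source, "auto"))
			return -EINVAL;
		*values_cnt = 1;	/* one CRC value reported per frame */
		return 0;
	}

	static const char *const *demo_get_crc_sources(struct drm_crtc *crtc,
						       size_t *count)
	{
		*count = ARRAY_SIZE(demo_crc_sources);
		return demo_crc_sources;
	}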
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index f9c6e0e3aec7..42411b3ea0c8 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -46,6 +46,16 @@ struct drm_device {
struct drm_master *master;
/**
+ * @driver_features: per-device driver features
+ *
+ * Drivers can clear specific flags here to disallow
+ * certain features on a per-device basis while still
+ * sharing a single &struct drm_driver instance across
+ * all devices.
+ */
+ u32 driver_features;
+
+ /**
* @unplugged:
*
* Flag to tell if the device has been unplugged.
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 05cc31b5db16..2a3843f248cf 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -123,8 +123,9 @@
# define DP_FRAMING_CHANGE_CAP (1 << 1)
# define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */
-#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
-# define DP_TRAINING_AUX_RD_MASK 0x7F /* XXX 1.2? */
+#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
+# define DP_TRAINING_AUX_RD_MASK 0x7F /* DP 1.3 */
+# define DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT (1 << 7) /* DP 1.3 */
#define DP_ADAPTER_CAP 0x00f /* 1.2 */
# define DP_FORCE_LOAD_SENSE_CAP (1 << 0)
@@ -1260,12 +1261,12 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
*/
enum drm_dp_quirk {
/**
- * @DP_DPCD_QUIRK_LIMITED_M_N:
+ * @DP_DPCD_QUIRK_CONSTANT_N:
*
* The device requires main link attributes Mvid and Nvid to be limited
- * to 16 bits.
+ * to 16 bits, so a constant value (0x8000) is used for compatibility.
*/
- DP_DPCD_QUIRK_LIMITED_M_N,
+ DP_DPCD_QUIRK_CONSTANT_N,
};
/**
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 46a8009784df..8830e3de3a86 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -56,7 +56,6 @@ struct drm_printer;
#define DRIVER_ATOMIC 0x10000
#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
#define DRIVER_SYNCOBJ 0x40000
-#define DRIVER_PREFER_XBGR_30BPP 0x80000
/**
* struct drm_driver - DRM driver structure
@@ -654,14 +653,14 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev)
* @dev: DRM device to check
* @feature: feature flag
*
- * This checks @dev for driver features, see &drm_driver.driver_features and the
- * various DRIVER_\* flags.
+ * This checks @dev for driver features, see &drm_driver.driver_features,
+ * &drm_device.driver_features, and the various DRIVER_\* flags.
*
* Returns true if the @feature is supported, false otherwise.
*/
-static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
+static inline bool drm_core_check_feature(struct drm_device *dev, u32 feature)
{
- return dev->driver->driver_features & feature;
+ return dev->driver->driver_features & dev->driver_features & feature;
}
/**
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 4f597c0730b4..70cfca03d812 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -28,6 +28,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_mode.h>
#include <drm/drm_mode_object.h>
+#include <drm/drm_util.h>
struct drm_encoder;
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index 96e26e3b9a0c..4a65f0d155b0 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -26,7 +26,6 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
-void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, bool state);
void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma,
bool state);
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 5db08c8f1d25..bb9acea61369 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -604,6 +604,16 @@ drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
#endif
+/**
+ * drm_fb_helper_remove_conflicting_framebuffers - remove firmware-configured framebuffers
+ * @a: memory range, users of which are to be removed
+ * @name: requesting driver name
+ * @primary: also kick vga16fb if present
+ *
+ * This function removes framebuffer devices (initialized by firmware/bootloader)
+ * which use the memory range described by @a. If @a is NULL, all such devices
+ * are removed.
+ */
static inline int
drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary)
@@ -615,4 +625,28 @@ drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a,
#endif
}
+/**
+ * drm_fb_helper_remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices
+ * @pdev: PCI device
+ * @resource_id: index of PCI BAR configuring framebuffer memory
+ * @name: requesting driver name
+ *
+ * This function removes framebuffer devices (e.g. initialized by firmware)
+ * using the memory range configured for @pdev's BAR @resource_id.
+ *
+ * The function assumes that a PCI device with a shadowed ROM drives the
+ * primary display and therefore also kicks out vga16fb.
+ */
+static inline int
+drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
+ int resource_id,
+ const char *name)
+{
+#if IS_REACHABLE(CONFIG_FB)
+ return remove_conflicting_pci_framebuffers(pdev, resource_id, name);
+#else
+ return 0;
+#endif
+}
+
#endif
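In a PCI DRM driver the new helper is typically called early in probe, before the driver takes over the hardware. A minimal sketch (driver name and BAR index are assumptions):

	static int demo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int ret;

		/* Kick out firmware framebuffers using BAR 0 before taking over. */
		ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0,
									"demodrmfb");
		if (ret)
			return ret;

		/* ... continue with normal device initialisation ... */
		return 0;
	}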
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index f9c15845f465..fac831c40106 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -25,6 +25,28 @@
#include <linux/types.h>
#include <uapi/drm/drm_fourcc.h>
+/*
+ * DRM formats are little endian. Define host endian variants for the
+ * most common formats here, to reduce the #ifdefs needed in drivers.
+ *
+ * Note that the DRM_FORMAT_BIG_ENDIAN flag should only be used in
+ * case the format can't be specified otherwise, so we don't end up
+ * with two values describing the same format.
+ */
+#ifdef __BIG_ENDIAN
+# define DRM_FORMAT_HOST_XRGB1555 (DRM_FORMAT_XRGB1555 | \
+ DRM_FORMAT_BIG_ENDIAN)
+# define DRM_FORMAT_HOST_RGB565 (DRM_FORMAT_RGB565 | \
+ DRM_FORMAT_BIG_ENDIAN)
+# define DRM_FORMAT_HOST_XRGB8888 DRM_FORMAT_BGRX8888
+# define DRM_FORMAT_HOST_ARGB8888 DRM_FORMAT_BGRA8888
+#else
+# define DRM_FORMAT_HOST_XRGB1555 DRM_FORMAT_XRGB1555
+# define DRM_FORMAT_HOST_RGB565 DRM_FORMAT_RGB565
+# define DRM_FORMAT_HOST_XRGB8888 DRM_FORMAT_XRGB8888
+# define DRM_FORMAT_HOST_ARGB8888 DRM_FORMAT_ARGB8888
+#endif
+
struct drm_device;
struct drm_mode_fb_cmd2;
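The intent of the DRM_FORMAT_HOST_* aliases is that a driver whose hardware scans out in CPU byte order can list them directly instead of sprinkling #ifdef __BIG_ENDIAN through its format tables. A sketch of such a table (the array name is an assumption):

	/* Formats for a hypothetical device that scans out in host byte order. */
	static const u32 demo_plane_formats[] = {
		DRM_FORMAT_HOST_XRGB8888,
		DRM_FORMAT_HOST_ARGB8888,
		DRM_FORMAT_HOST_RGB565,
		DRM_FORMAT_HOST_XRGB1555,
	};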
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index a0b202e1d69a..928e4172a0bb 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -809,6 +809,21 @@ struct drm_mode_config {
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
+ bool quirk_addfb_prefer_xbgr_30bpp;
+
+ /**
+ * @quirk_addfb_prefer_host_byte_order:
+ *
+ * When set to true, drm_mode_addfb() will pick a host byte order
+ * pixel_format when calling drm_mode_addfb2(). This is how
+ * drm_mode_addfb() should have worked from day one. It
+ * didn't though, so we ended up with quirks in both kernel
+ * and userspace drivers to deal with the broken behavior.
+ * Simply fixing drm_mode_addfb() unconditionally would break
+ * those drivers, so this quirk bit lets drivers opt in to the
+ * fixed behavior.
+ */
+ bool quirk_addfb_prefer_host_byte_order;
/**
* @async_page_flip: Does this device support async flips on the primary
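A driver that wants the fixed drm_mode_addfb() behaviour opts in from its mode-config setup, for instance (the surrounding init code is hypothetical):

	drm_mode_config_init(dev);
	/* Legacy ADDFB should pick host-byte-order formats on this device. */
	dev->mode_config.quirk_addfb_prefer_host_byte_order = true;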
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 582a0ec0aa70..a82c292af6c5 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -82,6 +82,7 @@ struct drm_panel_funcs {
* @drm: DRM device owning the panel
* @connector: DRM connector that the panel is attached to
* @dev: parent device of the panel
+ * @link: link from panel device (supplier) to DRM device (consumer)
* @funcs: operations that can be performed on the panel
* @list: panel entry in registry
*/
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 8a152dc16ea5..0a0834bef8bd 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -27,6 +27,9 @@
#include <linux/ctype.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_color_mgmt.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_modeset_lock.h>
+#include <drm/drm_util.h>
struct drm_crtc;
struct drm_printer;
@@ -119,6 +122,14 @@ struct drm_plane_state {
u16 alpha;
/**
+ * @pixel_blend_mode:
+ * The alpha blending equation selection, describing how the pixels from
+ * the current plane are composited with the background. Value can be
+ * one of DRM_MODE_BLEND_*
+ */
+ uint16_t pixel_blend_mode;
+
+ /**
* @rotation:
* Rotation of the plane. See drm_plane_create_rotation_property() for
* more details.
@@ -659,6 +670,14 @@ struct drm_plane {
* drm_plane_create_rotation_property().
*/
struct drm_property *rotation_property;
+ /**
+ * @blend_mode_property:
+ * Optional "pixel blend mode" enum property for this plane.
+ * Blend mode property represents the alpha blending equation selection,
+ * describing how the pixels from the current plane are composited with
+ * the background.
+ */
+ struct drm_property *blend_mode_property;
/**
* @color_encoding_property:
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index f3e6eed3e79c..afbc3beef089 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -381,7 +381,7 @@ void drm_err(const char *format, ...);
#define DRM_DEV_DEBUG_DP(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_DP, fmt, ## __VA_ARGS__)
-#define DRM_DEBUG_DP(dev, fmt, ...) \
+#define DRM_DEBUG_DP(fmt, ...) \
drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...) \
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index c030f6ccab99..5b9efff35d6d 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -27,6 +27,8 @@
#include <linux/ctype.h>
#include <drm/drm_mode_object.h>
+#include <uapi/drm/drm_mode.h>
+
/**
* struct drm_property_enum - symbolic values for enumerations
* @value: numeric property value for this enum entry
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index 3980602472c0..425432b85a87 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -131,15 +131,10 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
u32 handle);
-void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
- struct drm_syncobj_cb *cb,
- drm_syncobj_func_t func);
-void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
- struct drm_syncobj_cb *cb);
-void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
+void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, u64 point,
struct dma_fence *fence);
int drm_syncobj_find_fence(struct drm_file *file_private,
- u32 handle,
+ u32 handle, u64 point,
struct dma_fence **fence);
void drm_syncobj_free(struct kref *kref);
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
diff --git a/include/drm/drm_util.h b/include/drm/drm_util.h
new file mode 100644
index 000000000000..88abdca89baa
--- /dev/null
+++ b/include/drm/drm_util.h
@@ -0,0 +1,32 @@
+/*
+ * Internal Header for the Direct Rendering Manager
+ *
+ * Copyright 2018 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_UTIL_H_
+#define _DRM_UTIL_H_
+
+/* helper for handling conditionals in various for_each macros */
+#define for_each_if(condition) if (!(condition)) {} else
+
+#endif
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index fbf5cfc9b352..fd965ffbb92e 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -386,6 +386,7 @@
INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \
+ INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */
/* CFL H */
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index ca1d2cc2cdfa..18863d56273c 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -199,47 +199,57 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#define __declare_arg_0(a0, res) \
struct arm_smccc_res *___res = res; \
- register u32 r0 asm("r0") = a0; \
+ register unsigned long r0 asm("r0") = (u32)a0; \
register unsigned long r1 asm("r1"); \
register unsigned long r2 asm("r2"); \
register unsigned long r3 asm("r3")
#define __declare_arg_1(a0, a1, res) \
+ typeof(a1) __a1 = a1; \
struct arm_smccc_res *___res = res; \
- register u32 r0 asm("r0") = a0; \
- register typeof(a1) r1 asm("r1") = a1; \
+ register unsigned long r0 asm("r0") = (u32)a0; \
+ register unsigned long r1 asm("r1") = __a1; \
register unsigned long r2 asm("r2"); \
register unsigned long r3 asm("r3")
#define __declare_arg_2(a0, a1, a2, res) \
+ typeof(a1) __a1 = a1; \
+ typeof(a2) __a2 = a2; \
struct arm_smccc_res *___res = res; \
- register u32 r0 asm("r0") = a0; \
- register typeof(a1) r1 asm("r1") = a1; \
- register typeof(a2) r2 asm("r2") = a2; \
+ register unsigned long r0 asm("r0") = (u32)a0; \
+ register unsigned long r1 asm("r1") = __a1; \
+ register unsigned long r2 asm("r2") = __a2; \
register unsigned long r3 asm("r3")
#define __declare_arg_3(a0, a1, a2, a3, res) \
+ typeof(a1) __a1 = a1; \
+ typeof(a2) __a2 = a2; \
+ typeof(a3) __a3 = a3; \
struct arm_smccc_res *___res = res; \
- register u32 r0 asm("r0") = a0; \
- register typeof(a1) r1 asm("r1") = a1; \
- register typeof(a2) r2 asm("r2") = a2; \
- register typeof(a3) r3 asm("r3") = a3
+ register unsigned long r0 asm("r0") = (u32)a0; \
+ register unsigned long r1 asm("r1") = __a1; \
+ register unsigned long r2 asm("r2") = __a2; \
+ register unsigned long r3 asm("r3") = __a3
#define __declare_arg_4(a0, a1, a2, a3, a4, res) \
+ typeof(a4) __a4 = a4; \
__declare_arg_3(a0, a1, a2, a3, res); \
- register typeof(a4) r4 asm("r4") = a4
+ register unsigned long r4 asm("r4") = __a4
#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \
+ typeof(a5) __a5 = a5; \
__declare_arg_4(a0, a1, a2, a3, a4, res); \
- register typeof(a5) r5 asm("r5") = a5
+ register unsigned long r5 asm("r5") = __a5
#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \
+ typeof(a6) __a6 = a6; \
__declare_arg_5(a0, a1, a2, a3, a4, a5, res); \
- register typeof(a6) r6 asm("r6") = a6
+ register unsigned long r6 asm("r6") = __a6
#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \
+ typeof(a7) __a7 = a7; \
__declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \
- register typeof(a7) r7 asm("r7") = a7
+ register unsigned long r7 asm("r7") = __a7
#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__)
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index fea64f2692a0..ab137f97ecbd 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -141,7 +141,6 @@ struct vc_data {
struct uni_pagedir *vc_uni_pagedir;
struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
struct uni_screen *vc_uni_screen; /* unicode screen content */
- bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
/* additional information is in vt_kern.h */
};
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 3e7e75383d32..a3cab6dc9b44 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -456,10 +456,13 @@ struct fb_tile_ops {
* and host endianness. Drivers should not use this flag.
*/
#define FBINFO_BE_MATH 0x100000
+/*
+ * Hide smem_start in the FBIOGET_FSCREENINFO IOCTL. This is used by modern DRM
+ * drivers to stop userspace from trying to share buffers behind the kernel's
+ * back. Instead dma-buf based buffer sharing should be used.
+ */
+#define FBINFO_HIDE_SMEM_START 0x200000
-/* report to the VT layer that this fb driver can accept forced console
- output like oopses */
-#define FBINFO_CAN_FORCE_OUTPUT 0x200000
struct fb_info {
atomic_t count;
@@ -632,6 +635,8 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
extern int register_framebuffer(struct fb_info *fb_info);
extern int unregister_framebuffer(struct fb_info *fb_info);
extern int unlink_framebuffer(struct fb_info *fb_info);
+extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id,
+ const char *name);
extern int remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary);
extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index b79387fd57da..65b4eaed1d96 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -855,7 +855,7 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
}
u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold);
-void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf);
+void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred);
int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
/**
diff --git a/include/linux/of.h b/include/linux/of.h
index 4d25e4f952d9..99b0ebf49632 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -256,6 +256,9 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
+extern bool of_node_name_eq(const struct device_node *np, const char *name);
+extern bool of_node_name_prefix(const struct device_node *np, const char *prefix);
+
static inline const char *of_node_full_name(const struct device_node *np)
{
return np ? np->full_name : "<no-node>";
@@ -290,6 +293,8 @@ extern struct device_node *of_get_next_child(const struct device_node *node,
extern struct device_node *of_get_next_available_child(
const struct device_node *node, struct device_node *prev);
+extern struct device_node *of_get_compatible_child(const struct device_node *parent,
+ const char *compatible);
extern struct device_node *of_get_child_by_name(const struct device_node *node,
const char *name);
@@ -561,6 +566,16 @@ static inline struct device_node *to_of_node(const struct fwnode_handle *fwnode)
return NULL;
}
+static inline bool of_node_name_eq(const struct device_node *np, const char *name)
+{
+ return false;
+}
+
+static inline bool of_node_name_prefix(const struct device_node *np, const char *prefix)
+{
+ return false;
+}
+
static inline const char* of_node_full_name(const struct device_node *np)
{
return "<no-node>";
@@ -632,6 +647,12 @@ static inline bool of_have_populated_dt(void)
return false;
}
+static inline struct device_node *of_get_compatible_child(const struct device_node *parent,
+ const char *compatible)
+{
+ return NULL;
+}
+
static inline struct device_node *of_get_child_by_name(
const struct device_node *node,
const char *name)
@@ -967,6 +988,18 @@ static inline struct device_node *of_find_matching_node(
return of_find_matching_node_and_match(from, matches, NULL);
}
+static inline const char *of_node_get_device_type(const struct device_node *np)
+{
+ return of_get_property(np, "type", NULL);
+}
+
+static inline bool of_node_is_type(const struct device_node *np, const char *type)
+{
+ const char *match = of_node_get_device_type(np);
+
+ return np && match && type && !strcmp(match, type);
+}
+
/**
* of_property_count_u8_elems - Count the number of u8 elements in a property
*
diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h
index 9abc0ca7259b..9f0aa1b48c78 100644
--- a/include/linux/platform_data/ina2xx.h
+++ b/include/linux/platform_data/ina2xx.h
@@ -1,7 +1,7 @@
/*
* Driver for Texas Instruments INA219, INA226 power monitor chips
*
- * Copyright (C) 2012 Lothar Felten <[email protected]>
+ * Copyright (C) 2012 Lothar Felten <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h
index ee495d707f17..fe815d7d9f58 100644
--- a/include/linux/platform_data/shmob_drm.h
+++ b/include/linux/platform_data/shmob_drm.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* shmob_drm.h -- SH Mobile DRM driver
*
* Copyright (C) 2012 Renesas Corporation
*
* Laurent Pinchart ([email protected])
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __SHMOB_DRM_H__
diff --git a/include/linux/quota.h b/include/linux/quota.h
index ca9772c8e48b..f32dd270b8e3 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -408,13 +408,7 @@ struct qc_type_state {
struct qc_state {
unsigned int s_incoredqs; /* Number of dquots in core */
- /*
- * Per quota type information. The array should really have
- * max(MAXQUOTAS, XQM_MAXQUOTAS) entries. BUILD_BUG_ON in
- * quota_getinfo() makes sure XQM_MAXQUOTAS is large enough. Once VFS
- * supports project quotas, this can be changed to MAXQUOTAS
- */
- struct qc_type_state s_state[XQM_MAXQUOTAS];
+ struct qc_type_state s_state[MAXQUOTAS]; /* Per quota type information */
};
/* Structure for communicating via ->set_info */
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 3fd07912909c..8dc77e40bc03 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -135,13 +135,6 @@ extern int do_unbind_con_driver(const struct consw *csw, int first, int last,
int deflt);
int vty_init(const struct file_operations *console_fops);
-static inline bool vt_force_oops_output(struct vc_data *vc)
-{
- if (oops_in_progress && vc->vc_panic_force_write && panic_timeout >= 0)
- return true;
- return false;
-}
-
extern char vt_dont_switch;
extern int default_utf8;
extern int global_cursor_default;
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 1ad5b19e83a9..970303448c90 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -23,13 +23,11 @@ struct tc_action {
const struct tc_action_ops *ops;
__u32 type; /* for backward compat(TCA_OLD_COMPAT) */
__u32 order;
- struct list_head list;
struct tcf_idrinfo *idrinfo;
u32 tcfa_index;
refcount_t tcfa_refcnt;
atomic_t tcfa_bindcnt;
- u32 tcfa_capab;
int tcfa_action;
struct tcf_t tcfa_tm;
struct gnet_stats_basic_packed tcfa_bstats;
@@ -44,7 +42,6 @@ struct tc_action {
#define tcf_index common.tcfa_index
#define tcf_refcnt common.tcfa_refcnt
#define tcf_bindcnt common.tcfa_bindcnt
-#define tcf_capab common.tcfa_capab
#define tcf_action common.tcfa_action
#define tcf_tm common.tcfa_tm
#define tcf_bstats common.tcfa_bstats
@@ -102,7 +99,6 @@ struct tc_action_ops {
size_t (*get_fill_size)(const struct tc_action *act);
struct net_device *(*get_dev)(const struct tc_action *a);
void (*put_dev)(struct net_device *dev);
- int (*delete)(struct net *net, u32 index);
};
struct tc_action_net {
@@ -148,8 +144,6 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
const struct tc_action_ops *ops,
struct netlink_ext_ack *extack);
int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index);
-bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
- int bind);
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
struct tc_action **a, const struct tc_action_ops *ops,
int bind, bool cpustats);
@@ -158,7 +152,6 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
struct tc_action **a, int bind);
-int tcf_idr_delete_index(struct tc_action_net *tn, u32 index);
int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
static inline int tcf_idr_release(struct tc_action *a, bool bind)
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index ef727f71336e..75a3f3fdb359 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -298,19 +298,13 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts)
#endif
}
-static inline void tcf_exts_to_list(const struct tcf_exts *exts,
- struct list_head *actions)
-{
#ifdef CONFIG_NET_CLS_ACT
- int i;
-
- for (i = 0; i < exts->nr_actions; i++) {
- struct tc_action *a = exts->actions[i];
-
- list_add_tail(&a->list, actions);
- }
+#define tcf_exts_for_each_action(i, a, exts) \
+ for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
+#else
+#define tcf_exts_for_each_action(i, a, exts) \
+ for (; 0; (void)(i), (void)(a), (void)(exts))
#endif
-}
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
@@ -361,6 +355,15 @@ static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
#endif
}
+static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ return exts->actions[0];
+#else
+ return NULL;
+#endif
+}
+
/**
* tcf_exts_exec - execute tc filter extensions
* @skb: socket buffer
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 721ab7e54d96..139632b87181 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -30,11 +30,50 @@
extern "C" {
#endif
+/**
+ * DOC: overview
+ *
+ * In the DRM subsystem, framebuffer pixel formats are described using the
+ * fourcc codes defined in `include/uapi/drm/drm_fourcc.h`. In addition to the
+ * fourcc code, a Format Modifier may optionally be provided, in order to
+ * further describe the buffer's format - for example tiling or compression.
+ *
+ * Format Modifiers
+ * ----------------
+ *
+ * Format modifiers are used in conjunction with a fourcc code, forming a
+ * unique fourcc:modifier pair. This format:modifier pair must fully define the
+ * format and data layout of the buffer, and should be the only way to describe
+ * that particular buffer.
+ *
+ * Having multiple fourcc:modifier pairs which describe the same layout should
+ * be avoided, as such aliases run the risk of different drivers exposing
+ * different names for the same data format, forcing userspace to understand
+ * that they are aliases.
+ *
+ * Format modifiers may change any property of the buffer, including the number
+ * of planes and/or the required allocation size. Format modifiers are
+ * vendor-namespaced, and as such the relationship between a fourcc code and a
+ * modifier is specific to the modifier being used. For example, some modifiers
+ * may preserve meaning - such as number of planes - from the fourcc code,
+ * whereas others may not.
+ *
+ * Vendors should document their modifier usage in as much detail as
+ * possible, to ensure maximum compatibility across devices, drivers and
+ * applications.
+ *
+ * The authoritative list of format modifier codes is found in
+ * `include/uapi/drm/drm_fourcc.h`
+ */
+
#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
((__u32)(c) << 16) | ((__u32)(d) << 24))
#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
+/* Reserve 0 for the invalid format specifier */
+#define DRM_FORMAT_INVALID 0
+
/* color index */
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
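Userspace supplies a fourcc:modifier pair through DRM_IOCTL_MODE_ADDFB2 when the DRM_MODE_FB_MODIFIERS flag is set. A rough libdrm-based sketch (the resolution, pitch and GEM handle are placeholders):

	#include <stdint.h>
	#include <xf86drm.h>
	#include <drm_fourcc.h>

	/* bo_handle must be a valid GEM handle on fd; sizes are placeholders. */
	static int demo_add_linear_fb(int fd, uint32_t bo_handle)
	{
		struct drm_mode_fb_cmd2 cmd = { 0 };

		cmd.width        = 1920;
		cmd.height       = 1080;
		cmd.pixel_format = DRM_FORMAT_XRGB8888;
		cmd.flags        = DRM_MODE_FB_MODIFIERS;
		cmd.handles[0]   = bo_handle;
		cmd.pitches[0]   = 1920 * 4;
		cmd.modifier[0]  = DRM_FORMAT_MOD_LINEAR;

		return drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &cmd);
	}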
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 8d67243952f4..d3e0fe31efc5 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -186,8 +186,9 @@ extern "C" {
/*
* DRM_MODE_REFLECT_<axis>
*
- * Signals that the contents of a drm plane is reflected in the <axis> axis,
+ * Signals that the contents of a drm plane are reflected along the <axis> axis,
* in the same way as mirroring.
+ * See kerneldoc chapter "Plane Composition Properties" for more details.
*
* This define is provided as a convenience, looking up the property id
* using the name->prop id lookup is the preferred method.
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 7f5634ce8e88..a4446f452040 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -529,6 +529,28 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
+/*
+ * Once upon a time we supposed that writes through the GGTT would be
+ * immediately in physical memory (once flushed out of the CPU path). However,
+ * on a few different processors and chipsets, this is not necessarily the case
+ * as the writes appear to be buffered internally. Thus a read of the backing
+ * storage (physical memory) via a different path (with different physical tags
+ * to the indirect write via the GGTT) will see stale values from before
+ * the GGTT write. Inside the kernel, we can for the most part keep track of
+ * the different read/write domains in use (e.g. set-domain), but the assumption
+ * of coherency is baked into the ABI, hence reporting its true state in this
+ * parameter.
+ *
+ * Reports true when writes via mmap_gtt are immediately visible following an
+ * lfence to flush the WCB.
+ *
+ * Reports false when writes via mmap_gtt are indeterminately delayed in an
+ * internal buffer and are _not_ immediately visible to third parties accessing
+ * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
+ * communications channel when reporting false is strongly discouraged.
+ */
+#define I915_PARAM_MMAP_GTT_COHERENT 52
+
typedef struct drm_i915_getparam {
__s32 param;
/*
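Userspace can query the new parameter through the usual GETPARAM ioctl; a minimal libdrm-based sketch (fd is assumed to be an open i915 DRM file descriptor):

	#include <xf86drm.h>
	#include <i915_drm.h>

	static int demo_mmap_gtt_is_coherent(int fd)
	{
		int value = 0;
		struct drm_i915_getparam gp = {
			.param = I915_PARAM_MMAP_GTT_COHERENT,
			.value = &value,
		};

		if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return -1;	/* query failed (e.g. kernel too old) */
		return value;		/* 1: GGTT writes are immediately coherent */
	}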
diff --git a/include/uapi/linux/udmabuf.h b/include/uapi/linux/udmabuf.h
new file mode 100644
index 000000000000..46b6532ed855
--- /dev/null
+++ b/include/uapi/linux/udmabuf.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_UDMABUF_H
+#define _UAPI_LINUX_UDMABUF_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define UDMABUF_FLAGS_CLOEXEC 0x01
+
+struct udmabuf_create {
+ __u32 memfd;
+ __u32 flags;
+ __u64 offset;
+ __u64 size;
+};
+
+struct udmabuf_create_item {
+ __u32 memfd;
+ __u32 __pad;
+ __u64 offset;
+ __u64 size;
+};
+
+struct udmabuf_create_list {
+ __u32 flags;
+ __u32 count;
+ struct udmabuf_create_item list[];
+};
+
+#define UDMABUF_CREATE _IOW('u', 0x42, struct udmabuf_create)
+#define UDMABUF_CREATE_LIST _IOW('u', 0x43, struct udmabuf_create_list)
+
+#endif /* _UAPI_LINUX_UDMABUF_H */
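From userspace the new ioctls turn a sealed memfd into a dma-buf file descriptor. A rough usage sketch for UDMABUF_CREATE (error handling trimmed; the driver's misc device node is assumed to be /dev/udmabuf):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <linux/udmabuf.h>

	static int demo_create_udmabuf(size_t size)
	{
		int devfd = open("/dev/udmabuf", O_RDWR);
		int memfd = memfd_create("demo-buffer", MFD_ALLOW_SEALING);
		struct udmabuf_create create = { 0 };

		ftruncate(memfd, size);
		/* udmabuf expects the backing memfd to be sealed against shrinking. */
		fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

		create.memfd  = memfd;
		create.offset = 0;
		create.size   = size;
		return ioctl(devfd, UDMABUF_CREATE, &create);	/* dma-buf fd */
	}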
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 04b8eda94e7d..03cc59ee9c95 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -15,6 +15,7 @@
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
+#include <linux/random.h>
#include <uapi/linux/btf.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
@@ -41,6 +42,7 @@ struct bpf_htab {
atomic_t count; /* number of elements in this hashtable */
u32 n_buckets; /* number of hash buckets */
u32 elem_size; /* size of each element in bytes */
+ u32 hashrnd;
};
/* each htab element is struct htab_elem + key + value */
@@ -371,6 +373,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
if (!htab->buckets)
goto free_htab;
+ htab->hashrnd = get_random_int();
for (i = 0; i < htab->n_buckets; i++) {
INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
raw_spin_lock_init(&htab->buckets[i].lock);
@@ -402,9 +405,9 @@ free_htab:
return ERR_PTR(err);
}
-static inline u32 htab_map_hash(const void *key, u32 key_len)
+static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
{
- return jhash(key, key_len, 0);
+ return jhash(key, key_len, hashrnd);
}
static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
@@ -470,7 +473,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
key_size = map->key_size;
- hash = htab_map_hash(key, key_size);
+ hash = htab_map_hash(key, key_size, htab->hashrnd);
head = select_bucket(htab, hash);
@@ -597,7 +600,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
if (!key)
goto find_first_elem;
- hash = htab_map_hash(key, key_size);
+ hash = htab_map_hash(key, key_size, htab->hashrnd);
head = select_bucket(htab, hash);
@@ -824,7 +827,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
key_size = map->key_size;
- hash = htab_map_hash(key, key_size);
+ hash = htab_map_hash(key, key_size, htab->hashrnd);
b = __select_bucket(htab, hash);
head = &b->head;
@@ -880,7 +883,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
key_size = map->key_size;
- hash = htab_map_hash(key, key_size);
+ hash = htab_map_hash(key, key_size, htab->hashrnd);
b = __select_bucket(htab, hash);
head = &b->head;
@@ -945,7 +948,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
key_size = map->key_size;
- hash = htab_map_hash(key, key_size);
+ hash = htab_map_hash(key, key_size, htab->hashrnd);
b = __select_bucket(htab, hash);
head = &b->head;
@@ -998,7 +1001,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
key_size = map->key_size;
- hash = htab_map_hash(key, key_size);
+ hash = htab_map_hash(key, key_size, htab->hashrnd);
b = __select_bucket(htab, hash);
head = &b->head;
@@ -1071,7 +1074,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
key_size = map->key_size;
- hash = htab_map_hash(key, key_size);
+ hash = htab_map_hash(key, key_size, htab->hashrnd);
b = __select_bucket(htab, hash);
head = &b->head;
@@ -1103,7 +1106,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
key_size = map->key_size;
- hash = htab_map_hash(key, key_size);
+ hash = htab_map_hash(key, key_size, htab->hashrnd);
b = __select_bucket(htab, hash);
head = &b->head;
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 98e621a29e8e..cf5195c7c331 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -1427,12 +1427,15 @@ out:
static void smap_write_space(struct sock *sk)
{
struct smap_psock *psock;
+ void (*write_space)(struct sock *sk);
rcu_read_lock();
psock = smap_psock_sk(sk);
if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
schedule_work(&psock->tx_work);
+ write_space = psock->save_write_space;
rcu_read_unlock();
+ write_space(sk);
}
static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
@@ -2140,7 +2143,9 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
return ERR_PTR(-EPERM);
/* check sanity of attributes */
- if (attr->max_entries == 0 || attr->value_size != 4 ||
+ if (attr->max_entries == 0 ||
+ attr->key_size == 0 ||
+ attr->value_size != 4 ||
attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
return ERR_PTR(-EINVAL);
@@ -2267,8 +2272,10 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
}
l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
htab->map.numa_node);
- if (!l_new)
+ if (!l_new) {
+ atomic_dec(&htab->count);
return ERR_PTR(-ENOMEM);
+ }
memcpy(l_new->key, key, key_size);
l_new->sk = sk;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index ed44d7d34c2d..aa7fe85ad62e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -102,8 +102,6 @@ static inline void cpuhp_lock_release(bool bringup) { }
* @name: Name of the step
* @startup: Startup function of the step
* @teardown: Teardown function of the step
- * @skip_onerr: Do not invoke the functions on error rollback
- * Will go away once the notifiers are gone
* @cant_stop: Bringup/teardown can't be stopped at this step
*/
struct cpuhp_step {
@@ -119,7 +117,6 @@ struct cpuhp_step {
struct hlist_node *node);
} teardown;
struct hlist_head list;
- bool skip_onerr;
bool cant_stop;
bool multi_instance;
};
@@ -550,12 +547,8 @@ static int bringup_cpu(unsigned int cpu)
static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
- for (st->state--; st->state > st->target; st->state--) {
- struct cpuhp_step *step = cpuhp_get_step(st->state);
-
- if (!step->skip_onerr)
- cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
- }
+ for (st->state--; st->state > st->target; st->state--)
+ cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
}
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
@@ -644,12 +637,6 @@ static void cpuhp_thread_fun(unsigned int cpu)
WARN_ON_ONCE(!cpuhp_is_ap_state(state));
- if (st->rollback) {
- struct cpuhp_step *step = cpuhp_get_step(state);
- if (step->skip_onerr)
- goto next;
- }
-
if (cpuhp_is_atomic_state(state)) {
local_irq_disable();
st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
@@ -673,7 +660,6 @@ static void cpuhp_thread_fun(unsigned int cpu)
st->should_run = false;
}
-next:
cpuhp_lock_release(bringup);
if (!st->should_run)
@@ -916,12 +902,8 @@ void cpuhp_report_idle_dead(void)
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
- for (st->state++; st->state < st->target; st->state++) {
- struct cpuhp_step *step = cpuhp_get_step(st->state);
-
- if (!step->skip_onerr)
- cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
- }
+ for (st->state++; st->state < st->target; st->state++)
+ cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
}
static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 924e37fb1620..fd6f8ed28e01 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -38,7 +38,6 @@
#include <linux/kmsg_dump.h>
#include <linux/syslog.h>
#include <linux/cpu.h>
-#include <linux/notifier.h>
#include <linux/rculist.h>
#include <linux/poll.h>
#include <linux/irq_work.h>
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 5470dce212c0..977918d5d350 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -261,7 +261,7 @@ static void __touch_watchdog(void)
* entering idle state. This should only be used for scheduler events.
* Use touch_softlockup_watchdog() for everything else.
*/
-void touch_softlockup_watchdog_sched(void)
+notrace void touch_softlockup_watchdog_sched(void)
{
/*
* Preemption can be enabled. It doesn't matter which CPU's timestamp
@@ -270,7 +270,7 @@ void touch_softlockup_watchdog_sched(void)
raw_cpu_write(watchdog_touch_ts, 0);
}
-void touch_softlockup_watchdog(void)
+notrace void touch_softlockup_watchdog(void)
{
touch_softlockup_watchdog_sched();
wq_watchdog_touch(raw_smp_processor_id());
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 1f7020d65d0a..71381168dede 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -29,7 +29,7 @@ static struct cpumask dead_events_mask;
static unsigned long hardlockup_allcpu_dumped;
static atomic_t watchdog_cpus = ATOMIC_INIT(0);
-void arch_touch_nmi_watchdog(void)
+notrace void arch_touch_nmi_watchdog(void)
{
/*
* Using __raw here because some code paths have
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 60e80198c3df..0280deac392e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -5574,7 +5574,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
mod_timer(&wq_watchdog_timer, jiffies + thresh);
}
-void wq_watchdog_touch(int cpu)
+notrace void wq_watchdog_touch(int cpu)
{
if (cpu >= 0)
per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index c72577e472f2..a66595ba5543 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -4,7 +4,6 @@
*/
#include <linux/percpu_counter.h>
-#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 310e29b51507..30526afa8343 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -28,7 +28,6 @@
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>
-#include <linux/rhashtable.h>
#define HASH_DEFAULT_SIZE 64UL
#define HASH_MIN_SIZE 4U
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6551d3b0dc30..84ae9bf5858a 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -27,7 +27,6 @@
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
-#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e75865d58ba7..05e983f42316 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -32,7 +32,6 @@
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
-#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
diff --git a/mm/slub.c b/mm/slub.c
index ce2b9e5cea77..8da34a8af53d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -19,7 +19,6 @@
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
-#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/cpu.h>
diff --git a/net/core/dev.c b/net/core/dev.c
index 325fc5088370..82114e1111e6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -93,7 +93,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
-#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 962c4fd338ba..1c45c1d6d241 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -767,7 +767,6 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
const struct tc_action *a;
struct dsa_port *to_dp;
int err = -EOPNOTSUPP;
- LIST_HEAD(actions);
if (!ds->ops->port_mirror_add)
return err;
@@ -775,8 +774,7 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
if (!tcf_exts_has_one_action(cls->exts))
return err;
- tcf_exts_to_list(cls->exts, &actions);
- a = list_first_entry(&actions, struct tc_action, list);
+ a = tcf_exts_first_action(cls->exts);
if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
struct dsa_mall_mirror_tc_entry *mirror;
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 13d34427ca3d..02ff2dde9609 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -95,11 +95,10 @@ struct bbr {
u32 mode:3, /* current bbr_mode in state machine */
prev_ca_state:3, /* CA state on previous ACK */
packet_conservation:1, /* use packet conservation? */
- restore_cwnd:1, /* decided to revert cwnd to old value */
round_start:1, /* start of packet-timed tx->ack round? */
idle_restart:1, /* restarting after idle? */
probe_rtt_round_done:1, /* a BBR_PROBE_RTT round at 4 pkts? */
- unused:12,
+ unused:13,
lt_is_sampling:1, /* taking long-term ("LT") samples now? */
lt_rtt_cnt:7, /* round trips in long-term interval */
lt_use_bw:1; /* use lt_bw as our bw estimate? */
@@ -175,6 +174,8 @@ static const u32 bbr_lt_bw_diff = 4000 / 8;
/* If we estimate we're policed, use lt_bw for this many round trips: */
static const u32 bbr_lt_bw_max_rtts = 48;
+static void bbr_check_probe_rtt_done(struct sock *sk);
+
/* Do we estimate that STARTUP filled the pipe? */
static bool bbr_full_bw_reached(const struct sock *sk)
{
@@ -309,6 +310,8 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
*/
if (bbr->mode == BBR_PROBE_BW)
bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
+ else if (bbr->mode == BBR_PROBE_RTT)
+ bbr_check_probe_rtt_done(sk);
}
}
@@ -396,17 +399,11 @@ static bool bbr_set_cwnd_to_recover_or_restore(
cwnd = tcp_packets_in_flight(tp) + acked;
} else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
/* Exiting loss recovery; restore cwnd saved before recovery. */
- bbr->restore_cwnd = 1;
+ cwnd = max(cwnd, bbr->prior_cwnd);
bbr->packet_conservation = 0;
}
bbr->prev_ca_state = state;
- if (bbr->restore_cwnd) {
- /* Restore cwnd after exiting loss recovery or PROBE_RTT. */
- cwnd = max(cwnd, bbr->prior_cwnd);
- bbr->restore_cwnd = 0;
- }
-
if (bbr->packet_conservation) {
*new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
return true; /* yes, using packet conservation */
@@ -423,10 +420,10 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
{
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
- u32 cwnd = 0, target_cwnd = 0;
+ u32 cwnd = tp->snd_cwnd, target_cwnd = 0;
if (!acked)
- return;
+ goto done; /* no packet fully ACKed; just apply caps */
if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
goto done;
@@ -748,6 +745,20 @@ static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */
}
+static void bbr_check_probe_rtt_done(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ if (!(bbr->probe_rtt_done_stamp &&
+ after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
+ return;
+
+ bbr->min_rtt_stamp = tcp_jiffies32; /* wait a while until PROBE_RTT */
+ tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
+ bbr_reset_mode(sk);
+}
+
/* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
* periodically drain the bottleneck queue, to converge to measure the true
* min_rtt (unloaded propagation delay). This allows the flows to keep queues
@@ -806,12 +817,8 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
} else if (bbr->probe_rtt_done_stamp) {
if (bbr->round_start)
bbr->probe_rtt_round_done = 1;
- if (bbr->probe_rtt_round_done &&
- after(tcp_jiffies32, bbr->probe_rtt_done_stamp)) {
- bbr->min_rtt_stamp = tcp_jiffies32;
- bbr->restore_cwnd = 1; /* snap to prior_cwnd */
- bbr_reset_mode(sk);
- }
+ if (bbr->probe_rtt_round_done)
+ bbr_check_probe_rtt_done(sk);
}
}
/* Restart after idle ends only once we process a new S/ACK for data */
@@ -862,7 +869,6 @@ static void bbr_init(struct sock *sk)
bbr->has_seen_rtt = 0;
bbr_init_pacing_rate_from_rtt(sk);
- bbr->restore_cwnd = 0;
bbr->round_start = 0;
bbr->idle_restart = 0;
bbr->full_bw_reached = 0;
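
For context on the tcp_bbr.c changes above: the per-connection restore_cwnd flag is dropped in favour of restoring cwnd directly with max(cwnd, prior_cwnd), and the end-of-PROBE_RTT handling moves into bbr_check_probe_rtt_done() so the idle-restart path in bbr_cwnd_event() can also end PROBE_RTT. A stand-alone sketch of that helper's shape, with invented names and a simplified wrap-safe comparison standing in for after():

#include <stdbool.h>

struct bbr_sketch {
        unsigned long probe_rtt_done_stamp;  /* 0 means PROBE_RTT not armed */
        unsigned int  snd_cwnd;
        unsigned int  prior_cwnd;            /* cwnd saved before PROBE_RTT */
};

/* Wrap-safe "now is after stamp", like the kernel's after()/time_after(). */
static bool after_sketch(unsigned long now, unsigned long stamp)
{
        return (long)(now - stamp) > 0;
}

static void check_probe_rtt_done_sketch(struct bbr_sketch *b, unsigned long now)
{
        if (!(b->probe_rtt_done_stamp && after_sketch(now, b->probe_rtt_done_stamp)))
                return;                      /* not armed, or dwell time not over */

        /* Snap back to the pre-PROBE_RTT congestion window and leave the
         * mode (bbr_reset_mode() in the real code). */
        if (b->snd_cwnd < b->prior_cwnd)
                b->snd_cwnd = b->prior_cwnd;
        b->probe_rtt_done_stamp = 0;
}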
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9e041fa5c545..44c09eddbb78 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2517,6 +2517,12 @@ static int __net_init tcp_sk_init(struct net *net)
if (res)
goto fail;
sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
+
+ /* Enforce IP_DF and a zero IP ID on the RST and
+ * ACK packets sent in SYN-RECV and TIME-WAIT state.
+ */
+ inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
+
*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
}
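
The tcp_ipv4.c hunk sets IP_PMTUDISC_DO on the per-CPU control sockets that transmit those RSTs and ACKs, so the packets always go out with DF set. A rough userspace analogue of the same knob, shown only for illustration (the kernel assigns inet_sk(sk)->pmtudisc directly rather than going through setsockopt):

#include <netinet/in.h>
#include <sys/socket.h>

/* Ask the stack to always set DF on this socket's packets. */
static int set_always_df(int fd)
{
        int val = IP_PMTUDISC_DO;

        return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val));
}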
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 2fac4ad74867..d51a8c0b3372 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2398,7 +2398,7 @@ static void addrconf_add_mroute(struct net_device *dev)
ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
- ip6_route_add(&cfg, GFP_ATOMIC, NULL);
+ ip6_route_add(&cfg, GFP_KERNEL, NULL);
}
static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
@@ -3062,7 +3062,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
if (addr.s6_addr32[3]) {
add_addr(idev, &addr, plen, scope);
addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags,
- GFP_ATOMIC);
+ GFP_KERNEL);
return;
}
@@ -3087,7 +3087,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
add_addr(idev, &addr, plen, flag);
addrconf_prefix_route(&addr, plen, 0, idev->dev,
- 0, pflags, GFP_ATOMIC);
+ 0, pflags, GFP_KERNEL);
}
}
}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index d212738e9d10..c861a6d4671d 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -198,6 +198,8 @@ void fib6_info_destroy_rcu(struct rcu_head *head)
}
}
+ lwtstate_put(f6i->fib6_nh.nh_lwtstate);
+
if (f6i->fib6_nh.nh_dev)
dev_put(f6i->fib6_nh.nh_dev);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 38dec9da90d3..5095367c7204 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -1094,7 +1094,8 @@ static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n,
}
t = rtnl_dereference(ip6n->tnls_wc[0]);
- unregister_netdevice_queue(t->dev, list);
+ if (t)
+ unregister_netdevice_queue(t->dev, list);
}
static int __net_init vti6_init_net(struct net *net)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7208c16302f6..c4ea13e8360b 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -956,7 +956,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
rt->dst.error = 0;
rt->dst.output = ip6_output;
- if (ort->fib6_type == RTN_LOCAL) {
+ if (ort->fib6_type == RTN_LOCAL || ort->fib6_type == RTN_ANYCAST) {
rt->dst.input = ip6_input;
} else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
rt->dst.input = ip6_mc_input;
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
index 82e6edf9c5d9..45f33d6dedf7 100644
--- a/net/ncsi/ncsi-netlink.c
+++ b/net/ncsi/ncsi-netlink.c
@@ -100,7 +100,7 @@ static int ncsi_write_package_info(struct sk_buff *skb,
bool found;
int rc;
- if (id > ndp->package_num) {
+ if (id > ndp->package_num - 1) {
netdev_info(ndp->ndev.dev, "NCSI: No package with id %u\n", id);
return -ENODEV;
}
@@ -240,7 +240,7 @@ static int ncsi_pkg_info_all_nl(struct sk_buff *skb,
return 0; /* done */
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
- &ncsi_genl_family, 0, NCSI_CMD_PKG_INFO);
+ &ncsi_genl_family, NLM_F_MULTI, NCSI_CMD_PKG_INFO);
if (!hdr) {
rc = -EMSGSIZE;
goto err;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 2c7b7c352d3e..b9bbcf3d6c63 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -37,7 +37,6 @@
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
-#include <net/tcp.h>
#include <net/addrconf.h>
#include "rds.h"
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 229d63c99be2..db83dac1e7f4 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -300,21 +300,17 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
}
EXPORT_SYMBOL(tcf_generic_walker);
-static bool __tcf_idr_check(struct tc_action_net *tn, u32 index,
- struct tc_action **a, int bind)
+int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
struct tcf_idrinfo *idrinfo = tn->idrinfo;
struct tc_action *p;
spin_lock(&idrinfo->lock);
p = idr_find(&idrinfo->action_idr, index);
- if (IS_ERR(p)) {
+ if (IS_ERR(p))
p = NULL;
- } else if (p) {
+ else if (p)
refcount_inc(&p->tcfa_refcnt);
- if (bind)
- atomic_inc(&p->tcfa_bindcnt);
- }
spin_unlock(&idrinfo->lock);
if (p) {
@@ -323,23 +319,10 @@ static bool __tcf_idr_check(struct tc_action_net *tn, u32 index,
}
return false;
}
-
-int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
-{
- return __tcf_idr_check(tn, index, a, 0);
-}
EXPORT_SYMBOL(tcf_idr_search);
-bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
- int bind)
+static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
{
- return __tcf_idr_check(tn, index, a, bind);
-}
-EXPORT_SYMBOL(tcf_idr_check);
-
-int tcf_idr_delete_index(struct tc_action_net *tn, u32 index)
-{
- struct tcf_idrinfo *idrinfo = tn->idrinfo;
struct tc_action *p;
int ret = 0;
@@ -370,7 +353,6 @@ int tcf_idr_delete_index(struct tc_action_net *tn, u32 index)
spin_unlock(&idrinfo->lock);
return ret;
}
-EXPORT_SYMBOL(tcf_idr_delete_index);
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
struct tc_action **a, const struct tc_action_ops *ops,
@@ -409,7 +391,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
p->idrinfo = idrinfo;
p->ops = ops;
- INIT_LIST_HEAD(&p->list);
*a = p;
return 0;
err3:
@@ -686,14 +667,18 @@ static int tcf_action_put(struct tc_action *p)
return __tcf_action_put(p, false);
}
+/* Put all actions in this array, skipping any NULL entries. */
static void tcf_action_put_many(struct tc_action *actions[])
{
int i;
- for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
+ for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
struct tc_action *a = actions[i];
- const struct tc_action_ops *ops = a->ops;
+ const struct tc_action_ops *ops;
+ if (!a)
+ continue;
+ ops = a->ops;
if (tcf_action_put(a))
module_put(ops->owner);
}
@@ -1175,41 +1160,38 @@ err_out:
return err;
}
-static int tcf_action_delete(struct net *net, struct tc_action *actions[],
- int *acts_deleted, struct netlink_ext_ack *extack)
+static int tcf_action_delete(struct net *net, struct tc_action *actions[])
{
- u32 act_index;
- int ret, i;
+ int i;
for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
struct tc_action *a = actions[i];
const struct tc_action_ops *ops = a->ops;
-
/* Actions can be deleted concurrently so we must save their
* type and id to search again after reference is released.
*/
- act_index = a->tcfa_index;
+ struct tcf_idrinfo *idrinfo = a->idrinfo;
+ u32 act_index = a->tcfa_index;
if (tcf_action_put(a)) {
/* last reference, action was deleted concurrently */
module_put(ops->owner);
} else {
+ int ret;
+
/* now do the delete */
- ret = ops->delete(net, act_index);
- if (ret < 0) {
- *acts_deleted = i + 1;
+ ret = tcf_idr_delete_index(idrinfo, act_index);
+ if (ret < 0)
return ret;
- }
}
+ actions[i] = NULL;
}
- *acts_deleted = i;
return 0;
}
static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
- int *acts_deleted, u32 portid, size_t attr_size,
- struct netlink_ext_ack *extack)
+ u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
int ret;
struct sk_buff *skb;
@@ -1227,7 +1209,7 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
}
/* now do the delete */
- ret = tcf_action_delete(net, actions, acts_deleted, extack);
+ ret = tcf_action_delete(net, actions);
if (ret < 0) {
NL_SET_ERR_MSG(extack, "Failed to delete TC action");
kfree_skb(skb);
@@ -1249,8 +1231,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct tc_action *act;
size_t attr_size = 0;
- struct tc_action *actions[TCA_ACT_MAX_PRIO + 1] = {};
- int acts_deleted = 0;
+ struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack);
if (ret < 0)
@@ -1280,14 +1261,13 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
if (event == RTM_GETACTION)
ret = tcf_get_notify(net, portid, n, actions, event, extack);
else { /* delete */
- ret = tcf_del_notify(net, n, actions, &acts_deleted, portid,
- attr_size, extack);
+ ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
if (ret)
goto err;
- return ret;
+ return 0;
}
err:
- tcf_action_put_many(&actions[acts_deleted]);
+ tcf_action_put_many(actions);
return ret;
}
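
Two points worth noting in the act_api.c rework above: tcf_idr_search() now takes the reference itself (the separate bind-count path disappears along with the per-action ->delete() callbacks removed in the files below), and tcf_action_delete() clears each array slot as it goes, so tcf_action_put_many() must skip NULL holes instead of stopping at the first one. A stripped-down illustration of that loop change, with stand-in names:

#define PRIO_MAX 32

struct act_sketch { int refcnt; };

static void put_act(struct act_sketch *a) { a->refcnt--; }

static void put_many_sketch(struct act_sketch *actions[])
{
        int i;

        for (i = 0; i < PRIO_MAX; i++) {
                if (!actions[i])
                        continue;        /* a deleted slot; keep scanning */
                put_act(actions[i]);     /* stands in for tcf_action_put() */
        }
}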
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index d30b23e42436..0c68bc9cf0b4 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -395,13 +395,6 @@ static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_bpf_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, bpf_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_bpf_ops __read_mostly = {
.kind = "bpf",
.type = TCA_ACT_BPF,
@@ -412,7 +405,6 @@ static struct tc_action_ops act_bpf_ops __read_mostly = {
.init = tcf_bpf_init,
.walk = tcf_bpf_walker,
.lookup = tcf_bpf_search,
- .delete = tcf_bpf_delete,
.size = sizeof(struct tcf_bpf),
};
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 54c0bf54f2ac..6f0f273f1139 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -198,13 +198,6 @@ static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_connmark_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, connmark_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_connmark_ops = {
.kind = "connmark",
.type = TCA_ACT_CONNMARK,
@@ -214,7 +207,6 @@ static struct tc_action_ops act_connmark_ops = {
.init = tcf_connmark_init,
.walk = tcf_connmark_walker,
.lookup = tcf_connmark_search,
- .delete = tcf_connmark_delete,
.size = sizeof(struct tcf_connmark_info),
};
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index e698d3fe2080..b8a67ae3105a 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -659,13 +659,6 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act)
return nla_total_size(sizeof(struct tc_csum));
}
-static int tcf_csum_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, csum_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_csum_ops = {
.kind = "csum",
.type = TCA_ACT_CSUM,
@@ -677,7 +670,6 @@ static struct tc_action_ops act_csum_ops = {
.walk = tcf_csum_walker,
.lookup = tcf_csum_search,
.get_fill_size = tcf_csum_get_fill_size,
- .delete = tcf_csum_delete,
.size = sizeof(struct tcf_csum),
};
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 6a3f25a8ffb3..cd1d9bd32ef9 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -243,13 +243,6 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act)
return sz;
}
-static int tcf_gact_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, gact_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_gact_ops = {
.kind = "gact",
.type = TCA_ACT_GACT,
@@ -261,7 +254,6 @@ static struct tc_action_ops act_gact_ops = {
.walk = tcf_gact_walker,
.lookup = tcf_gact_search,
.get_fill_size = tcf_gact_get_fill_size,
- .delete = tcf_gact_delete,
.size = sizeof(struct tcf_gact),
};
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index d1081bdf1bdb..196430aefe87 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -167,16 +167,16 @@ static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
{
struct tcf_meta_ops *o;
- read_lock_bh(&ife_mod_lock);
+ read_lock(&ife_mod_lock);
list_for_each_entry(o, &ifeoplist, list) {
if (o->metaid == metaid) {
if (!try_module_get(o->owner))
o = NULL;
- read_unlock_bh(&ife_mod_lock);
+ read_unlock(&ife_mod_lock);
return o;
}
}
- read_unlock_bh(&ife_mod_lock);
+ read_unlock(&ife_mod_lock);
return NULL;
}
@@ -190,12 +190,12 @@ int register_ife_op(struct tcf_meta_ops *mops)
!mops->get || !mops->alloc)
return -EINVAL;
- write_lock_bh(&ife_mod_lock);
+ write_lock(&ife_mod_lock);
list_for_each_entry(m, &ifeoplist, list) {
if (m->metaid == mops->metaid ||
(strcmp(mops->name, m->name) == 0)) {
- write_unlock_bh(&ife_mod_lock);
+ write_unlock(&ife_mod_lock);
return -EEXIST;
}
}
@@ -204,7 +204,7 @@ int register_ife_op(struct tcf_meta_ops *mops)
mops->release = ife_release_meta_gen;
list_add_tail(&mops->list, &ifeoplist);
- write_unlock_bh(&ife_mod_lock);
+ write_unlock(&ife_mod_lock);
return 0;
}
EXPORT_SYMBOL_GPL(unregister_ife_op);
@@ -214,7 +214,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops)
struct tcf_meta_ops *m;
int err = -ENOENT;
- write_lock_bh(&ife_mod_lock);
+ write_lock(&ife_mod_lock);
list_for_each_entry(m, &ifeoplist, list) {
if (m->metaid == mops->metaid) {
list_del(&mops->list);
@@ -222,7 +222,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops)
break;
}
}
- write_unlock_bh(&ife_mod_lock);
+ write_unlock(&ife_mod_lock);
return err;
}
@@ -265,11 +265,8 @@ static const char *ife_meta_id2name(u32 metaid)
#endif
/* called when adding new meta information
- * under ife->tcf_lock for existing action
*/
-static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
- void *val, int len, bool exists,
- bool rtnl_held)
+static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held)
{
struct tcf_meta_ops *ops = find_ife_oplist(metaid);
int ret = 0;
@@ -277,15 +274,11 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
if (!ops) {
ret = -ENOENT;
#ifdef CONFIG_MODULES
- if (exists)
- spin_unlock_bh(&ife->tcf_lock);
if (rtnl_held)
rtnl_unlock();
request_module("ife-meta-%s", ife_meta_id2name(metaid));
if (rtnl_held)
rtnl_lock();
- if (exists)
- spin_lock_bh(&ife->tcf_lock);
ops = find_ife_oplist(metaid);
#endif
}
@@ -302,24 +295,17 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
}
/* called when adding new meta information
- * under ife->tcf_lock for existing action
*/
-static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
- int len, bool atomic)
+static int __add_metainfo(const struct tcf_meta_ops *ops,
+ struct tcf_ife_info *ife, u32 metaid, void *metaval,
+ int len, bool atomic, bool exists)
{
struct tcf_meta_info *mi = NULL;
- struct tcf_meta_ops *ops = find_ife_oplist(metaid);
int ret = 0;
- if (!ops)
- return -ENOENT;
-
mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
- if (!mi) {
- /*put back what find_ife_oplist took */
- module_put(ops->owner);
+ if (!mi)
return -ENOMEM;
- }
mi->metaid = metaid;
mi->ops = ops;
@@ -327,29 +313,47 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
if (ret != 0) {
kfree(mi);
- module_put(ops->owner);
return ret;
}
}
+ if (exists)
+ spin_lock_bh(&ife->tcf_lock);
list_add_tail(&mi->metalist, &ife->metalist);
+ if (exists)
+ spin_unlock_bh(&ife->tcf_lock);
+
+ return ret;
+}
+
+static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
+ int len, bool exists)
+{
+ const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
+ int ret;
+ if (!ops)
+ return -ENOENT;
+ ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
+ if (ret)
+ /* put back what find_ife_oplist took */
+ module_put(ops->owner);
return ret;
}
-static int use_all_metadata(struct tcf_ife_info *ife)
+static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
{
struct tcf_meta_ops *o;
int rc = 0;
int installed = 0;
- read_lock_bh(&ife_mod_lock);
+ read_lock(&ife_mod_lock);
list_for_each_entry(o, &ifeoplist, list) {
- rc = add_metainfo(ife, o->metaid, NULL, 0, true);
+ rc = __add_metainfo(o, ife, o->metaid, NULL, 0, true, exists);
if (rc == 0)
installed += 1;
}
- read_unlock_bh(&ife_mod_lock);
+ read_unlock(&ife_mod_lock);
if (installed)
return 0;
@@ -422,7 +426,6 @@ static void tcf_ife_cleanup(struct tc_action *a)
kfree_rcu(p, rcu);
}
-/* under ife->tcf_lock for existing action */
static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
bool exists, bool rtnl_held)
{
@@ -436,8 +439,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
val = nla_data(tb[i]);
len = nla_len(tb[i]);
- rc = load_metaops_and_vet(ife, i, val, len, exists,
- rtnl_held);
+ rc = load_metaops_and_vet(i, val, len, rtnl_held);
if (rc != 0)
return rc;
@@ -540,8 +542,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
p->eth_type = ife_type;
}
- if (exists)
- spin_lock_bh(&ife->tcf_lock);
if (ret == ACT_P_CREATED)
INIT_LIST_HEAD(&ife->metalist);
@@ -551,10 +551,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
NULL, NULL);
if (err) {
metadata_parse_err:
- if (exists)
- spin_unlock_bh(&ife->tcf_lock);
tcf_idr_release(*a, bind);
-
kfree(p);
return err;
}
@@ -569,17 +566,16 @@ metadata_parse_err:
* as we can. You better have at least one else we are
* going to bail out
*/
- err = use_all_metadata(ife);
+ err = use_all_metadata(ife, exists);
if (err) {
- if (exists)
- spin_unlock_bh(&ife->tcf_lock);
tcf_idr_release(*a, bind);
-
kfree(p);
return err;
}
}
+ if (exists)
+ spin_lock_bh(&ife->tcf_lock);
ife->tcf_action = parm->action;
/* protected by tcf_lock when modifying existing action */
rcu_swap_protected(ife->params, p, 1);
@@ -853,13 +849,6 @@ static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_ife_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, ife_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_ife_ops = {
.kind = "ife",
.type = TCA_ACT_IFE,
@@ -870,7 +859,6 @@ static struct tc_action_ops act_ife_ops = {
.init = tcf_ife_init,
.walk = tcf_ife_walker,
.lookup = tcf_ife_search,
- .delete = tcf_ife_delete,
.size = sizeof(struct tcf_ife_info),
};
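
The act_ife.c changes split metadata insertion so the sleeping allocation no longer happens under ife->tcf_lock: __add_metainfo() works on an ops the caller already holds, while add_metainfo() looks the ops up (taking a module reference) and gives that reference back if the worker fails; use_all_metadata() walks the list under ife_mod_lock and calls the worker directly. A compact sketch of that ownership pattern, with invented names and a plain counter standing in for try_module_get()/module_put():

#include <stddef.h>

struct ops_sketch { int refs; };

static struct ops_sketch known_ops = { 0 };

static struct ops_sketch *find_ops_get(int metaid)
{
        if (metaid != 1)
                return NULL;            /* unknown metaid */
        known_ops.refs++;               /* reference taken by the lookup */
        return &known_ops;
}

static int __add_meta_sketch(struct ops_sketch *ops, int metaid)
{
        (void)ops;
        (void)metaid;
        return -1;                      /* pretend the allocation failed */
}

static int add_meta_sketch(int metaid)
{
        struct ops_sketch *ops = find_ops_get(metaid);
        int ret;

        if (!ops)
                return -2;              /* -ENOENT in the kernel */
        ret = __add_meta_sketch(ops, metaid);
        if (ret)
                ops->refs--;            /* put back what the lookup took */
        return ret;
}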
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 51f235bbeb5b..23273b5303fd 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -337,13 +337,6 @@ static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_ipt_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, ipt_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_ipt_ops = {
.kind = "ipt",
.type = TCA_ACT_IPT,
@@ -354,7 +347,6 @@ static struct tc_action_ops act_ipt_ops = {
.init = tcf_ipt_init,
.walk = tcf_ipt_walker,
.lookup = tcf_ipt_search,
- .delete = tcf_ipt_delete,
.size = sizeof(struct tcf_ipt),
};
@@ -395,13 +387,6 @@ static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_xt_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, xt_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_xt_ops = {
.kind = "xt",
.type = TCA_ACT_XT,
@@ -412,7 +397,6 @@ static struct tc_action_ops act_xt_ops = {
.init = tcf_xt_init,
.walk = tcf_xt_walker,
.lookup = tcf_xt_search,
- .delete = tcf_xt_delete,
.size = sizeof(struct tcf_ipt),
};
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 38fd20f10f67..8bf66d0a6800 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -395,13 +395,6 @@ static void tcf_mirred_put_dev(struct net_device *dev)
dev_put(dev);
}
-static int tcf_mirred_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, mirred_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_mirred_ops = {
.kind = "mirred",
.type = TCA_ACT_MIRRED,
@@ -416,7 +409,6 @@ static struct tc_action_ops act_mirred_ops = {
.size = sizeof(struct tcf_mirred),
.get_dev = tcf_mirred_get_dev,
.put_dev = tcf_mirred_put_dev,
- .delete = tcf_mirred_delete,
};
static __net_init int mirred_init_net(struct net *net)
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 822e903bfc25..4313aa102440 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -300,13 +300,6 @@ static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_nat_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, nat_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_nat_ops = {
.kind = "nat",
.type = TCA_ACT_NAT,
@@ -316,7 +309,6 @@ static struct tc_action_ops act_nat_ops = {
.init = tcf_nat_init,
.walk = tcf_nat_walker,
.lookup = tcf_nat_search,
- .delete = tcf_nat_delete,
.size = sizeof(struct tcf_nat),
};
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 8a7a7cb94e83..107034070019 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -460,13 +460,6 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_pedit_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, pedit_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_pedit_ops = {
.kind = "pedit",
.type = TCA_ACT_PEDIT,
@@ -477,7 +470,6 @@ static struct tc_action_ops act_pedit_ops = {
.init = tcf_pedit_init,
.walk = tcf_pedit_walker,
.lookup = tcf_pedit_search,
- .delete = tcf_pedit_delete,
.size = sizeof(struct tcf_pedit),
};
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 06f0742db593..5d8bfa878477 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -320,13 +320,6 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_police_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, police_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
MODULE_AUTHOR("Alexey Kuznetsov");
MODULE_DESCRIPTION("Policing actions");
MODULE_LICENSE("GPL");
@@ -340,7 +333,6 @@ static struct tc_action_ops act_police_ops = {
.init = tcf_police_init,
.walk = tcf_police_walker,
.lookup = tcf_police_search,
- .delete = tcf_police_delete,
.size = sizeof(struct tcf_police),
};
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 207b4132d1b0..44e9c00657bc 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -232,13 +232,6 @@ static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_sample_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, sample_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_sample_ops = {
.kind = "sample",
.type = TCA_ACT_SAMPLE,
@@ -249,7 +242,6 @@ static struct tc_action_ops act_sample_ops = {
.cleanup = tcf_sample_cleanup,
.walk = tcf_sample_walker,
.lookup = tcf_sample_search,
- .delete = tcf_sample_delete,
.size = sizeof(struct tcf_sample),
};
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index e616523ba3c1..52400d49f81f 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -196,13 +196,6 @@ static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_simp_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, simp_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_simp_ops = {
.kind = "simple",
.type = TCA_ACT_SIMP,
@@ -213,7 +206,6 @@ static struct tc_action_ops act_simp_ops = {
.init = tcf_simp_init,
.walk = tcf_simp_walker,
.lookup = tcf_simp_search,
- .delete = tcf_simp_delete,
.size = sizeof(struct tcf_defact),
};
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 926d7bc4a89d..73e44ce2a883 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -299,13 +299,6 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_skbedit_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, skbedit_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_skbedit_ops = {
.kind = "skbedit",
.type = TCA_ACT_SKBEDIT,
@@ -316,7 +309,6 @@ static struct tc_action_ops act_skbedit_ops = {
.cleanup = tcf_skbedit_cleanup,
.walk = tcf_skbedit_walker,
.lookup = tcf_skbedit_search,
- .delete = tcf_skbedit_delete,
.size = sizeof(struct tcf_skbedit),
};
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index d6a1af0c4171..588077fafd6c 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -259,13 +259,6 @@ static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_skbmod_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, skbmod_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_skbmod_ops = {
.kind = "skbmod",
.type = TCA_ACT_SKBMOD,
@@ -276,7 +269,6 @@ static struct tc_action_ops act_skbmod_ops = {
.cleanup = tcf_skbmod_cleanup,
.walk = tcf_skbmod_walker,
.lookup = tcf_skbmod_search,
- .delete = tcf_skbmod_delete,
.size = sizeof(struct tcf_skbmod),
};
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 8f09cf08d8fe..420759153d5f 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -548,13 +548,6 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tunnel_key_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_tunnel_key_ops = {
.kind = "tunnel_key",
.type = TCA_ACT_TUNNEL_KEY,
@@ -565,7 +558,6 @@ static struct tc_action_ops act_tunnel_key_ops = {
.cleanup = tunnel_key_release,
.walk = tunnel_key_walker,
.lookup = tunnel_key_search,
- .delete = tunnel_key_delete,
.size = sizeof(struct tcf_tunnel_key),
};
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 209e70ad2c09..033d273afe50 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -296,13 +296,6 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
-static int tcf_vlan_delete(struct net *net, u32 index)
-{
- struct tc_action_net *tn = net_generic(net, vlan_net_id);
-
- return tcf_idr_delete_index(tn, index);
-}
-
static struct tc_action_ops act_vlan_ops = {
.kind = "vlan",
.type = TCA_ACT_VLAN,
@@ -313,7 +306,6 @@ static struct tc_action_ops act_vlan_ops = {
.cleanup = tcf_vlan_cleanup,
.walk = tcf_vlan_walker,
.lookup = tcf_vlan_search,
- .delete = tcf_vlan_delete,
.size = sizeof(struct tcf_vlan),
};
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index d5d2a6dc3921..f218ccf1e2d9 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -914,6 +914,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_U32_MAX + 1];
u32 htid, flags = 0;
+ size_t sel_size;
int err;
#ifdef CONFIG_CLS_U32_PERF
size_t size;
@@ -1076,8 +1077,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
}
s = nla_data(tb[TCA_U32_SEL]);
+ sel_size = struct_size(s, keys, s->nkeys);
+ if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
+ err = -EINVAL;
+ goto erridr;
+ }
- n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
+ n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
if (n == NULL) {
err = -ENOBUFS;
goto erridr;
@@ -1092,7 +1098,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
}
#endif
- memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
+ memcpy(&n->sel, s, sel_size);
RCU_INIT_POINTER(n->ht_up, ht);
n->handle = handle;
n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
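
The cls_u32.c hunk validates the netlink-supplied selector before it is copied: the TCA_U32_SEL attribute must be at least struct_size(s, keys, s->nkeys) bytes, otherwise nkeys promises more trailing keys than the attribute actually carries and the old memcpy() read past the buffer. The arithmetic behind that check, shown stand-alone with simplified types (the kernel's struct_size() additionally saturates on multiplication overflow):

#include <stddef.h>
#include <stdbool.h>

struct sel_sketch {
        unsigned char nkeys;            /* number of trailing keys claimed */
        struct { unsigned int mask, val, off; } keys[]; /* flexible array */
};

static bool sel_len_ok(const struct sel_sketch *s, size_t attr_len)
{
        size_t need = offsetof(struct sel_sketch, keys) +
                      (size_t)s->nkeys * sizeof(s->keys[0]);

        return attr_len >= need;        /* reject truncated selectors */
}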
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 35fc7252187c..c07c30b916d5 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -64,7 +64,6 @@
#include <linux/vmalloc.h>
#include <linux/reciprocal_div.h>
#include <net/netlink.h>
-#include <linux/version.h>
#include <linux/if_vlan.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
@@ -621,15 +620,20 @@ static bool cake_ddst(int flow_mode)
}
static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
- int flow_mode)
+ int flow_mode, u16 flow_override, u16 host_override)
{
- u32 flow_hash = 0, srchost_hash, dsthost_hash;
+ u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0;
u16 reduced_hash, srchost_idx, dsthost_idx;
struct flow_keys keys, host_keys;
if (unlikely(flow_mode == CAKE_FLOW_NONE))
return 0;
+ /* If both overrides are set we can skip packet dissection entirely */
+ if ((flow_override || !(flow_mode & CAKE_FLOW_FLOWS)) &&
+ (host_override || !(flow_mode & CAKE_FLOW_HOSTS)))
+ goto skip_hash;
+
skb_flow_dissect_flow_keys(skb, &keys,
FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
@@ -676,6 +680,14 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
if (flow_mode & CAKE_FLOW_FLOWS)
flow_hash = flow_hash_from_keys(&keys);
+skip_hash:
+ if (flow_override)
+ flow_hash = flow_override - 1;
+ if (host_override) {
+ dsthost_hash = host_override - 1;
+ srchost_hash = host_override - 1;
+ }
+
if (!(flow_mode & CAKE_FLOW_FLOWS)) {
if (flow_mode & CAKE_FLOW_SRC_IP)
flow_hash ^= srchost_hash;
@@ -1571,7 +1583,7 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
struct cake_sched_data *q = qdisc_priv(sch);
struct tcf_proto *filter;
struct tcf_result res;
- u32 flow = 0;
+ u16 flow = 0, host = 0;
int result;
filter = rcu_dereference_bh(q->filter_list);
@@ -1595,10 +1607,12 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
#endif
if (TC_H_MIN(res.classid) <= CAKE_QUEUES)
flow = TC_H_MIN(res.classid);
+ if (TC_H_MAJ(res.classid) <= (CAKE_QUEUES << 16))
+ host = TC_H_MAJ(res.classid) >> 16;
}
hash:
*t = cake_select_tin(sch, skb);
- return flow ?: cake_hash(*t, skb, flow_mode) + 1;
+ return cake_hash(*t, skb, flow_mode, flow, host) + 1;
}
static void cake_reconfigure(struct Qdisc *sch);
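
In sch_cake.c above, a tc filter result can now steer a packet to an explicit flow queue and host bucket via the minor and major parts of the returned classid. The values are passed into cake_hash() one above the real index so that zero keeps meaning "no override", and when both overrides are supplied the flow dissector is skipped entirely. The decode step, as a tiny sketch with a hypothetical helper name:

/* override == 0: keep the dissected hash; otherwise use (override - 1). */
static unsigned int apply_override(unsigned int dissected, unsigned short override)
{
        return override ? (unsigned int)(override - 1) : dissected;
}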
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 93c0c225ab34..180b6640e531 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -213,9 +213,14 @@ static void tls_write_space(struct sock *sk)
{
struct tls_context *ctx = tls_get_ctx(sk);
- /* We are already sending pages, ignore notification */
- if (ctx->in_tcp_sendpages)
+ /* If in_tcp_sendpages, call the lower protocol's write space handler
+ * to ensure we wake up any operations waiting there. For example,
+ * if do_tcp_sendpages were to call sk_wait_event.
+ */
+ if (ctx->in_tcp_sendpages) {
+ ctx->sk_write_space(sk);
return;
+ }
if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
gfp_t sk_allocation = sk->sk_allocation;
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 911ca6d3cb5a..bfe2dbea480b 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -74,14 +74,14 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
return 0;
if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
- return force_zc ? -ENOTSUPP : 0; /* fail or fallback */
+ return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */
bpf.command = XDP_QUERY_XSK_UMEM;
rtnl_lock();
err = xdp_umem_query(dev, queue_id);
if (err) {
- err = err < 0 ? -ENOTSUPP : -EBUSY;
+ err = err < 0 ? -EOPNOTSUPP : -EBUSY;
goto err_rtnl_unlock;
}
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index c75413d05a63..ce53639a864a 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -153,10 +153,6 @@ cc-fullversion = $(shell $(CONFIG_SHELL) \
# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4))
-# cc-if-fullversion
-# Usage: EXTRA_CFLAGS += $(call cc-if-fullversion, -lt, 040502, -O1)
-cc-if-fullversion = $(shell [ $(cc-fullversion) $(1) $(2) ] && echo $(3) || echo $(4))
-
# cc-ldoption
# Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
cc-ldoption = $(call try-run,\
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 1c48572223d1..5a2d1c9578a0 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -246,8 +246,6 @@ objtool_args += --no-fp
endif
ifdef CONFIG_GCOV_KERNEL
objtool_args += --no-unreachable
-else
-objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
endif
ifdef CONFIG_RETPOLINE
ifneq ($(RETPOLINE_CFLAGS),)
diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c
index 1832100d1b27..6d41323be291 100644
--- a/tools/bpf/bpftool/map_perf_ring.c
+++ b/tools/bpf/bpftool/map_perf_ring.c
@@ -194,8 +194,10 @@ int do_event_pipe(int argc, char **argv)
}
while (argc) {
- if (argc < 2)
+ if (argc < 2) {
BAD_ARG();
+ goto err_close_map;
+ }
if (is_prefix(*argv, "cpu")) {
char *endptr;
@@ -221,6 +223,7 @@ int do_event_pipe(int argc, char **argv)
NEXT_ARG();
} else {
BAD_ARG();
+ goto err_close_map;
}
do_all = false;
diff --git a/tools/testing/selftests/drivers/dma-buf/Makefile b/tools/testing/selftests/drivers/dma-buf/Makefile
new file mode 100644
index 000000000000..4154c3d7aa58
--- /dev/null
+++ b/tools/testing/selftests/drivers/dma-buf/Makefile
@@ -0,0 +1,5 @@
+CFLAGS += -I../../../../../usr/include/
+
+TEST_GEN_PROGS := udmabuf
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/drivers/dma-buf/udmabuf.c b/tools/testing/selftests/drivers/dma-buf/udmabuf.c
new file mode 100644
index 000000000000..376b1d6730bd
--- /dev/null
+++ b/tools/testing/selftests/drivers/dma-buf/udmabuf.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <malloc.h>
+
+#include <sys/ioctl.h>
+#include <sys/syscall.h>
+#include <linux/memfd.h>
+#include <linux/udmabuf.h>
+
+#define TEST_PREFIX "drivers/dma-buf/udmabuf"
+#define NUM_PAGES 4
+
+static int memfd_create(const char *name, unsigned int flags)
+{
+ return syscall(__NR_memfd_create, name, flags);
+}
+
+int main(int argc, char *argv[])
+{
+ struct udmabuf_create create;
+ int devfd, memfd, buf, ret;
+ off_t size;
+ void *mem;
+
+ devfd = open("/dev/udmabuf", O_RDWR);
+ if (devfd < 0) {
+ printf("%s: [skip,no-udmabuf]\n", TEST_PREFIX);
+ exit(77);
+ }
+
+ memfd = memfd_create("udmabuf-test", MFD_CLOEXEC);
+ if (memfd < 0) {
+ printf("%s: [skip,no-memfd]\n", TEST_PREFIX);
+ exit(77);
+ }
+
+ size = getpagesize() * NUM_PAGES;
+ ret = ftruncate(memfd, size);
+ if (ret == -1) {
+ printf("%s: [FAIL,memfd-truncate]\n", TEST_PREFIX);
+ exit(1);
+ }
+
+ memset(&create, 0, sizeof(create));
+
+ /* should fail (offset not page aligned) */
+ create.memfd = memfd;
+ create.offset = getpagesize()/2;
+ create.size = getpagesize();
+ buf = ioctl(devfd, UDMABUF_CREATE, &create);
+ if (buf >= 0) {
+ printf("%s: [FAIL,test-1]\n", TEST_PREFIX);
+ exit(1);
+ }
+
+ /* should fail (size not multiple of page) */
+ create.memfd = memfd;
+ create.offset = 0;
+ create.size = getpagesize()/2;
+ buf = ioctl(devfd, UDMABUF_CREATE, &create);
+ if (buf >= 0) {
+ printf("%s: [FAIL,test-2]\n", TEST_PREFIX);
+ exit(1);
+ }
+
+ /* should fail (not memfd) */
+ create.memfd = 0; /* stdin */
+ create.offset = 0;
+ create.size = size;
+ buf = ioctl(devfd, UDMABUF_CREATE, &create);
+ if (buf >= 0) {
+ printf("%s: [FAIL,test-3]\n", TEST_PREFIX);
+ exit(1);
+ }
+
+ /* should work */
+ create.memfd = memfd;
+ create.offset = 0;
+ create.size = size;
+ buf = ioctl(devfd, UDMABUF_CREATE, &create);
+ if (buf < 0) {
+ printf("%s: [FAIL,test-4]\n", TEST_PREFIX);
+ exit(1);
+ }
+
+ fprintf(stderr, "%s: ok\n", TEST_PREFIX);
+ close(buf);
+ close(memfd);
+ close(devfd);
+ return 0;
+}