-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt119
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml150
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt121
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml137
-rw-r--r--Documentation/devicetree/bindings/display/connector/hdmi-connector.txt1
-rw-r--r--Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml62
-rw-r--r--Documentation/devicetree/bindings/display/panel/ti,nspire.yaml36
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt23
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml5
-rw-r--r--Documentation/gpu/i915.rst3
-rw-r--r--MAINTAINERS4
-rw-r--r--drivers/dma-buf/Kconfig5
-rw-r--r--drivers/dma-buf/Makefile8
-rw-r--r--drivers/dma-buf/dma-buf.c28
-rw-r--r--drivers/dma-buf/dma-fence-array.c32
-rw-r--r--drivers/dma-buf/dma-fence.c55
-rw-r--r--drivers/dma-buf/dma-resv.c (renamed from drivers/dma-buf/reservation.c)156
-rw-r--r--drivers/dma-buf/selftest.c167
-rw-r--r--drivers/dma-buf/selftest.h30
-rw-r--r--drivers/dma-buf/selftests.h13
-rw-r--r--drivers/dma-buf/st-dma-fence.c574
-rw-r--r--drivers/dma-buf/sw_sync.c16
-rw-r--r--drivers/dma-buf/sync_file.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c20
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.c9
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c12
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c7
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c11
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c8
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.h7
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c7
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c5
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c4
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c10
-rw-r--r--drivers/gpu/drm/armada/armada_debugfs.c8
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h5
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c8
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c3
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c3
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c7
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c8
-rw-r--r--drivers/gpu/drm/armada/armada_plane.c4
-rw-r--r--drivers/gpu/drm/armada/armada_trace.h5
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c2
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c3
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c7
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c20
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h1
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c13
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c60
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c83
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.h13
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c10
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c2
-rw-r--r--drivers/gpu/drm/drm_connector.c10
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c32
-rw-r--r--drivers/gpu/drm/drm_drv.c14
-rw-r--r--drivers/gpu/drm/drm_gem.c29
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c4
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c61
-rw-r--r--drivers/gpu/drm/drm_kms_helper_common.c2
-rw-r--r--drivers/gpu/drm/drm_panel.c102
-rw-r--r--drivers/gpu/drm/drm_syncobj.c98
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c93
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c58
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h15
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c68
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h27
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c65
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.h4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c64
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h10
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c59
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c158
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h11
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu.c165
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu.h20
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c284
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c326
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.h114
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.c48
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c1
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c10
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c19
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/Kconfig10
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/Makefile3
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h1
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c351
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c250
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h48
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c2
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug1
-rw-r--r--drivers/gpu/drm/i915/Makefile12
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7017.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7xxx.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ivch.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ns2501.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_sil164.c2
-rw-r--r--drivers/gpu/drm/i915/display/dvo_tfp410.c2
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c65
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c29
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c138
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h189
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c147
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h38
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h (renamed from drivers/gpu/drm/i915/intel_drv.h)158
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c102
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c46
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c51
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c257
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.h70
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c29
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c39
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c43
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c143
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c2
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c84
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c20
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_busy.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.c127
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_client_blt.c41
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c170
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c49
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c325
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_fence.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c24
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c80
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h16
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_blt.c370
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_blt.h25
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c13
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c18
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c74
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.h31
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.h35
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_wait.c18
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.c31
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c120
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c16
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c4
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c193
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c9
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c128
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c141
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h16
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c49
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c87
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h41
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c243
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c68
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.h18
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pool.c177
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pool.h34
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pool_types.h29
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h59
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_user.c303
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_user.h25
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c36
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c455
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.h44
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c40
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h31
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c109
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c486
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc_reg.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c166
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c16
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ringbuffer.c147
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c132
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline_types.h20
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c26
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c98
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_context.c456
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine.c28
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine.h14
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_cs.c26
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_pm.c83
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c17
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c143
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_timeline.c6
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c33
-rw-r--r--drivers/gpu/drm/i915/gt/selftests/mock_timeline.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c38
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h43
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c23
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h23
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c22
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h22
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c31
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h23
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h23
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c30
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.h23
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h24
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c108
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h26
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c57
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.h33
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c13
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c395
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.h62
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c292
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h134
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc.c21
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c180
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c47
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h8
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c15
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c57
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c36
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c4
-rw-r--r--drivers/gpu/drm/i915/i915_active.c331
-rw-r--r--drivers/gpu/drm/i915/i915_active.h33
-rw-r--r--drivers/gpu/drm/i915/i915_active_types.h17
-rw-r--r--drivers/gpu/drm/i915/i915_buddy.c428
-rw-r--r--drivers/gpu/drm/i915/i915_buddy.h128
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c4
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c290
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c791
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h585
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c295
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_batch_pool.c132
-rw-r--r--drivers/gpu/drm/i915/i915_gem_batch_pool.h26
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c9
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.c108
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c181
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h6
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c168
-rw-r--r--drivers/gpu/drm/i915/i915_globals.c1
-rw-r--r--drivers/gpu/drm/i915/i915_globals.h1
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c288
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h7
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1095
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h79
-rw-r--r--drivers/gpu/drm/i915/i915_memcpy.c2
-rw-r--r--drivers/gpu/drm/i915/i915_memcpy.h32
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c5
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c28
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c546
-rw-r--r--drivers/gpu/drm/i915/i915_perf.h32
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c294
-rw-r--r--drivers/gpu/drm/i915/i915_priolist_types.h25
-rw-r--r--drivers/gpu/drm/i915/i915_query.c5
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h131
-rw-r--r--drivers/gpu/drm/i915/i915_request.c256
-rw-r--r--drivers/gpu/drm/i915/i915_request.h5
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c3
-rw-r--r--drivers/gpu/drm/i915/i915_selftest.h2
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c3
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.h14
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c31
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.h11
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence_work.c95
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence_work.h44
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.h14
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h12
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c78
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h59
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c4
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c80
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h29
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c7
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c5
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h1
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c2
-rw-r--r--drivers/gpu/drm/i915/intel_pch.c201
-rw-r--r--drivers/gpu/drm/i915/intel_pch.h73
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c19
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c1
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c4
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c101
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h30
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.c80
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h73
-rw-r--r--drivers/gpu/drm/i915/intel_wopcm.c270
-rw-r--r--drivers/gpu/drm/i915/intel_wopcm.h20
-rw-r--r--drivers/gpu/drm/i915/oa/Makefile7
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_bdw.c35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_bdw.h7
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_bxt.c35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_bxt.h7
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h7
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h7
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_chv.c35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_chv.h7
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_cnl.c35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_cnl.h7
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_glk.c35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_glk.h7
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_hsw.c35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_hsw.h7
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_icl.c35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_icl.h7
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h7
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h7
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h7
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h7
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c35
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h7
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c3
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_buddy.c720
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c19
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h6
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_mock_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c46
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c30
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.h6
-rw-r--r--drivers/gpu/drm/i915/selftests/lib_sw_fence.c1
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c1
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.h4
-rw-r--r--drivers/gpu/drm/imx/Makefile1
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c2
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c11
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c11
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_fb.c2
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.c19
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c40
-rw-r--r--drivers/gpu/drm/meson/meson_drv.h13
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c4
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.h12
-rw-r--r--drivers/gpu/drm/meson/meson_overlay.c2
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c12
-rw-r--r--drivers/gpu/drm/meson/meson_registers.h136
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c71
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c171
-rw-r--r--drivers/gpu/drm/meson/meson_venc_cvbs.c13
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c92
-rw-r--r--drivers/gpu/drm/meson/meson_vpp.c35
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c18
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c2
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c2
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_out.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/arb.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c54
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/cursor.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/atom.h14
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base507c.c26
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base827c.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base907c.c65
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base917c.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c43
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c18
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly507e.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly827e.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly907e.c13
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly917e.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c111
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.h10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c61
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c72
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c77
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c31
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioc32.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/mmu.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c188
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c3
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Kconfig38
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Makefile6
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c251
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c271
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c262
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c755
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c390
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c513
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c11
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c7
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c9
-rw-r--r--drivers/gpu/drm/panel/Kconfig46
-rw-r--r--drivers/gpu/drm/panel/Makefile6
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c34
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lb035q02.c237
-rw-r--r--drivers/gpu/drm/panel/panel-nec-nl8048hl11.c248
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c226
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c64
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx565akm.c701
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c399
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td043mtea1.c509
-rw-r--r--drivers/gpu/drm/panfrost/Makefile1
-rw-r--r--drivers/gpu/drm/panfrost/TODO15
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c13
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.h1
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c18
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h29
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c139
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c140
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.h23
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c107
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c27
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c413
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.h9
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c29
-rw-r--r--drivers/gpu/drm/qxl/qxl_debugfs.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c6
-rw-r--r--drivers/gpu/drm/radeon/cik.c2
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen_dma.c2
-rw-r--r--drivers/gpu/drm/radeon/r100.c2
-rw-r--r--drivers/gpu/drm/radeon/r200.c2
-rw-r--r--drivers/gpu/drm/radeon/r600.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_dma.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h8
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h18
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_mn.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_sync.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770_dma.c2
-rw-r--r--drivers/gpu/drm/radeon/si_dma.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c8
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c54
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h2
-rw-r--r--drivers/gpu/drm/tegra/dc.c13
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c5
-rw-r--r--drivers/gpu/drm/tegra/drm.c8
-rw-r--r--drivers/gpu/drm/tegra/drm.h3
-rw-r--r--drivers/gpu/drm/tegra/dsi.c8
-rw-r--r--drivers/gpu/drm/tegra/fb.c6
-rw-r--r--drivers/gpu/drm/tegra/gem.c3
-rw-r--r--drivers/gpu/drm/tegra/gem.h1
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c1
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c5
-rw-r--r--drivers/gpu/drm/tegra/hub.c3
-rw-r--r--drivers/gpu/drm/tegra/hub.h1
-rw-r--r--drivers/gpu/drm/tegra/plane.c1
-rw-r--r--drivers/gpu/drm/tegra/sor.c3
-rw-r--r--drivers/gpu/drm/tegra/vic.c1
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c44
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c118
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c16
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c20
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c2
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c4
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c6
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c16
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_lock.c100
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_lock.h32
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.h7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c180
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h126
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c33
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c62
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.h3
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c2
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c16
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c26
-rw-r--r--drivers/gpu/ipu-v3/ipu-image-convert.c230
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c18
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c29
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c6
-rw-r--r--drivers/video/fbdev/efifb.c27
-rw-r--r--drivers/video/fbdev/mmp/core.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/Kconfig5
-rw-r--r--drivers/video/fbdev/pvr2fb.c6
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c2
-rw-r--r--drivers/video/fbdev/sm712fb.c8
-rw-r--r--drivers/video/fbdev/udlfb.c2
-rw-r--r--drivers/video/fbdev/via/via-core.c43
-rw-r--r--include/drm/bridge/dw_hdmi.h2
-rw-r--r--include/drm/drmP.h2
-rw-r--r--include/drm/drm_connector.h4
-rw-r--r--include/drm/drm_gem.h8
-rw-r--r--include/drm/drm_gem_shmem_helper.h15
-rw-r--r--include/drm/drm_panel.h183
-rw-r--r--include/drm/i915_pciids.h5
-rw-r--r--include/drm/ttm/ttm_bo_api.h12
-rw-r--r--include/drm/ttm/ttm_bo_driver.h14
-rw-r--r--include/linux/amba/clcd-regs.h1
-rw-r--r--include/linux/dma-buf.h4
-rw-r--r--include/linux/dma-fence.h34
-rw-r--r--include/linux/dma-resv.h (renamed from include/linux/reservation.h)119
-rw-r--r--include/uapi/drm/etnaviv_drm.h10
-rw-r--r--include/uapi/drm/panfrost_drm.h25
655 files changed, 20223 insertions, 13354 deletions
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt
deleted file mode 100644
index 3a50a7862cf3..000000000000
--- a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt
+++ /dev/null
@@ -1,119 +0,0 @@
-Amlogic specific extensions to the Synopsys Designware HDMI Controller
-======================================================================
-
-The Amlogic Meson Synopsys Designware Integration is composed of :
-- A Synopsys DesignWare HDMI Controller IP
-- A TOP control block controlling the Clocks and PHY
-- A custom HDMI PHY in order to convert video to TMDS signal
- ___________________________________
-| HDMI TOP |<= HPD
-|___________________________________|
-| | |
-| Synopsys HDMI | HDMI PHY |=> TMDS
-| Controller |________________|
-|___________________________________|<=> DDC
-
-The HDMI TOP block only supports HPD sensing.
-The Synopsys HDMI Controller interrupt is routed through the
-TOP Block interrupt.
-Communication to the TOP Block and the Synopsys HDMI Controller is done
-via a pair of dedicated addr+read/write registers.
-The HDMI PHY is configured by registers in the HHI register block.
-
-Pixel data arrives in 4:4:4 format from the VENC block and the VPU HDMI mux
-selects either the ENCI encoder for the 576i or 480i formats or the ENCP
-encoder for all the other formats including interlaced HD formats.
-
-The VENC uses a DVI encoder on top of the ENCI or ENCP encoders to generate
-DVI timings for the HDMI controller.
-
-Amlogic Meson GXBB, GXL and GXM SoCs families embeds the Synopsys DesignWare
-HDMI TX IP version 2.01a with HDCP and I2C & S/PDIF
-audio source interfaces.
-
-Required properties:
-- compatible: value should be different for each SoC family as :
- - GXBB (S905) : "amlogic,meson-gxbb-dw-hdmi"
- - GXL (S905X, S905D) : "amlogic,meson-gxl-dw-hdmi"
- - GXM (S912) : "amlogic,meson-gxm-dw-hdmi"
- followed by the common "amlogic,meson-gx-dw-hdmi"
- - G12A (S905X2, S905Y2, S905D2) : "amlogic,meson-g12a-dw-hdmi"
-- reg: Physical base address and length of the controller's registers.
-- interrupts: The HDMI interrupt number
-- clocks, clock-names : must have the phandles to the HDMI iahb and isfr clocks,
- and the Amlogic Meson venci clocks as described in
- Documentation/devicetree/bindings/clock/clock-bindings.txt,
- the clocks are soc specific, the clock-names should be "iahb", "isfr", "venci"
-- resets, resets-names: must have the phandles to the HDMI apb, glue and phy
- resets as described in :
- Documentation/devicetree/bindings/reset/reset.txt,
- the reset-names should be "hdmitx_apb", "hdmitx", "hdmitx_phy"
-
-Optional properties:
-- hdmi-supply: Optional phandle to an external 5V regulator to power the HDMI
- logic, as described in the file ../regulator/regulator.txt
-
-Required nodes:
-
-The connections to the HDMI ports are modeled using the OF graph
-bindings specified in Documentation/devicetree/bindings/graph.txt.
-
-The following table lists for each supported model the port number
-corresponding to each HDMI output and input.
-
- Port 0 Port 1
------------------------------------------
- S905 (GXBB) VENC Input TMDS Output
- S905X (GXL) VENC Input TMDS Output
- S905D (GXL) VENC Input TMDS Output
- S912 (GXM) VENC Input TMDS Output
- S905X2 (G12A) VENC Input TMDS Output
- S905Y2 (G12A) VENC Input TMDS Output
- S905D2 (G12A) VENC Input TMDS Output
-
-Example:
-
-hdmi-connector {
- compatible = "hdmi-connector";
- type = "a";
-
- port {
- hdmi_connector_in: endpoint {
- remote-endpoint = <&hdmi_tx_tmds_out>;
- };
- };
-};
-
-hdmi_tx: hdmi-tx@c883a000 {
- compatible = "amlogic,meson-gxbb-dw-hdmi", "amlogic,meson-gx-dw-hdmi";
- reg = <0x0 0xc883a000 0x0 0x1c>;
- interrupts = <GIC_SPI 57 IRQ_TYPE_EDGE_RISING>;
- resets = <&reset RESET_HDMITX_CAPB3>,
- <&reset RESET_HDMI_SYSTEM_RESET>,
- <&reset RESET_HDMI_TX>;
- reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy";
- clocks = <&clkc CLKID_HDMI_PCLK>,
- <&clkc CLKID_CLK81>,
- <&clkc CLKID_GCLK_VENCI_INT0>;
- clock-names = "isfr", "iahb", "venci";
- #address-cells = <1>;
- #size-cells = <0>;
-
- /* VPU VENC Input */
- hdmi_tx_venc_port: port@0 {
- reg = <0>;
-
- hdmi_tx_in: endpoint {
- remote-endpoint = <&hdmi_tx_out>;
- };
- };
-
- /* TMDS Output */
- hdmi_tx_tmds_port: port@1 {
- reg = <1>;
-
- hdmi_tx_tmds_out: endpoint {
- remote-endpoint = <&hdmi_connector_in>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
new file mode 100644
index 000000000000..fb747682006d
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
@@ -0,0 +1,150 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 BayLibre, SAS
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/display/amlogic,meson-dw-hdmi.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Amlogic specific extensions to the Synopsys Designware HDMI Controller
+
+maintainers:
+ - Neil Armstrong <[email protected]>
+
+description: |
+ The Amlogic Meson Synopsys Designware Integration is composed of
+ - A Synopsys DesignWare HDMI Controller IP
+ - A TOP control block controlling the Clocks and PHY
+ - A custom HDMI PHY in order to convert video to TMDS signal
+ ___________________________________
+ | HDMI TOP |<= HPD
+ |___________________________________|
+ | | |
+ | Synopsys HDMI | HDMI PHY |=> TMDS
+ | Controller |________________|
+ |___________________________________|<=> DDC
+
+ The HDMI TOP block only supports HPD sensing.
+ The Synopsys HDMI Controller interrupt is routed through the
+ TOP Block interrupt.
+ Communication to the TOP Block and the Synopsys HDMI Controller is done
+ via a pair of dedicated addr+read/write registers.
+ The HDMI PHY is configured by registers in the HHI register block.
+
+ Pixel data arrives in "4:4:4" format from the VENC block and the VPU HDMI mux
+ selects either the ENCI encoder for the 576i or 480i formats or the ENCP
+ encoder for all the other formats including interlaced HD formats.
+
+ The VENC uses a DVI encoder on top of the ENCI or ENCP encoders to generate
+ DVI timings for the HDMI controller.
+
+ The Amlogic Meson GXBB, GXL and GXM SoC families embed the Synopsys DesignWare
+ HDMI TX IP version 2.01a with HDCP and I2C & S/PDIF
+ audio source interfaces.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - amlogic,meson-gxbb-dw-hdmi # GXBB (S905)
+ - amlogic,meson-gxl-dw-hdmi # GXL (S905X, S905D)
+ - amlogic,meson-gxm-dw-hdmi # GXM (S912)
+ - const: amlogic,meson-gx-dw-hdmi
+ - enum:
+ - amlogic,meson-g12a-dw-hdmi # G12A (S905X2, S905Y2, S905D2)
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 3
+
+ clock-names:
+ items:
+ - const: isfr
+ - const: iahb
+ - const: venci
+
+ resets:
+ minItems: 3
+
+ reset-names:
+ items:
+ - const: hdmitx_apb
+ - const: hdmitx
+ - const: hdmitx_phy
+
+ hdmi-supply:
+ description: phandle to an external 5V regulator to power the HDMI logic
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/phandle
+
+ port@0:
+ type: object
+ description:
+ A port node pointing to the VENC Input port node.
+
+ port@1:
+ type: object
+ description:
+ A port node pointing to the TMDS Output port node.
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ "#sound-dai-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+ - port@0
+ - port@1
+ - "#address-cells"
+ - "#size-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ hdmi_tx: hdmi-tx@c883a000 {
+ compatible = "amlogic,meson-gxbb-dw-hdmi", "amlogic,meson-gx-dw-hdmi";
+ reg = <0xc883a000 0x1c>;
+ interrupts = <57>;
+ resets = <&reset_apb>, <&reset_hdmitx>, <&reset_hdmitx_phy>;
+ reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy";
+ clocks = <&clk_isfr>, <&clk_iahb>, <&clk_venci>;
+ clock-names = "isfr", "iahb", "venci";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* VPU VENC Input */
+ hdmi_tx_venc_port: port@0 {
+ reg = <0>;
+
+ hdmi_tx_in: endpoint {
+ remote-endpoint = <&hdmi_tx_out>;
+ };
+ };
+
+ /* TMDS Output */
+ hdmi_tx_tmds_port: port@1 {
+ reg = <1>;
+
+ hdmi_tx_tmds_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
deleted file mode 100644
index be40a780501c..000000000000
--- a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
+++ /dev/null
@@ -1,121 +0,0 @@
-Amlogic Meson Display Controller
-================================
-
-The Amlogic Meson Display controller is composed of several components
-that are going to be documented below:
-
-DMC|---------------VPU (Video Processing Unit)----------------|------HHI------|
- | vd1 _______ _____________ _________________ | |
-D |-------| |----| | | | | HDMI PLL |
-D | vd2 | VIU | | Video Post | | Video Encoders |<---|-----VCLK |
-R |-------| |----| Processing | | | | |
- | osd2 | | | |---| Enci ----------|----|-----VDAC------|
-R |-------| CSC |----| Scalers | | Encp ----------|----|----HDMI-TX----|
-A | osd1 | | | Blenders | | Encl ----------|----|---------------|
-M |-------|______|----|____________| |________________| | |
-___|__________________________________________________________|_______________|
-
-
-VIU: Video Input Unit
----------------------
-
-The Video Input Unit is in charge of the pixel scanout from the DDR memory.
-It fetches the frames addresses, stride and parameters from the "Canvas" memory.
-This part is also in charge of the CSC (Colorspace Conversion).
-It can handle 2 OSD Planes and 2 Video Planes.
-
-VPP: Video Post Processing
---------------------------
-
-The Video Post Processing is in charge of the scaling and blending of the
-various planes into a single pixel stream.
-There is a special "pre-blending" used by the video planes with a dedicated
-scaler and a "post-blending" to merge with the OSD Planes.
-The OSD planes also have a dedicated scaler for one of the OSD.
-
-VENC: Video Encoders
---------------------
-
-The VENC is composed of the multiple pixel encoders :
- - ENCI : Interlace Video encoder for CVBS and Interlace HDMI
- - ENCP : Progressive Video Encoder for HDMI
- - ENCL : LCD LVDS Encoder
-The VENC Unit gets a Pixel Clocks (VCLK) from a dedicated HDMI PLL and clock
-tree and provides the scanout clock to the VPP and VIU.
-The ENCI is connected to a single VDAC for Composite Output.
-The ENCI and ENCP are connected to an on-chip HDMI Transceiver.
-
-Device Tree Bindings:
----------------------
-
-VPU: Video Processing Unit
---------------------------
-
-Required properties:
-- compatible: value should be different for each SoC family as :
- - GXBB (S905) : "amlogic,meson-gxbb-vpu"
- - GXL (S905X, S905D) : "amlogic,meson-gxl-vpu"
- - GXM (S912) : "amlogic,meson-gxm-vpu"
- followed by the common "amlogic,meson-gx-vpu"
- - G12A (S905X2, S905Y2, S905D2) : "amlogic,meson-g12a-vpu"
-- reg: base address and size of he following memory-mapped regions :
- - vpu
- - hhi
-- reg-names: should contain the names of the previous memory regions
-- interrupts: should contain the VENC Vsync interrupt number
-- amlogic,canvas: phandle to canvas provider node as described in the file
- ../soc/amlogic/amlogic,canvas.txt
-
-Optional properties:
-- power-domains: Optional phandle to associated power domain as described in
- the file ../power/power_domain.txt
-
-Required nodes:
-
-The connections to the VPU output video ports are modeled using the OF graph
-bindings specified in Documentation/devicetree/bindings/graph.txt.
-
-The following table lists for each supported model the port number
-corresponding to each VPU output.
-
- Port 0 Port 1
------------------------------------------
- S905 (GXBB) CVBS VDAC HDMI-TX
- S905X (GXL) CVBS VDAC HDMI-TX
- S905D (GXL) CVBS VDAC HDMI-TX
- S912 (GXM) CVBS VDAC HDMI-TX
- S905X2 (G12A) CVBS VDAC HDMI-TX
- S905Y2 (G12A) CVBS VDAC HDMI-TX
- S905D2 (G12A) CVBS VDAC HDMI-TX
-
-Example:
-
-tv-connector {
- compatible = "composite-video-connector";
-
- port {
- tv_connector_in: endpoint {
- remote-endpoint = <&cvbs_vdac_out>;
- };
- };
-};
-
-vpu: vpu@d0100000 {
- compatible = "amlogic,meson-gxbb-vpu";
- reg = <0x0 0xd0100000 0x0 0x100000>,
- <0x0 0xc883c000 0x0 0x1000>,
- <0x0 0xc8838000 0x0 0x1000>;
- reg-names = "vpu", "hhi", "dmc";
- interrupts = <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- /* CVBS VDAC output port */
- port@0 {
- reg = <0>;
-
- cvbs_vdac_out: endpoint {
- remote-endpoint = <&tv_connector_in>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
new file mode 100644
index 000000000000..d1205a6697a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
@@ -0,0 +1,137 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 BayLibre, SAS
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/display/amlogic,meson-vpu.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Amlogic Meson Display Controller
+
+maintainers:
+ - Neil Armstrong <[email protected]>
+
+description: |
+ The Amlogic Meson Display controller is composed of several components
+ that are going to be documented below
+
+ DMC|---------------VPU (Video Processing Unit)----------------|------HHI------|
+ | vd1 _______ _____________ _________________ | |
+ D |-------| |----| | | | | HDMI PLL |
+ D | vd2 | VIU | | Video Post | | Video Encoders |<---|-----VCLK |
+ R |-------| |----| Processing | | | | |
+ | osd2 | | | |---| Enci ----------|----|-----VDAC------|
+ R |-------| CSC |----| Scalers | | Encp ----------|----|----HDMI-TX----|
+ A | osd1 | | | Blenders | | Encl ----------|----|---------------|
+ M |-------|______|----|____________| |________________| | |
+ ___|__________________________________________________________|_______________|
+
+
+ VIU: Video Input Unit
+ ---------------------
+
+ The Video Input Unit is in charge of the pixel scanout from the DDR memory.
+ It fetches the frame addresses, stride and parameters from the "Canvas" memory.
+ This part is also in charge of the CSC (Colorspace Conversion).
+ It can handle 2 OSD Planes and 2 Video Planes.
+
+ VPP: Video Post Processing
+ --------------------------
+
+ The Video Post Processing is in charge of the scaling and blending of the
+ various planes into a single pixel stream.
+ There is a special "pre-blending" used by the video planes with a dedicated
+ scaler and a "post-blending" to merge with the OSD Planes.
+ The OSD planes also have a dedicated scaler for one of the OSDs.
+
+ VENC: Video Encoders
+ --------------------
+
+ The VENC is composed of multiple pixel encoders
+ - ENCI : Interlace Video encoder for CVBS and Interlace HDMI
+ - ENCP : Progressive Video Encoder for HDMI
+ - ENCL : LCD LVDS Encoder
+ The VENC Unit gets a Pixel Clock (VCLK) from a dedicated HDMI PLL and clock
+ tree and provides the scanout clock to the VPP and VIU.
+ The ENCI is connected to a single VDAC for Composite Output.
+ The ENCI and ENCP are connected to an on-chip HDMI Transceiver.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - amlogic,meson-gxbb-vpu # GXBB (S905)
+ - amlogic,meson-gxl-vpu # GXL (S905X, S905D)
+ - amlogic,meson-gxm-vpu # GXM (S912)
+ - const: amlogic,meson-gx-vpu
+ - enum:
+ - amlogic,meson-g12a-vpu # G12A (S905X2, S905Y2, S905D2)
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: vpu
+ - const: hhi
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+ description: phandle to the associated power domain
+
+ port@0:
+ type: object
+ description:
+ A port node pointing to the CVBS VDAC port node.
+
+ port@1:
+ type: object
+ description:
+ A port node pointing to the HDMI-TX port node.
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - port@0
+ - port@1
+ - "#address-cells"
+ - "#size-cells"
+
+examples:
+ - |
+ vpu: vpu@d0100000 {
+ compatible = "amlogic,meson-gxbb-vpu", "amlogic,meson-gx-vpu";
+ reg = <0xd0100000 0x100000>, <0xc883c000 0x1000>;
+ reg-names = "vpu", "hhi";
+ interrupts = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* CVBS VDAC output port */
+ port@0 {
+ reg = <0>;
+
+ cvbs_vdac_out: endpoint {
+ remote-endpoint = <&tv_connector_in>;
+ };
+ };
+
+ /* HDMI TX output port */
+ port@1 {
+ reg = <1>;
+
+ hdmi_tx_out: endpoint {
+ remote-endpoint = <&hdmi_tx_in>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/connector/hdmi-connector.txt b/Documentation/devicetree/bindings/display/connector/hdmi-connector.txt
index 508aee461e0d..aeb07c4bd703 100644
--- a/Documentation/devicetree/bindings/display/connector/hdmi-connector.txt
+++ b/Documentation/devicetree/bindings/display/connector/hdmi-connector.txt
@@ -9,6 +9,7 @@ Optional properties:
- label: a symbolic name for the connector
- hpd-gpios: HPD GPIO number
- ddc-i2c-bus: phandle link to the I2C controller used for DDC EDID probing
+- ddc-en-gpios: GPIO signal to enable the DDC bus
Required nodes:
- Video port for HDMI input
diff --git a/Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml b/Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml
new file mode 100644
index 000000000000..aa788eaa2f71
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/nec,nl8048hl11.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NEC NL8048HL11 4.1" WVGA TFT LCD panel
+
+description:
+ The NEC NL8048HL11 is a 4.1" WVGA TFT LCD panel with a 24-bit RGB parallel
+ data interface and an SPI control interface.
+
+maintainers:
+ - Laurent Pinchart <[email protected]>
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ const: nec,nl8048hl11
+
+ label: true
+ port: true
+ reg: true
+ reset-gpios: true
+
+ spi-max-frequency:
+ maximum: 10000000
+
+required:
+ - compatible
+ - reg
+ - reset-gpios
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ spi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ lcd_panel: panel@0 {
+ compatible = "nec,nl8048hl11";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+
+ reset-gpios = <&gpio7 7 GPIO_ACTIVE_LOW>;
+
+ port {
+ lcd_in: endpoint {
+ remote-endpoint = <&dpi_out>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/ti,nspire.yaml b/Documentation/devicetree/bindings/display/panel/ti,nspire.yaml
new file mode 100644
index 000000000000..5c5a3b519e31
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/ti,nspire.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/ti,nspire.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments NSPIRE Display Panels
+
+maintainers:
+ - Linus Walleij <[email protected]>
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ti,nspire-cx-lcd-panel
+ - ti,nspire-classic-lcd-panel
+ port: true
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ panel {
+ compatible = "ti,nspire-cx-lcd-panel";
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&pads>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt
index 6bb59ab39f2f..ce4c1fc9116c 100644
--- a/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt
+++ b/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt
@@ -14,6 +14,8 @@ Required properties:
- rockchip,grf: this soc should set GRF regs to mux vopl/vopb.
- ports: contain a port node with endpoint definitions as defined in [2].
For vopb,set the reg = <0> and set the reg = <1> for vopl.
+- video port 0 for the VOP input, the remote endpoint may be vopb or vopl
+- video port 1 for either a panel or subsequent encoder
Optional properties:
- power-domains: a phandle to mipi dsi power domain node.
@@ -40,11 +42,12 @@ Example:
ports {
#address-cells = <1>;
#size-cells = <0>;
- reg = <1>;
- mipi_in: port {
+ mipi_in: port@0 {
+ reg = <0>;
#address-cells = <1>;
#size-cells = <0>;
+
mipi_in_vopb: endpoint@0 {
reg = <0>;
remote-endpoint = <&vopb_out_mipi>;
@@ -54,6 +57,16 @@ Example:
remote-endpoint = <&vopl_out_mipi>;
};
};
+
+ mipi_out: port@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mipi_out_panel: endpoint {
+ remote-endpoint = <&panel_in_mipi>;
+ };
+ };
};
panel {
@@ -64,5 +77,11 @@ Example:
pinctrl-names = "default";
pinctrl-0 = <&lcd_en>;
backlight = <&backlight>;
+
+ port {
+ panel_in_mipi: endpoint {
+ remote-endpoint = <&mipi_out_panel>;
+ };
+ };
};
};
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 6992bbbbffab..29dcc6f8a64a 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -511,6 +511,8 @@ patternProperties:
description: Lenovo Group Ltd.
"^lg,.*":
description: LG Corporation
+ "^lgphilips,.*":
+ description: LG Display
"^libretech,.*":
description: Shenzhen Libre Technology Co., Ltd
"^licheepi,.*":
@@ -933,6 +935,9 @@ patternProperties:
description: Tecon Microprocessor Technologies, LLC.
"^topeet,.*":
description: Topeet
+ "^toppoly,.*":
+ description: TPO (deprecated, use tpo)
+ deprecated: true
"^toradex,.*":
description: Toradex AG
"^toshiba,.*":
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 0e322688be5c..3415255ad3dc 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -91,9 +91,6 @@ Frontbuffer Tracking
.. kernel-doc:: drivers/gpu/drm/i915/display/intel_frontbuffer.c
:internal:
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem.c
- :functions: i915_gem_track_fb
-
Display FIFO Underrun Reporting
-------------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 1bd7b9c2d146..c2d975da561f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5334,8 +5334,8 @@ L: [email protected]
W: http://linux-meson.com/
S: Supported
F: drivers/gpu/drm/meson/
-F: Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
-F: Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt
+F: Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
+F: Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
F: Documentation/gpu/meson.rst
T: git git://anongit.freedesktop.org/drm/drm-misc
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index b6a9c2f1bc41..a23b6752d11a 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -39,4 +39,9 @@ config UDMABUF
A driver to let userspace turn memfd regions into dma-bufs.
Qemu can use this to create host dmabufs for guest framebuffers.
+config DMABUF_SELFTESTS
+ tristate "Selftests for the dma-buf interfaces"
+ default n
+ depends on DMA_SHARED_BUFFER
+
endmenu
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index e8c7310cb800..03479da06422 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,6 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
- reservation.o seqno-fence.o
+ dma-resv.o seqno-fence.o
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
obj-$(CONFIG_UDMABUF) += udmabuf.o
+
+dmabuf_selftests-y := \
+ selftest.o \
+ st-dma-fence.o
+
+obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o
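
For anyone unfamiliar with kbuild composite objects: the dmabuf_selftests-y
list above links selftest.o and st-dma-fence.o into a single
dmabuf_selftests.ko (or built-in object) whenever CONFIG_DMABUF_SELFTESTS
is enabled.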
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index f45bfb29ef96..433d91d710e4 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
@@ -104,8 +104,8 @@ static int dma_buf_release(struct inode *inode, struct file *file)
list_del(&dmabuf->list_node);
mutex_unlock(&db_list.lock);
- if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
- reservation_object_fini(dmabuf->resv);
+ if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
+ dma_resv_fini(dmabuf->resv);
module_put(dmabuf->owner);
kfree(dmabuf);
@@ -165,7 +165,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
* To support cross-device and cross-driver synchronization of buffer access
* implicit fences (represented internally in the kernel with &struct fence) can
* be attached to a &dma_buf. The glue for that and a few related things are
- * provided in the &reservation_object structure.
+ * provided in the &dma_resv structure.
*
* Userspace can query the state of these implicitly tracked fences using poll()
* and related system calls:
@@ -195,8 +195,8 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
struct dma_buf *dmabuf;
- struct reservation_object *resv;
- struct reservation_object_list *fobj;
+ struct dma_resv *resv;
+ struct dma_resv_list *fobj;
struct dma_fence *fence_excl;
__poll_t events;
unsigned shared_count, seq;
@@ -506,13 +506,13 @@ err_alloc_file:
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
struct dma_buf *dmabuf;
- struct reservation_object *resv = exp_info->resv;
+ struct dma_resv *resv = exp_info->resv;
struct file *file;
size_t alloc_size = sizeof(struct dma_buf);
int ret;
if (!exp_info->resv)
- alloc_size += sizeof(struct reservation_object);
+ alloc_size += sizeof(struct dma_resv);
else
/* prevent &dma_buf[1] == dma_buf->resv */
alloc_size += 1;
@@ -544,8 +544,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
if (!resv) {
- resv = (struct reservation_object *)&dmabuf[1];
- reservation_object_init(resv);
+ resv = (struct dma_resv *)&dmabuf[1];
+ dma_resv_init(resv);
}
dmabuf->resv = resv;
@@ -909,11 +909,11 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
{
bool write = (direction == DMA_BIDIRECTIONAL ||
direction == DMA_TO_DEVICE);
- struct reservation_object *resv = dmabuf->resv;
+ struct dma_resv *resv = dmabuf->resv;
long ret;
/* Wait on any implicit rendering fences */
- ret = reservation_object_wait_timeout_rcu(resv, write, true,
+ ret = dma_resv_wait_timeout_rcu(resv, write, true,
MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
return ret;
@@ -1154,8 +1154,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
int ret;
struct dma_buf *buf_obj;
struct dma_buf_attachment *attach_obj;
- struct reservation_object *robj;
- struct reservation_object_list *fobj;
+ struct dma_resv *robj;
+ struct dma_resv_list *fobj;
struct dma_fence *fence;
unsigned seq;
int count = 0, attach_count, shared_count, i;
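
The reworked comment above is a reminder that the implicit fences now live in
&struct dma_resv but remain visible to userspace through poll() on the dma-buf
file descriptor. A minimal userspace sketch, assuming dmabuf_fd and timeout_ms
are provided by the caller (hypothetical names); per dma_buf_poll(), POLLIN
waits only on the exclusive fence (pending writes), while POLLOUT waits on the
shared fences as well:

    #include <poll.h>

    /* Block until pending writes on the buffer complete (or timeout). */
    struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLIN };
    int ret = poll(&pfd, 1, timeout_ms); /* >0 ready, 0 timed out, <0 error */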
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index 12c6f64c0bc2..d3fbd950be94 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -13,6 +13,8 @@
#include <linux/slab.h>
#include <linux/dma-fence-array.h>
+#define PENDING_ERROR 1
+
static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
{
return "dma_fence_array";
@@ -23,10 +25,29 @@ static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
return "unbound";
}
+static void dma_fence_array_set_pending_error(struct dma_fence_array *array,
+ int error)
+{
+ /*
+ * Propagate the first error reported by any of our fences, but only
+ * before we ourselves are signaled.
+ */
+ if (error)
+ cmpxchg(&array->base.error, PENDING_ERROR, error);
+}
+
+static void dma_fence_array_clear_pending_error(struct dma_fence_array *array)
+{
+ /* Clear the error flag if not actually set. */
+ cmpxchg(&array->base.error, PENDING_ERROR, 0);
+}
+
static void irq_dma_fence_array_work(struct irq_work *wrk)
{
struct dma_fence_array *array = container_of(wrk, typeof(*array), work);
+ dma_fence_array_clear_pending_error(array);
+
dma_fence_signal(&array->base);
dma_fence_put(&array->base);
}
@@ -38,6 +59,8 @@ static void dma_fence_array_cb_func(struct dma_fence *f,
container_of(cb, struct dma_fence_array_cb, cb);
struct dma_fence_array *array = array_cb->array;
+ dma_fence_array_set_pending_error(array, f->error);
+
if (atomic_dec_and_test(&array->num_pending))
irq_work_queue(&array->work);
else
@@ -63,9 +86,14 @@ static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
dma_fence_get(&array->base);
if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
dma_fence_array_cb_func)) {
+ int error = array->fences[i]->error;
+
+ dma_fence_array_set_pending_error(array, error);
dma_fence_put(&array->base);
- if (atomic_dec_and_test(&array->num_pending))
+ if (atomic_dec_and_test(&array->num_pending)) {
+ dma_fence_array_clear_pending_error(array);
return false;
+ }
}
}
@@ -142,6 +170,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
array->fences = fences;
+ array->base.error = PENDING_ERROR;
+
return array;
}
EXPORT_SYMBOL(dma_fence_array_create);
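
To see the effect of the sentinel: a hedged driver-side sketch (fence_a and
fence_b are hypothetical fences the caller holds references to). The error
must be set before the component fence signals, and the array's callbacks --
and therefore the propagation -- only run once signaling has been enabled on
the array, e.g. by waiting on it:

    #include <linux/dma-fence.h>
    #include <linux/dma-fence-array.h>
    #include <linux/slab.h>

    struct dma_fence **fences;
    struct dma_fence_array *array;

    /* The array takes ownership of the kmalloc'ed pointer array and of
     * one reference to each fence. */
    fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
    if (!fences)
            return -ENOMEM;
    fences[0] = dma_fence_get(fence_a);
    fences[1] = dma_fence_get(fence_b);

    array = dma_fence_array_create(2, fences,
                                   dma_fence_context_alloc(1), 1,
                                   false /* signal when all signal */);
    if (!array) {
            dma_fence_put(fences[0]);
            dma_fence_put(fences[1]);
            kfree(fences);
            return -ENOMEM;
    }

    dma_fence_set_error(fence_a, -EIO); /* must precede signaling */
    dma_fence_signal(fence_a);
    dma_fence_signal(fence_b);

    dma_fence_wait(&array->base, false); /* enables signaling */
    /* array->base.error is now -EIO: the first component error replaced
     * the PENDING_ERROR sentinel via cmpxchg() before the array signaled. */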
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 59ac96ec7ba8..2c136aee3e79 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -60,7 +60,7 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
*
* - Then there's also implicit fencing, where the synchronization points are
* implicitly passed around as part of shared &dma_buf instances. Such
- * implicit fences are stored in &struct reservation_object through the
+ * implicit fences are stored in &struct dma_resv through the
* &dma_buf.resv pointer.
*/
@@ -129,31 +129,27 @@ EXPORT_SYMBOL(dma_fence_context_alloc);
int dma_fence_signal_locked(struct dma_fence *fence)
{
struct dma_fence_cb *cur, *tmp;
- int ret = 0;
+ struct list_head cb_list;
lockdep_assert_held(fence->lock);
- if (WARN_ON(!fence))
+ if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &fence->flags)))
return -EINVAL;
- if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
- ret = -EINVAL;
+ /* Stash the cb_list before replacing it with the timestamp */
+ list_replace(&fence->cb_list, &cb_list);
- /*
- * we might have raced with the unlocked dma_fence_signal,
- * still run through all callbacks
- */
- } else {
- fence->timestamp = ktime_get();
- set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
- trace_dma_fence_signaled(fence);
- }
+ fence->timestamp = ktime_get();
+ set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
+ trace_dma_fence_signaled(fence);
- list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
- list_del_init(&cur->node);
+ list_for_each_entry_safe(cur, tmp, &cb_list, node) {
+ INIT_LIST_HEAD(&cur->node);
cur->func(fence, cur);
}
- return ret;
+
+ return 0;
}
EXPORT_SYMBOL(dma_fence_signal_locked);
@@ -173,28 +169,16 @@ EXPORT_SYMBOL(dma_fence_signal_locked);
int dma_fence_signal(struct dma_fence *fence)
{
unsigned long flags;
+ int ret;
if (!fence)
return -EINVAL;
- if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return -EINVAL;
-
- fence->timestamp = ktime_get();
- set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
- trace_dma_fence_signaled(fence);
-
- if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
- struct dma_fence_cb *cur, *tmp;
+ spin_lock_irqsave(fence->lock, flags);
+ ret = dma_fence_signal_locked(fence);
+ spin_unlock_irqrestore(fence->lock, flags);
- spin_lock_irqsave(fence->lock, flags);
- list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
- list_del_init(&cur->node);
- cur->func(fence, cur);
- }
- spin_unlock_irqrestore(fence->lock, flags);
- }
- return 0;
+ return ret;
}
EXPORT_SYMBOL(dma_fence_signal);
@@ -248,7 +232,8 @@ void dma_fence_release(struct kref *kref)
trace_dma_fence_destroy(fence);
- if (WARN(!list_empty(&fence->cb_list),
+ if (WARN(!list_empty(&fence->cb_list) &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
"Fence %s:%s:%llx:%llx released with pending signals!\n",
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence),
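
After the rework above, dma_fence_signal() is just fence->lock taken around
dma_fence_signal_locked(), and the callbacks are spliced onto a private list
before they run. A hedged sketch of choosing between the two entry points
(driver names hypothetical; assumes mf->base was initialized with &mf->lock):

    #include <linux/dma-fence.h>
    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    struct mydrv_fence {            /* hypothetical driver wrapper */
            struct dma_fence base;
            spinlock_t lock;
    };

    static irqreturn_t mydrv_irq(int irq, void *data)
    {
            struct mydrv_fence *mf = data;
            unsigned long flags;

            /* The fence lock is already held here, so the _locked
             * variant must be used; plain dma_fence_signal() would
             * take the same lock again. */
            spin_lock_irqsave(&mf->lock, flags);
            dma_fence_signal_locked(&mf->base);
            spin_unlock_irqrestore(&mf->lock, flags);

            return IRQ_HANDLED;
    }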
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/dma-resv.c
index ad6775b32a73..42a8f3f11681 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -32,7 +32,7 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/export.h>
/**
@@ -56,16 +56,15 @@ const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);
/**
- * reservation_object_list_alloc - allocate fence list
+ * dma_resv_list_alloc - allocate fence list
* @shared_max: number of fences we need space for
*
- * Allocate a new reservation_object_list and make sure to correctly initialize
+ * Allocate a new dma_resv_list and make sure to correctly initialize
* shared_max.
*/
-static struct reservation_object_list *
-reservation_object_list_alloc(unsigned int shared_max)
+static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
- struct reservation_object_list *list;
+ struct dma_resv_list *list;
list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
if (!list)
@@ -78,12 +77,12 @@ reservation_object_list_alloc(unsigned int shared_max)
}
/**
- * reservation_object_list_free - free fence list
+ * dma_resv_list_free - free fence list
* @list: list to free
*
- * Free a reservation_object_list and make sure to drop all references.
+ * Free a dma_resv_list and make sure to drop all references.
*/
-static void reservation_object_list_free(struct reservation_object_list *list)
+static void dma_resv_list_free(struct dma_resv_list *list)
{
unsigned int i;
@@ -97,10 +96,10 @@ static void reservation_object_list_free(struct reservation_object_list *list)
}
/**
- * reservation_object_init - initialize a reservation object
+ * dma_resv_init - initialize a reservation object
* @obj: the reservation object
*/
-void reservation_object_init(struct reservation_object *obj)
+void dma_resv_init(struct dma_resv *obj)
{
ww_mutex_init(&obj->lock, &reservation_ww_class);
@@ -109,15 +108,15 @@ void reservation_object_init(struct reservation_object *obj)
RCU_INIT_POINTER(obj->fence, NULL);
RCU_INIT_POINTER(obj->fence_excl, NULL);
}
-EXPORT_SYMBOL(reservation_object_init);
+EXPORT_SYMBOL(dma_resv_init);
/**
- * reservation_object_fini - destroys a reservation object
+ * dma_resv_fini - destroys a reservation object
* @obj: the reservation object
*/
-void reservation_object_fini(struct reservation_object *obj)
+void dma_resv_fini(struct dma_resv *obj)
{
- struct reservation_object_list *fobj;
+ struct dma_resv_list *fobj;
struct dma_fence *excl;
/*
@@ -129,32 +128,31 @@ void reservation_object_fini(struct reservation_object *obj)
dma_fence_put(excl);
fobj = rcu_dereference_protected(obj->fence, 1);
- reservation_object_list_free(fobj);
+ dma_resv_list_free(fobj);
ww_mutex_destroy(&obj->lock);
}
-EXPORT_SYMBOL(reservation_object_fini);
+EXPORT_SYMBOL(dma_resv_fini);
/**
- * reservation_object_reserve_shared - Reserve space to add shared fences to
- * a reservation_object.
+ * dma_resv_reserve_shared - Reserve space to add shared fences to
+ * a dma_resv.
* @obj: reservation object
* @num_fences: number of fences we want to add
*
- * Should be called before reservation_object_add_shared_fence(). Must
+ * Should be called before dma_resv_add_shared_fence(). Must
* be called with obj->lock held.
*
* RETURNS
* Zero for success, or -errno
*/
-int reservation_object_reserve_shared(struct reservation_object *obj,
- unsigned int num_fences)
+int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
- struct reservation_object_list *old, *new;
+ struct dma_resv_list *old, *new;
unsigned int i, j, k, max;
- reservation_object_assert_held(obj);
+ dma_resv_assert_held(obj);
- old = reservation_object_get_list(obj);
+ old = dma_resv_get_list(obj);
if (old && old->shared_max) {
if ((old->shared_count + num_fences) <= old->shared_max)
@@ -166,7 +164,7 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
max = 4;
}
- new = reservation_object_list_alloc(max);
+ new = dma_resv_list_alloc(max);
if (!new)
return -ENOMEM;
@@ -180,7 +178,7 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
struct dma_fence *fence;
fence = rcu_dereference_protected(old->shared[i],
- reservation_object_held(obj));
+ dma_resv_held(obj));
if (dma_fence_is_signaled(fence))
RCU_INIT_POINTER(new->shared[--k], fence);
else
@@ -206,35 +204,34 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
struct dma_fence *fence;
fence = rcu_dereference_protected(new->shared[i],
- reservation_object_held(obj));
+ dma_resv_held(obj));
dma_fence_put(fence);
}
kfree_rcu(old, rcu);
return 0;
}
-EXPORT_SYMBOL(reservation_object_reserve_shared);
+EXPORT_SYMBOL(dma_resv_reserve_shared);
/**
- * reservation_object_add_shared_fence - Add a fence to a shared slot
+ * dma_resv_add_shared_fence - Add a fence to a shared slot
* @obj: the reservation object
* @fence: the shared fence to add
*
* Add a fence to a shared slot, obj->lock must be held, and
- * reservation_object_reserve_shared() has been called.
+ * dma_resv_reserve_shared() has been called.
*/
-void reservation_object_add_shared_fence(struct reservation_object *obj,
- struct dma_fence *fence)
+void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
- struct reservation_object_list *fobj;
+ struct dma_resv_list *fobj;
struct dma_fence *old;
unsigned int i, count;
dma_fence_get(fence);
- reservation_object_assert_held(obj);
+ dma_resv_assert_held(obj);
- fobj = reservation_object_get_list(obj);
+ fobj = dma_resv_get_list(obj);
count = fobj->shared_count;
preempt_disable();
@@ -243,7 +240,7 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
for (i = 0; i < count; ++i) {
old = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(obj));
+ dma_resv_held(obj));
if (old->context == fence->context ||
dma_fence_is_signaled(old))
goto replace;
@@ -262,25 +259,24 @@ replace:
preempt_enable();
dma_fence_put(old);
}
-EXPORT_SYMBOL(reservation_object_add_shared_fence);
+EXPORT_SYMBOL(dma_resv_add_shared_fence);
/**
- * reservation_object_add_excl_fence - Add an exclusive fence.
+ * dma_resv_add_excl_fence - Add an exclusive fence.
* @obj: the reservation object
 * @fence: the exclusive fence to add
*
* Add a fence to the exclusive slot. The obj->lock must be held.
*/
-void reservation_object_add_excl_fence(struct reservation_object *obj,
- struct dma_fence *fence)
+void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
- struct dma_fence *old_fence = reservation_object_get_excl(obj);
- struct reservation_object_list *old;
+ struct dma_fence *old_fence = dma_resv_get_excl(obj);
+ struct dma_resv_list *old;
u32 i = 0;
- reservation_object_assert_held(obj);
+ dma_resv_assert_held(obj);
- old = reservation_object_get_list(obj);
+ old = dma_resv_get_list(obj);
if (old)
i = old->shared_count;
@@ -299,27 +295,26 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
/* inplace update, no shared fences */
while (i--)
dma_fence_put(rcu_dereference_protected(old->shared[i],
- reservation_object_held(obj)));
+ dma_resv_held(obj)));
dma_fence_put(old_fence);
}
-EXPORT_SYMBOL(reservation_object_add_excl_fence);
+EXPORT_SYMBOL(dma_resv_add_excl_fence);
/**
-* reservation_object_copy_fences - Copy all fences from src to dst.
+* dma_resv_copy_fences - Copy all fences from src to dst.
* @dst: the destination reservation object
* @src: the source reservation object
*
 * Copy all fences from src to dst. The dst lock must be held.
*/
-int reservation_object_copy_fences(struct reservation_object *dst,
- struct reservation_object *src)
+int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
- struct reservation_object_list *src_list, *dst_list;
+ struct dma_resv_list *src_list, *dst_list;
struct dma_fence *old, *new;
unsigned i;
- reservation_object_assert_held(dst);
+ dma_resv_assert_held(dst);
rcu_read_lock();
src_list = rcu_dereference(src->fence);
@@ -330,7 +325,7 @@ retry:
rcu_read_unlock();
- dst_list = reservation_object_list_alloc(shared_count);
+ dst_list = dma_resv_list_alloc(shared_count);
if (!dst_list)
return -ENOMEM;
@@ -351,7 +346,7 @@ retry:
continue;
if (!dma_fence_get_rcu(fence)) {
- reservation_object_list_free(dst_list);
+ dma_resv_list_free(dst_list);
src_list = rcu_dereference(src->fence);
goto retry;
}
@@ -370,8 +365,8 @@ retry:
new = dma_fence_get_rcu_safe(&src->fence_excl);
rcu_read_unlock();
- src_list = reservation_object_get_list(dst);
- old = reservation_object_get_excl(dst);
+ src_list = dma_resv_get_list(dst);
+ old = dma_resv_get_excl(dst);
preempt_disable();
write_seqcount_begin(&dst->seq);
@@ -381,15 +376,15 @@ retry:
write_seqcount_end(&dst->seq);
preempt_enable();
- reservation_object_list_free(src_list);
+ dma_resv_list_free(src_list);
dma_fence_put(old);
return 0;
}
-EXPORT_SYMBOL(reservation_object_copy_fences);
+EXPORT_SYMBOL(dma_resv_copy_fences);
/**
- * reservation_object_get_fences_rcu - Get an object's shared and exclusive
+ * dma_resv_get_fences_rcu - Get an object's shared and exclusive
* fences without update side lock held
* @obj: the reservation object
* @pfence_excl: the returned exclusive fence (or NULL)
@@ -401,10 +396,10 @@ EXPORT_SYMBOL(reservation_object_copy_fences);
* exclusive fence is not specified the fence is put into the array of the
* shared fences as well. Returns either zero or -ENOMEM.
*/
-int reservation_object_get_fences_rcu(struct reservation_object *obj,
- struct dma_fence **pfence_excl,
- unsigned *pshared_count,
- struct dma_fence ***pshared)
+int dma_resv_get_fences_rcu(struct dma_resv *obj,
+ struct dma_fence **pfence_excl,
+ unsigned *pshared_count,
+ struct dma_fence ***pshared)
{
struct dma_fence **shared = NULL;
struct dma_fence *fence_excl;
@@ -412,7 +407,7 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
int ret = 1;
do {
- struct reservation_object_list *fobj;
+ struct dma_resv_list *fobj;
unsigned int i, seq;
size_t sz = 0;
@@ -487,10 +482,10 @@ unlock:
*pshared = shared;
return ret;
}
-EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
/**
- * reservation_object_wait_timeout_rcu - Wait on reservation's objects
+ * dma_resv_wait_timeout_rcu - Wait on a reservation object's
* shared and/or exclusive fences.
* @obj: the reservation object
* @wait_all: if true, wait on all fences, else wait on just exclusive fence
@@ -501,9 +496,9 @@ EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
*/
-long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
- bool wait_all, bool intr,
- unsigned long timeout)
+long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
+ bool wait_all, bool intr,
+ unsigned long timeout)
{
struct dma_fence *fence;
unsigned seq, shared_count;
@@ -531,8 +526,7 @@ retry:
}
if (wait_all) {
- struct reservation_object_list *fobj =
- rcu_dereference(obj->fence);
+ struct dma_resv_list *fobj = rcu_dereference(obj->fence);
if (fobj)
shared_count = fobj->shared_count;
@@ -575,11 +569,10 @@ unlock_retry:
rcu_read_unlock();
goto retry;
}
-EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
-static inline int
-reservation_object_test_signaled_single(struct dma_fence *passed_fence)
+static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
{
struct dma_fence *fence, *lfence = passed_fence;
int ret = 1;
@@ -596,7 +589,7 @@ reservation_object_test_signaled_single(struct dma_fence *passed_fence)
}
/**
- * reservation_object_test_signaled_rcu - Test if a reservation object's
+ * dma_resv_test_signaled_rcu - Test if a reservation object's
* fences have been signaled.
* @obj: the reservation object
* @test_all: if true, test all fences, otherwise only test the exclusive
@@ -605,8 +598,7 @@ reservation_object_test_signaled_single(struct dma_fence *passed_fence)
* RETURNS
* true if all fences signaled, else false
*/
-bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
- bool test_all)
+bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
{
unsigned seq, shared_count;
int ret;
@@ -620,8 +612,7 @@ retry:
if (test_all) {
unsigned i;
- struct reservation_object_list *fobj =
- rcu_dereference(obj->fence);
+ struct dma_resv_list *fobj = rcu_dereference(obj->fence);
if (fobj)
shared_count = fobj->shared_count;
@@ -629,7 +620,7 @@ retry:
for (i = 0; i < shared_count; ++i) {
struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
- ret = reservation_object_test_signaled_single(fence);
+ ret = dma_resv_test_signaled_single(fence);
if (ret < 0)
goto retry;
else if (!ret)
@@ -644,8 +635,7 @@ retry:
struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
if (fence_excl) {
- ret = reservation_object_test_signaled_single(
- fence_excl);
+ ret = dma_resv_test_signaled_single(fence_excl);
if (ret < 0)
goto retry;
@@ -657,4 +647,4 @@ retry:
rcu_read_unlock();
return ret;
}
-EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
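
The rename above is mechanical, but the calling convention carries over and is
worth restating with the new names: the ww_mutex must be held, and a shared
slot must be reserved before a shared fence is added. A minimal sketch under
those assumptions (the helper name is hypothetical; obj and fence come from
the surrounding driver):

    #include <linux/dma-resv.h>

    /* Add one shared fence using the new dma_resv names. */
    static int mydrv_attach_shared(struct dma_resv *obj,
                                   struct dma_fence *fence)
    {
            int ret;

            ret = dma_resv_lock(obj, NULL); /* ww_mutex; NULL: no acquire ctx */
            if (ret)
                    return ret;

            /* Must be called before dma_resv_add_shared_fence(). */
            ret = dma_resv_reserve_shared(obj, 1);
            if (!ret)
                    dma_resv_add_shared_fence(obj, fence);

            dma_resv_unlock(obj);
            return ret;
    }

dma_resv_add_shared_fence() takes its own reference on the fence, so the
caller keeps, and eventually drops, its own.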
diff --git a/drivers/dma-buf/selftest.c b/drivers/dma-buf/selftest.c
new file mode 100644
index 000000000000..c60b6944b4bd
--- /dev/null
+++ b/drivers/dma-buf/selftest.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+
+#include "selftest.h"
+
+enum {
+#define selftest(n, func) __idx_##n,
+#include "selftests.h"
+#undef selftest
+};
+
+#define selftest(n, f) [__idx_##n] = { .name = #n, .func = f },
+static struct selftest {
+ bool enabled;
+ const char *name;
+ int (*func)(void);
+} selftests[] = {
+#include "selftests.h"
+};
+#undef selftest
+
+/* Embed the line number into the parameter name so that we can order tests */
+#define param(n) __PASTE(igt__, __PASTE(__PASTE(__LINE__, __), n))
+#define selftest_0(n, func, id) \
+module_param_named(id, selftests[__idx_##n].enabled, bool, 0400);
+#define selftest(n, func) selftest_0(n, func, param(n))
+#include "selftests.h"
+#undef selftest
+
+int __sanitycheck__(void)
+{
+ pr_debug("Hello World!\n");
+ return 0;
+}
+
+static char *__st_filter;
+
+static bool apply_subtest_filter(const char *caller, const char *name)
+{
+ char *filter, *sep, *tok;
+ bool result = true;
+
+ filter = kstrdup(__st_filter, GFP_KERNEL);
+ for (sep = filter; (tok = strsep(&sep, ","));) {
+ bool allow = true;
+ char *sl;
+
+ if (*tok == '!') {
+ allow = false;
+ tok++;
+ }
+
+ if (*tok == '\0')
+ continue;
+
+ sl = strchr(tok, '/');
+ if (sl) {
+ *sl++ = '\0';
+ if (strcmp(tok, caller)) {
+ if (allow)
+ result = false;
+ continue;
+ }
+ tok = sl;
+ }
+
+ if (strcmp(tok, name)) {
+ if (allow)
+ result = false;
+ continue;
+ }
+
+ result = allow;
+ break;
+ }
+ kfree(filter);
+
+ return result;
+}
+
+int
+__subtests(const char *caller, const struct subtest *st, int count, void *data)
+{
+ int err;
+
+ for (; count--; st++) {
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+
+ if (!apply_subtest_filter(caller, st->name))
+ continue;
+
+ pr_info("dma-buf: Running %s/%s\n", caller, st->name);
+
+ err = st->func(data);
+ if (err && err != -EINTR) {
+ pr_err("dma-buf/%s: %s failed with error %d\n",
+ caller, st->name, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void set_default_test_all(struct selftest *st, unsigned long count)
+{
+ unsigned long i;
+
+ for (i = 0; i < count; i++)
+ if (st[i].enabled)
+ return;
+
+ for (i = 0; i < count; i++)
+ st[i].enabled = true;
+}
+
+static int run_selftests(struct selftest *st, unsigned long count)
+{
+ int err = 0;
+
+ set_default_test_all(st, count);
+
+ /* Tests are listed in natural order in selftests.h */
+ for (; count--; st++) {
+ if (!st->enabled)
+ continue;
+
+ pr_info("dma-buf: Running %s\n", st->name);
+ err = st->func();
+ if (err)
+ break;
+ }
+
+ if (WARN(err > 0 || err == -ENOTTY,
+ "%s returned %d, conflicting with selftest's magic values!\n",
+ st->name, err))
+ err = -1;
+
+ return err;
+}
+
+static int __init st_init(void)
+{
+ return run_selftests(selftests, ARRAY_SIZE(selftests));
+}
+
+static void __exit st_exit(void)
+{
+}
+
+module_param_named(st_filter, __st_filter, charp, 0400);
+module_init(st_init);
+module_exit(st_exit);
+
+MODULE_DESCRIPTION("Self-test harness for dma-buf");
+MODULE_LICENSE("GPL and additional rights");
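
Built as a module (CONFIG_DMABUF_SELFTESTS=m), every test also gains a boolean
enable parameter, and st_filter narrows the run at load time. Going by
apply_subtest_filter() above, something like
st_filter='dma_fence/sanitycheck,!dma_fence/race_signal_callback' on the
modprobe line would run one subtest and exclude another (example values, not
from the source): tokens are comma-separated, '!' negates, and 'caller/name'
scopes a token to one test's subtests.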
diff --git a/drivers/dma-buf/selftest.h b/drivers/dma-buf/selftest.h
new file mode 100644
index 000000000000..45793aff6142
--- /dev/null
+++ b/drivers/dma-buf/selftest.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __SELFTEST_H__
+#define __SELFTEST_H__
+
+#include <linux/compiler.h>
+
+#define selftest(name, func) int func(void);
+#include "selftests.h"
+#undef selftest
+
+struct subtest {
+ int (*func)(void *data);
+ const char *name;
+};
+
+int __subtests(const char *caller,
+ const struct subtest *st,
+ int count,
+ void *data);
+#define subtests(T, data) \
+ __subtests(__func__, T, ARRAY_SIZE(T), data)
+
+#define SUBTEST(x) { x, #x }
+
+#endif /* __SELFTEST_H__ */
diff --git a/drivers/dma-buf/selftests.h b/drivers/dma-buf/selftests.h
new file mode 100644
index 000000000000..5320386f02e5
--- /dev/null
+++ b/drivers/dma-buf/selftests.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/* List each unit test as selftest(name, function)
+ *
+ * The name is used both as an enum index and, combined with the line
+ * number, as the name of a boolean module parameter that enables the
+ * test. It must be unique and a legal C identifier.
+ *
+ * The function should be of type int function(void). It may be
+ * conditionally compiled behind a Kconfig option if necessary.
+ *
+ * Tests are executed in order by igt/dmabuf_selftest
+ */
+selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
+selftest(dma_fence, dma_fence)
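
For readers new to the x-macro trick: selftest.c includes this header three
times with different definitions of selftest(). A rough sketch of what the
dma_fence entry expands to (abbreviated; the enum also contains
__idx_sanitycheck):

    enum { __idx_dma_fence };       /* 1st include: ordering index */

    static struct selftest selftests[] = {
            /* 2nd include: the test table entry */
            [__idx_dma_fence] = { .name = "dma_fence", .func = dma_fence },
    };

    /* 3rd include: a per-test bool parameter; __LINE__ makes the name
     * unique and ordered, here line 13 of selftests.h. */
    module_param_named(igt__13__dma_fence,
                       selftests[__idx_dma_fence].enabled, bool, 0400);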
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
new file mode 100644
index 000000000000..e593064341c8
--- /dev/null
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -0,0 +1,574 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-fence.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "selftest.h"
+
+static struct kmem_cache *slab_fences;
+
+static struct mock_fence {
+ struct dma_fence base;
+ struct spinlock lock;
+} *to_mock_fence(struct dma_fence *f) {
+ return container_of(f, struct mock_fence, base);
+}
+
+static const char *mock_name(struct dma_fence *f)
+{
+ return "mock";
+}
+
+static void mock_fence_release(struct dma_fence *f)
+{
+ kmem_cache_free(slab_fences, to_mock_fence(f));
+}
+
+struct wait_cb {
+ struct dma_fence_cb cb;
+ struct task_struct *task;
+};
+
+static void mock_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ wake_up_process(container_of(cb, struct wait_cb, cb)->task);
+}
+
+static long mock_wait(struct dma_fence *f, bool intr, long timeout)
+{
+ const int state = intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+ struct wait_cb cb = { .task = current };
+
+ if (dma_fence_add_callback(f, &cb.cb, mock_wakeup))
+ return timeout;
+
+ while (timeout) {
+ set_current_state(state);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
+ break;
+
+ if (signal_pending_state(state, current))
+ break;
+
+ timeout = schedule_timeout(timeout);
+ }
+ __set_current_state(TASK_RUNNING);
+
+ if (!dma_fence_remove_callback(f, &cb.cb))
+ return timeout;
+
+ if (signal_pending_state(state, current))
+ return -ERESTARTSYS;
+
+ return -ETIME;
+}
+
+static const struct dma_fence_ops mock_ops = {
+ .get_driver_name = mock_name,
+ .get_timeline_name = mock_name,
+ .wait = mock_wait,
+ .release = mock_fence_release,
+};
+
+static struct dma_fence *mock_fence(void)
+{
+ struct mock_fence *f;
+
+ f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ spin_lock_init(&f->lock);
+ dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
+
+ return &f->base;
+}
+
+static int sanitycheck(void *arg)
+{
+ struct dma_fence *f;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_fence_signal(f);
+ dma_fence_put(f);
+
+ return 0;
+}
+
+static int test_signaling(void *arg)
+{
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ if (dma_fence_is_signaled(f)) {
+ pr_err("Fence unexpectedly signaled on creation\n");
+ goto err_free;
+ }
+
+ if (dma_fence_signal(f)) {
+ pr_err("Fence reported being already signaled\n");
+ goto err_free;
+ }
+
+ if (!dma_fence_is_signaled(f)) {
+ pr_err("Fence not reporting signaled\n");
+ goto err_free;
+ }
+
+ if (!dma_fence_signal(f)) {
+ pr_err("Fence reported not being already signaled\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+struct simple_cb {
+ struct dma_fence_cb cb;
+ bool seen;
+};
+
+static void simple_callback(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);
+}
+
+static int test_add_callback(void *arg)
+{
+ struct simple_cb cb = {};
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
+ pr_err("Failed to add callback, fence already signaled!\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+ if (!cb.seen) {
+ pr_err("Callback failed!\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+static int test_late_add_callback(void *arg)
+{
+ struct simple_cb cb = {};
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_fence_signal(f);
+
+ if (!dma_fence_add_callback(f, &cb.cb, simple_callback)) {
+ pr_err("Added callback, but fence was already signaled!\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+ if (cb.seen) {
+ pr_err("Callback called after failed attachment !\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+static int test_rm_callback(void *arg)
+{
+ struct simple_cb cb = {};
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
+ pr_err("Failed to add callback, fence already signaled!\n");
+ goto err_free;
+ }
+
+ if (!dma_fence_remove_callback(f, &cb.cb)) {
+ pr_err("Failed to remove callback!\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+ if (cb.seen) {
+ pr_err("Callback still signaled after removal!\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+static int test_late_rm_callback(void *arg)
+{
+ struct simple_cb cb = {};
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
+ pr_err("Failed to add callback, fence already signaled!\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+ if (!cb.seen) {
+ pr_err("Callback failed!\n");
+ goto err_free;
+ }
+
+ if (dma_fence_remove_callback(f, &cb.cb)) {
+ pr_err("Callback removal succeed after being executed!\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+static int test_status(void *arg)
+{
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ if (dma_fence_get_status(f)) {
+ pr_err("Fence unexpectedly has signaled status on creation\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+ if (!dma_fence_get_status(f)) {
+ pr_err("Fence not reporting signaled status\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+static int test_error(void *arg)
+{
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_fence_set_error(f, -EIO);
+
+ if (dma_fence_get_status(f)) {
+ pr_err("Fence unexpectedly has error status before signal\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+ if (dma_fence_get_status(f) != -EIO) {
+ pr_err("Fence not reporting error status, got %d\n",
+ dma_fence_get_status(f));
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+static int test_wait(void *arg)
+{
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ if (dma_fence_wait_timeout(f, false, 0) != -ETIME) {
+ pr_err("Wait reported complete before being signaled\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+
+ if (dma_fence_wait_timeout(f, false, 0) != 0) {
+ pr_err("Wait reported incomplete after being signaled\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_signal(f);
+ dma_fence_put(f);
+ return err;
+}
+
+struct wait_timer {
+ struct timer_list timer;
+ struct dma_fence *f;
+};
+
+static void wait_timer(struct timer_list *timer)
+{
+ struct wait_timer *wt = from_timer(wt, timer, timer);
+
+ dma_fence_signal(wt->f);
+}
+
+static int test_wait_timeout(void *arg)
+{
+ struct wait_timer wt;
+ int err = -EINVAL;
+
+ timer_setup_on_stack(&wt.timer, wait_timer, 0);
+
+ wt.f = mock_fence();
+ if (!wt.f)
+ return -ENOMEM;
+
+ if (dma_fence_wait_timeout(wt.f, false, 1) != -ETIME) {
+ pr_err("Wait reported complete before being signaled\n");
+ goto err_free;
+ }
+
+ mod_timer(&wt.timer, jiffies + 1);
+
+ if (dma_fence_wait_timeout(wt.f, false, 2) == -ETIME) {
+ if (timer_pending(&wt.timer)) {
+ pr_notice("Timer did not fire within the jiffie!\n");
+ err = 0; /* not our fault! */
+ } else {
+ pr_err("Wait reported incomplete after timeout\n");
+ }
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ del_timer_sync(&wt.timer);
+ destroy_timer_on_stack(&wt.timer);
+ dma_fence_signal(wt.f);
+ dma_fence_put(wt.f);
+ return err;
+}
+
+static int test_stub(void *arg)
+{
+ struct dma_fence *f[64];
+ int err = -EINVAL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(f); i++) {
+ f[i] = dma_fence_get_stub();
+ if (!dma_fence_is_signaled(f[i])) {
+ pr_err("Obtained unsignaled stub fence!\n");
+ goto err;
+ }
+ }
+
+ err = 0;
+err:
+ while (i--)
+ dma_fence_put(f[i]);
+ return err;
+}
+
+/* Now off to the races! */
+
+struct race_thread {
+ struct dma_fence __rcu **fences;
+ struct task_struct *task;
+ bool before;
+ int id;
+};
+
+static void __wait_for_callbacks(struct dma_fence *f)
+{
+ spin_lock_irq(f->lock);
+ spin_unlock_irq(f->lock);
+}
+
+static int thread_signal_callback(void *arg)
+{
+ const struct race_thread *t = arg;
+ unsigned long pass = 0;
+ unsigned long miss = 0;
+ int err = 0;
+
+ while (!err && !kthread_should_stop()) {
+ struct dma_fence *f1, *f2;
+ struct simple_cb cb;
+
+ f1 = mock_fence();
+ if (!f1) {
+ err = -ENOMEM;
+ break;
+ }
+
+ rcu_assign_pointer(t->fences[t->id], f1);
+ smp_wmb();
+
+ rcu_read_lock();
+ do {
+ f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]);
+ } while (!f2 && !kthread_should_stop());
+ rcu_read_unlock();
+
+ if (t->before)
+ dma_fence_signal(f1);
+
+ smp_store_mb(cb.seen, false);
+ if (!f2 || dma_fence_add_callback(f2, &cb.cb, simple_callback))
+ miss++, cb.seen = true;
+
+ if (!t->before)
+ dma_fence_signal(f1);
+
+ if (!cb.seen) {
+ dma_fence_wait(f2, false);
+ __wait_for_callbacks(f2);
+ }
+
+ if (!READ_ONCE(cb.seen)) {
+ pr_err("Callback not seen on thread %d, pass %lu (%lu misses), signaling %s add_callback; fence signaled? %s\n",
+ t->id, pass, miss,
+ t->before ? "before" : "after",
+ dma_fence_is_signaled(f2) ? "yes" : "no");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(f2);
+
+ rcu_assign_pointer(t->fences[t->id], NULL);
+ smp_wmb();
+
+ dma_fence_put(f1);
+
+ pass++;
+ }
+
+ pr_info("%s[%d] completed %lu passes, %lu misses\n",
+ __func__, t->id, pass, miss);
+ return err;
+}
+
+static int race_signal_callback(void *arg)
+{
+ struct dma_fence __rcu *f[2] = {};
+ int ret = 0;
+ int pass;
+
+ for (pass = 0; !ret && pass <= 1; pass++) {
+ struct race_thread t[2];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(t); i++) {
+ t[i].fences = f;
+ t[i].id = i;
+ t[i].before = pass;
+ t[i].task = kthread_run(thread_signal_callback, &t[i],
+ "dma-fence:%d", i);
+ get_task_struct(t[i].task);
+ }
+
+ msleep(50);
+
+ for (i = 0; i < ARRAY_SIZE(t); i++) {
+ int err;
+
+ err = kthread_stop(t[i].task);
+ if (err && !ret)
+ ret = err;
+
+ put_task_struct(t[i].task);
+ }
+ }
+
+ return ret;
+}
+
+int dma_fence(void)
+{
+ static const struct subtest tests[] = {
+ SUBTEST(sanitycheck),
+ SUBTEST(test_signaling),
+ SUBTEST(test_add_callback),
+ SUBTEST(test_late_add_callback),
+ SUBTEST(test_rm_callback),
+ SUBTEST(test_late_rm_callback),
+ SUBTEST(test_status),
+ SUBTEST(test_error),
+ SUBTEST(test_wait),
+ SUBTEST(test_wait_timeout),
+ SUBTEST(test_stub),
+ SUBTEST(race_signal_callback),
+ };
+ int ret;
+
+ pr_info("sizeof(dma_fence)=%zu\n", sizeof(struct dma_fence));
+
+ slab_fences = KMEM_CACHE(mock_fence,
+ SLAB_TYPESAFE_BY_RCU |
+ SLAB_HWCACHE_ALIGN);
+ if (!slab_fences)
+ return -ENOMEM;
+
+ ret = subtests(tests, NULL);
+
+ kmem_cache_destroy(slab_fences);
+
+ return ret;
+}
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 051f6c2873c7..6713cfb1995c 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -132,17 +132,14 @@ static void timeline_fence_release(struct dma_fence *fence)
{
struct sync_pt *pt = dma_fence_to_sync_pt(fence);
struct sync_timeline *parent = dma_fence_parent(fence);
+ unsigned long flags;
+ spin_lock_irqsave(fence->lock, flags);
if (!list_empty(&pt->link)) {
- unsigned long flags;
-
- spin_lock_irqsave(fence->lock, flags);
- if (!list_empty(&pt->link)) {
- list_del(&pt->link);
- rb_erase(&pt->node, &parent->pt_tree);
- }
- spin_unlock_irqrestore(fence->lock, flags);
+ list_del(&pt->link);
+ rb_erase(&pt->node, &parent->pt_tree);
}
+ spin_unlock_irqrestore(fence->lock, flags);
sync_timeline_put(parent);
dma_fence_free(fence);
@@ -265,7 +262,8 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
p = &parent->rb_left;
} else {
if (dma_fence_get_rcu(&other->base)) {
- dma_fence_put(&pt->base);
+ sync_timeline_put(obj);
+ kfree(pt);
pt = other;
goto unlock;
}
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index ee4d1a96d779..25c5c071645b 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -419,7 +419,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
* info->num_fences.
*/
if (!info.num_fences) {
- info.status = dma_fence_is_signaled(sync_file->fence);
+ info.status = dma_fence_get_status(sync_file->fence);
goto no_fences;
} else {
info.status = 1;
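
The switch to dma_fence_get_status() matters because it folds any fence error
into the reported status instead of collapsing everything to
signaled/not-signaled. A sketch of the tri-state convention, as defined for
dma_fence_get_status():

    int status = dma_fence_get_status(fence);
    /*  < 0 : fence signaled with an error, e.g. -EIO  */
    /*    0 : fence not yet signaled                   */
    /*  > 0 : fence signaled without error (returns 1) */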
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index e0c47ae52fc1..42b936b6bbf1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -218,14 +218,14 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
struct amdgpu_amdkfd_fence *ef)
{
- struct reservation_object *resv = bo->tbo.base.resv;
- struct reservation_object_list *old, *new;
+ struct dma_resv *resv = bo->tbo.base.resv;
+ struct dma_resv_list *old, *new;
unsigned int i, j, k;
if (!ef)
return -EINVAL;
- old = reservation_object_get_list(resv);
+ old = dma_resv_get_list(resv);
if (!old)
return 0;
@@ -241,7 +241,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
struct dma_fence *f;
f = rcu_dereference_protected(old->shared[i],
- reservation_object_held(resv));
+ dma_resv_held(resv));
if (f->context == ef->base.context)
RCU_INIT_POINTER(new->shared[--j], f);
@@ -263,7 +263,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
struct dma_fence *f;
f = rcu_dereference_protected(new->shared[i],
- reservation_object_held(resv));
+ dma_resv_held(resv));
dma_fence_put(f);
}
kfree_rcu(old, rcu);
@@ -887,7 +887,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
AMDGPU_FENCE_OWNER_KFD, false);
if (ret)
goto wait_pd_fail;
- ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
+ ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;
amdgpu_bo_fence(vm->root.base.bo,
@@ -2133,7 +2133,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
* Add process eviction fence to bo so they can
* evict each other.
*/
- ret = reservation_object_reserve_shared(gws_bo->tbo.base.resv, 1);
+ ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;
amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 2fcdc6029413..2e53feed40e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -730,7 +730,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
list_for_each_entry(e, &p->validated, tv.head) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
- struct reservation_object *resv = bo->tbo.base.resv;
+ struct dma_resv *resv = bo->tbo.base.resv;
r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
amdgpu_bo_explicit_sync(bo));
@@ -1732,7 +1732,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
*map = mapping;
/* Double check that the BO is reserved by this CS */
- if (reservation_object_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
+ if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
return -EINVAL;
if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index f453e277ed24..1d4aaa9580f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -205,7 +205,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto unpin;
}
- r = reservation_object_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
+ r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
&work->shared_count,
&work->shared);
if (unlikely(r != 0)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index f0db7ddcb61b..61f108ec2b5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -137,23 +137,23 @@ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
}
static int
-__reservation_object_make_exclusive(struct reservation_object *obj)
+__dma_resv_make_exclusive(struct dma_resv *obj)
{
struct dma_fence **fences;
unsigned int count;
int r;
- if (!reservation_object_get_list(obj)) /* no shared fences to convert */
+ if (!dma_resv_get_list(obj)) /* no shared fences to convert */
return 0;
- r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
+ r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
if (r)
return r;
if (count == 0) {
/* Now that was unexpected. */
} else if (count == 1) {
- reservation_object_add_excl_fence(obj, fences[0]);
+ dma_resv_add_excl_fence(obj, fences[0]);
dma_fence_put(fences[0]);
kfree(fences);
} else {
@@ -165,7 +165,7 @@ __reservation_object_make_exclusive(struct reservation_object *obj)
if (!array)
goto err_fences_put;
- reservation_object_add_excl_fence(obj, &array->base);
+ dma_resv_add_excl_fence(obj, &array->base);
dma_fence_put(&array->base);
}
@@ -216,7 +216,7 @@ static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf,
* fences on the reservation object into a single exclusive
* fence.
*/
- r = __reservation_object_make_exclusive(bo->tbo.base.resv);
+ r = __dma_resv_make_exclusive(bo->tbo.base.resv);
if (r)
goto error_unreserve;
}
@@ -367,7 +367,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sg)
{
- struct reservation_object *resv = attach->dmabuf->resv;
+ struct dma_resv *resv = attach->dmabuf->resv;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
@@ -380,7 +380,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
bp.flags = 0;
bp.type = ttm_bo_type_sg;
bp.resv = resv;
- reservation_object_lock(resv, NULL);
+ dma_resv_lock(resv, NULL);
ret = amdgpu_bo_create(adev, &bp, &bo);
if (ret)
goto error;
@@ -392,11 +392,11 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
bo->prime_shared_count = 1;
- reservation_object_unlock(resv);
+ dma_resv_unlock(resv);
return &bo->tbo.base;
error:
- reservation_object_unlock(resv);
+ dma_resv_unlock(resv);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d532e3d647ca..b174bd5eb38e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -50,7 +50,7 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain,
u64 flags, enum ttm_bo_type type,
- struct reservation_object *resv,
+ struct dma_resv *resv,
struct drm_gem_object **obj)
{
struct amdgpu_bo *bo;
@@ -215,7 +215,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
union drm_amdgpu_gem_create *args = data;
uint64_t flags = args->in.domain_flags;
uint64_t size = args->in.bo_size;
- struct reservation_object *resv = NULL;
+ struct dma_resv *resv = NULL;
struct drm_gem_object *gobj;
uint32_t handle;
int r;
@@ -433,7 +433,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
robj = gem_to_amdgpu_bo(gobj);
- ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true,
+ ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
timeout);
/* ret == 0 means not signaled,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index 2f17150e26e1..0b66d2e6b5d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -47,7 +47,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain,
u64 flags, enum ttm_bo_type type,
- struct reservation_object *resv,
+ struct dma_resv *resv,
struct drm_gem_object **obj);
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 529065b83885..53734da1c2df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -104,7 +104,7 @@ static void amdgpu_pasid_free_cb(struct dma_fence *fence,
*
* Free the pasid only after all the fences in resv are signaled.
*/
-void amdgpu_pasid_free_delayed(struct reservation_object *resv,
+void amdgpu_pasid_free_delayed(struct dma_resv *resv,
unsigned int pasid)
{
struct dma_fence *fence, **fences;
@@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct reservation_object *resv,
unsigned count;
int r;
- r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences);
+ r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
if (r)
goto fallback;
@@ -156,7 +156,7 @@ fallback:
/* Not enough memory for the delayed delete, as last resort
* block for all the fences to complete.
*/
- reservation_object_wait_timeout_rcu(resv, true, false,
+ dma_resv_wait_timeout_rcu(resv, true, false,
MAX_SCHEDULE_TIMEOUT);
amdgpu_pasid_free(pasid);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 7625419f0fc2..8e58325bbca2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -72,7 +72,7 @@ struct amdgpu_vmid_mgr {
int amdgpu_pasid_alloc(unsigned int bits);
void amdgpu_pasid_free(unsigned int pasid);
-void amdgpu_pasid_free_delayed(struct reservation_object *resv,
+void amdgpu_pasid_free_delayed(struct dma_resv *resv,
unsigned int pasid);
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 50022acc8a81..f1f8cdd695d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -179,7 +179,7 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
continue;
- r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
+ r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
true, false, MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 008bf62044b0..1fead0e8b890 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -551,7 +551,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
fail_unreserve:
if (!bp->resv)
- reservation_object_unlock(bo->tbo.base.resv);
+ dma_resv_unlock(bo->tbo.base.resv);
amdgpu_bo_unref(&bo);
return r;
}
@@ -613,13 +613,13 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
if (!bp->resv)
- WARN_ON(reservation_object_lock((*bo_ptr)->tbo.base.resv,
+ WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
NULL));
r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);
if (!bp->resv)
- reservation_object_unlock((*bo_ptr)->tbo.base.resv);
+ dma_resv_unlock((*bo_ptr)->tbo.base.resv);
if (r)
amdgpu_bo_unref(bo_ptr);
@@ -716,7 +716,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
return 0;
}
- r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, false, false,
+ r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
MAX_SCHEDULE_TIMEOUT);
if (r < 0)
return r;
@@ -1094,7 +1094,7 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
*/
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
- reservation_object_assert_held(bo->tbo.base.resv);
+ dma_resv_assert_held(bo->tbo.base.resv);
if (tiling_flags)
*tiling_flags = bo->tiling_flags;
@@ -1243,7 +1243,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
return;
- reservation_object_lock(bo->base.resv, NULL);
+ dma_resv_lock(bo->base.resv, NULL);
r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
if (!WARN_ON(r)) {
@@ -1251,7 +1251,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
dma_fence_put(fence);
}
- reservation_object_unlock(bo->base.resv);
+ dma_resv_unlock(bo->base.resv);
}
/**
@@ -1326,12 +1326,12 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared)
{
- struct reservation_object *resv = bo->tbo.base.resv;
+ struct dma_resv *resv = bo->tbo.base.resv;
if (shared)
- reservation_object_add_shared_fence(resv, fence);
+ dma_resv_add_shared_fence(resv, fence);
else
- reservation_object_add_excl_fence(resv, fence);
+ dma_resv_add_excl_fence(resv, fence);
}
/**
@@ -1371,7 +1371,7 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
- WARN_ON_ONCE(!reservation_object_is_locked(bo->tbo.base.resv) &&
+ WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
!bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 05dde0dd04ff..658f4c9779b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -41,7 +41,7 @@ struct amdgpu_bo_param {
u32 preferred_domain;
u64 flags;
enum ttm_bo_type type;
- struct reservation_object *resv;
+ struct dma_resv *resv;
};
/* bo virtual addresses in a vm */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 9828f3c7c655..95e5e93edd18 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -190,10 +190,10 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
*/
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
- struct reservation_object *resv,
+ struct dma_resv *resv,
void *owner, bool explicit_sync)
{
- struct reservation_object_list *flist;
+ struct dma_resv_list *flist;
struct dma_fence *f;
void *fence_owner;
unsigned i;
@@ -203,16 +203,16 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
return -EINVAL;
/* always sync to the exclusive fence */
- f = reservation_object_get_excl(resv);
+ f = dma_resv_get_excl(resv);
r = amdgpu_sync_fence(adev, sync, f, false);
- flist = reservation_object_get_list(resv);
+ flist = dma_resv_get_list(resv);
if (!flist || r)
return r;
for (i = 0; i < flist->shared_count; ++i) {
f = rcu_dereference_protected(flist->shared[i],
- reservation_object_held(resv));
+ dma_resv_held(resv));
/* We only want to trigger KFD eviction fences on
* evict or move jobs. Skip KFD fences otherwise.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index 10cf23a57f17..b5f1778a2319 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -27,7 +27,7 @@
#include <linux/hashtable.h>
struct dma_fence;
-struct reservation_object;
+struct dma_resv;
struct amdgpu_device;
struct amdgpu_ring;
@@ -44,7 +44,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct dma_fence *f, bool explicit);
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
- struct reservation_object *resv,
+ struct dma_resv *resv,
void *owner,
bool explicit_sync);
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index a29110a4e492..13b144c8f67d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -303,7 +303,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
struct amdgpu_copy_mem *src,
struct amdgpu_copy_mem *dst,
uint64_t size,
- struct reservation_object *resv,
+ struct dma_resv *resv,
struct dma_fence **f)
{
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
@@ -1486,7 +1486,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
{
unsigned long num_pages = bo->mem.num_pages;
struct drm_mm_node *node = bo->mem.mm_node;
- struct reservation_object_list *flist;
+ struct dma_resv_list *flist;
struct dma_fence *f;
int i;
@@ -1494,18 +1494,18 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
* cleanly handle page faults.
*/
if (bo->type == ttm_bo_type_kernel &&
- !reservation_object_test_signaled_rcu(bo->base.resv, true))
+ !dma_resv_test_signaled_rcu(bo->base.resv, true))
return false;
/* If bo is a KFD BO, check if the bo belongs to the current process.
* If true, then return false as any KFD process needs all its BOs to
* be resident to run successfully
*/
- flist = reservation_object_get_list(bo->base.resv);
+ flist = dma_resv_get_list(bo->base.resv);
if (flist) {
for (i = 0; i < flist->shared_count; ++i) {
f = rcu_dereference_protected(flist->shared[i],
- reservation_object_held(bo->base.resv));
+ dma_resv_held(bo->base.resv));
if (amdkfd_fence_check_mm(f, current->mm))
return false;
}
@@ -2011,7 +2011,7 @@ error_free:
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
- struct reservation_object *resv,
+ struct dma_resv *resv,
struct dma_fence **fence, bool direct_submit,
bool vm_needs_flush)
{
@@ -2085,7 +2085,7 @@ error_free:
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
- struct reservation_object *resv,
+ struct dma_resv *resv,
struct dma_fence **fence)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index bccb8c49e597..0dddedc06ae3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -85,18 +85,18 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
- struct reservation_object *resv,
+ struct dma_resv *resv,
struct dma_fence **fence, bool direct_submit,
bool vm_needs_flush);
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
struct amdgpu_copy_mem *src,
struct amdgpu_copy_mem *dst,
uint64_t size,
- struct reservation_object *resv,
+ struct dma_resv *resv,
struct dma_fence **f);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
- struct reservation_object *resv,
+ struct dma_resv *resv,
struct dma_fence **fence);
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index f858607b17a5..b2c364b8695f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1073,7 +1073,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
if (direct) {
- r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
+ r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
true, false,
msecs_to_jiffies(10));
if (r == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1ba8cbc4d851..e2fb141ff2e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1702,7 +1702,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
- exclusive = reservation_object_get_excl(bo->tbo.base.resv);
+ exclusive = dma_resv_get_excl(bo->tbo.base.resv);
}
if (bo) {
@@ -1879,18 +1879,18 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
*/
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
- struct reservation_object *resv = vm->root.base.bo->tbo.base.resv;
+ struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
struct dma_fence *excl, **shared;
unsigned i, shared_count;
int r;
- r = reservation_object_get_fences_rcu(resv, &excl,
+ r = dma_resv_get_fences_rcu(resv, &excl,
&shared_count, &shared);
if (r) {
/* Not enough memory to grab the fence list, as a last resort
* block for all the fences to complete.
*/
- reservation_object_wait_timeout_rcu(resv, true, false,
+ dma_resv_wait_timeout_rcu(resv, true, false,
MAX_SCHEDULE_TIMEOUT);
return;
}
@@ -1978,7 +1978,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
struct amdgpu_bo_va *bo_va, *tmp;
- struct reservation_object *resv;
+ struct dma_resv *resv;
bool clear;
int r;
@@ -1997,7 +1997,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
spin_unlock(&vm->invalidated_lock);
/* Try to reserve the BO to avoid clearing its ptes */
- if (!amdgpu_vm_debug && reservation_object_trylock(resv))
+ if (!amdgpu_vm_debug && dma_resv_trylock(resv))
clear = false;
/* Somebody else is using the BO right now */
else
@@ -2008,7 +2008,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
return r;
if (!clear)
- reservation_object_unlock(resv);
+ dma_resv_unlock(resv);
spin_lock(&vm->invalidated_lock);
}
spin_unlock(&vm->invalidated_lock);
@@ -2416,7 +2416,7 @@ void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
struct amdgpu_bo *bo;
bo = mapping->bo_va->base.bo;
- if (reservation_object_locking_ctx(bo->tbo.base.resv) !=
+ if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
ticket)
continue;
}
@@ -2649,7 +2649,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
*/
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- return reservation_object_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
+ return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
true, true, timeout);
}
@@ -2724,7 +2724,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
goto error_free_root;
- r = reservation_object_reserve_shared(root->tbo.base.resv, 1);
+ r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
if (r)
goto error_unreserve;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index caf7008cbbd3..e1b09bb432bd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5737,7 +5737,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* deadlock during GPU reset when this fence will not signal
* but we hold reservation lock for the BO.
*/
- r = reservation_object_wait_timeout_rcu(abo->tbo.base.resv, true,
+ r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
false,
msecs_to_jiffies(5000));
if (unlikely(r <= 0))
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index fa9a4593bb37..624d257da20f 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -27,7 +27,7 @@ static void komeda_crtc_update_clock_ratio(struct komeda_crtc_state *kcrtc_st)
return;
}
- pxlclk = kcrtc_st->base.adjusted_mode.crtc_clock * 1000;
+ pxlclk = kcrtc_st->base.adjusted_mode.crtc_clock * 1000ULL;
aclk = komeda_crtc_get_aclk(kcrtc_st);
kcrtc_st->clock_ratio = div64_u64(aclk << 32, pxlclk);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 1ff7f4b2c620..0142ee991957 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -8,6 +8,7 @@
#include <linux/iommu.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_DEBUG_FS
@@ -146,6 +147,12 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
return mdev->irq;
}
+ /* Get the optional framebuffer memory resource */
+ ret = of_reserved_mem_device_init(dev);
+ if (ret && ret != -ENODEV)
+ return ret;
+ ret = 0;
+
for_each_available_child_of_node(np, child) {
if (of_node_cmp(child->name, "pipeline") == 0) {
ret = komeda_parse_pipe_dt(mdev, child);
@@ -292,6 +299,8 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
mdev->n_pipelines = 0;
+ of_reserved_mem_device_release(dev);
+
if (funcs && funcs->cleanup)
funcs->cleanup(mdev);
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index a3efa28436ea..af67fefed38d 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -9,7 +9,12 @@
* Implementation of a CRTC class for the HDLCD driver.
*/
-#include <drm/drmP.h>
+#include <linux/clk.h>
+#include <linux/of_graph.h>
+#include <linux/platform_data/simplefb.h>
+
+#include <video/videomode.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
@@ -19,10 +24,7 @@
#include <drm/drm_of.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <linux/clk.h>
-#include <linux/of_graph.h>
-#include <linux/platform_data/simplefb.h>
-#include <video/videomode.h>
+#include <drm/drm_vblank.h>
#include "hdlcd_drv.h"
#include "hdlcd_regs.h"
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 27c46a2838c5..2e053815b54a 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -14,21 +14,26 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/console.h>
+#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_irq.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "hdlcd_drv.h"
#include "hdlcd_regs.h"
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index db4451260fff..587d94798f5c 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -6,14 +6,17 @@
* ARM Mali DP500/DP550/DP650 driver (crtc operations)
*/
-#include <drm/drmP.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
+#include <video/videomode.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <linux/clk.h>
-#include <linux/pm_runtime.h>
-#include <video/videomode.h>
+#include <drm/drm_vblank.h>
#include "malidp_drv.h"
#include "malidp_hw.h"
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index c27ff456eddc..333b88a5efb0 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -15,17 +15,19 @@
#include <linux/pm_runtime.h>
#include <linux/debugfs.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "malidp_drv.h"
#include "malidp_mw.h"
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index 0a639af8337e..cdfddfabf2d1 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -9,12 +9,13 @@
#ifndef __MALIDP_DRV_H__
#define __MALIDP_DRV_H__
-#include <drm/drm_writeback.h>
-#include <drm/drm_encoder.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_writeback.h>
+#include <drm/drm_encoder.h>
+
#include "malidp_hw.h"
#define MALIDP_CONFIG_VALID_INIT 0
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 380be66d4c6e..bd8265f02e0b 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -9,12 +9,17 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/types.h>
#include <linux/io.h>
-#include <drm/drmP.h>
+
#include <video/videomode.h>
#include <video/display_timing.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_print.h>
+
#include "malidp_drv.h"
#include "malidp_hw.h"
#include "malidp_mw.h"
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 2e812525025d..22c0847986df 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -5,13 +5,14 @@
*
* ARM Mali DP Writeback connector implementation
*/
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_probe_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drmP.h>
+#include <drm/drm_probe_helper.h>
#include <drm/drm_writeback.h>
#include "malidp_drv.h"
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 488375bd133d..3c70a53813bf 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -7,11 +7,13 @@
*/
#include <linux/iommu.h>
+#include <linux/platform_device.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index d44fca4e1655..c2b92acd1e9a 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -3,15 +3,19 @@
* Copyright (C) 2012 Russell King
* Rewritten from the dovefb driver, and Armada510 manuals.
*/
+
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <drm/drmP.h>
+
#include <drm/drm_atomic.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
index dc3716dbb2c0..c6fc2f1d58e9 100644
--- a/drivers/gpu/drm/armada/armada_debugfs.c
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -3,11 +3,15 @@
* Copyright (C) 2012 Russell King
* Rewritten from the dovefb driver, and Armada510 manuals.
*/
+
#include <linux/ctype.h>
-#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
-#include <drm/drmP.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+
#include "armada_crtc.h"
#include "armada_drm.h"
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index c7794c8bdd90..a11bdaccbb33 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -8,11 +8,14 @@
#include <linux/kfifo.h>
#include <linux/io.h>
#include <linux/workqueue.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_mm.h>
struct armada_crtc;
struct armada_gem_object;
struct clk;
+struct drm_display_mode;
struct drm_fb_helper;
static inline void
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 055c92bc88bf..197dca3fc84c 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -2,14 +2,22 @@
/*
* Copyright (C) 2012 Russell King
*/
+
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_of.h>
+#include <drm/drm_vblank.h>
+
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_gem.h"
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index de030cb0aa90..426ca383d696 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -2,9 +2,12 @@
/*
* Copyright (C) 2012 Russell King
*/
+
#include <drm/drm_modeset_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
+
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 096aff530b01..090cc0d699ae 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -3,11 +3,14 @@
* Copyright (C) 2012 Russell King
* Written from the i915 driver.
*/
+
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 60c509784fa3..93cf8b8bfcff 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -2,12 +2,17 @@
/*
* Copyright (C) 2012 Russell King
*/
+
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
+#include <linux/mman.h>
#include <linux/shmem_fs.h>
+
+#include <drm/armada_drm.h>
+#include <drm/drm_prime.h>
+
#include "armada_drm.h"
#include "armada_gem.h"
-#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index e8060216b389..07f0da4d9ba1 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -3,12 +3,14 @@
* Copyright (C) 2012 Russell King
* Rewritten from the dovefb driver, and Armada510 manuals.
*/
-#include <drm/drmP.h>
+
+#include <drm/armada_drm.h>
#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
-#include <drm/armada_drm.h>
+
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c
index f08b4f37816d..e7cc2b343bcb 100644
--- a/drivers/gpu/drm/armada/armada_plane.c
+++ b/drivers/gpu/drm/armada/armada_plane.c
@@ -3,10 +3,12 @@
* Copyright (C) 2012 Russell King
* Rewritten from the dovefb driver, and Armada510 manuals.
*/
-#include <drm/drmP.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
diff --git a/drivers/gpu/drm/armada/armada_trace.h b/drivers/gpu/drm/armada/armada_trace.h
index f03a56bda596..528f20fe3147 100644
--- a/drivers/gpu/drm/armada/armada_trace.h
+++ b/drivers/gpu/drm/armada/armada_trace.h
@@ -3,7 +3,10 @@
#define ARMADA_TRACE_H
#include <linux/tracepoint.h>
-#include <drm/drmP.h>
+
+struct drm_crtc;
+struct drm_framebuffer;
+struct drm_plane;
#undef TRACE_SYSTEM
#define TRACE_SYSTEM armada
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
index 15db9e426ec4..2184b8be6fd4 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
@@ -215,7 +215,7 @@ static void aspeed_gfx_disable_vblank(struct drm_simple_display_pipe *pipe)
writel(reg | CRT_CTRL_VERTICAL_INTR_STS, priv->base + CRT_CTRL1);
}
-static struct drm_simple_display_pipe_funcs aspeed_gfx_funcs = {
+static const struct drm_simple_display_pipe_funcs aspeed_gfx_funcs = {
.enable = aspeed_gfx_pipe_enable,
.disable = aspeed_gfx_pipe_disable,
.update = aspeed_gfx_pipe_update,
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index f2f7f69d6cc3..22885dceaa17 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1780,8 +1780,7 @@ void analogix_dp_unbind(struct analogix_dp_device *dp)
if (dp->plat_data->panel) {
if (drm_panel_unprepare(dp->plat_data->panel))
DRM_ERROR("failed to turnoff the panel\n");
- if (drm_panel_detach(dp->plat_data->panel))
- DRM_ERROR("failed to detach the panel\n");
+ drm_panel_detach(dp->plat_data->panel);
}
drm_dp_aux_unregister(&dp->aux);
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index 8ef6539ae78a..7aa789c35882 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -42,7 +42,7 @@ static int dumb_vga_get_modes(struct drm_connector *connector)
struct edid *edid;
int ret;
- if (IS_ERR(vga->ddc))
+ if (!vga->ddc)
goto fallback;
edid = drm_get_edid(connector, vga->ddc);
@@ -84,7 +84,7 @@ dumb_vga_connector_detect(struct drm_connector *connector, bool force)
* wire the DDC pins, or the I2C bus might not be working at
* all.
*/
- if (!IS_ERR(vga->ddc) && drm_probe_ddc(vga->ddc))
+ if (vga->ddc && drm_probe_ddc(vga->ddc))
return connector_status_connected;
return connector_status_unknown;
@@ -197,6 +197,7 @@ static int dumb_vga_probe(struct platform_device *pdev)
if (PTR_ERR(vga->ddc) == -ENODEV) {
dev_dbg(&pdev->dev,
"No i2c bus specified. Disabling EDID readout\n");
+ vga->ddc = NULL;
} else {
dev_err(&pdev->dev, "Couldn't retrieve i2c bus\n");
return PTR_ERR(vga->ddc);
@@ -218,7 +219,7 @@ static int dumb_vga_remove(struct platform_device *pdev)
drm_bridge_remove(&vga->bridge);
- if (!IS_ERR(vga->ddc))
+ if (vga->ddc)
i2c_put_adapter(vga->ddc);
return 0;
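The dumb-vga-dac hunks above all follow from one normalization: probe converts the optional DDC adapter from ERR_PTR(-ENODEV) to NULL, so every later test becomes a plain NULL check. A hedged sketch of that idiom, with a hypothetical lookup helper:

static struct i2c_adapter *drv_get_optional_ddc(struct device *dev)
{
	struct i2c_adapter *ddc = drv_lookup_ddc(dev); /* hypothetical */

	if (IS_ERR(ddc)) {
		if (PTR_ERR(ddc) == -ENODEV)
			return NULL;	/* absent is a valid configuration */
		return ddc;		/* real errors stay as ERR_PTR */
	}

	return ddc;
}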
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index a494186ae6ce..2b7539701b42 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -63,10 +63,6 @@ enum {
HDMI_REVISION_ID = 0x0001,
HDMI_IH_AHBDMAAUD_STAT0 = 0x0109,
HDMI_IH_MUTE_AHBDMAAUD_STAT0 = 0x0189,
- HDMI_FC_AUDICONF2 = 0x1027,
- HDMI_FC_AUDSCONF = 0x1063,
- HDMI_FC_AUDSCONF_LAYOUT1 = 1 << 0,
- HDMI_FC_AUDSCONF_LAYOUT0 = 0 << 0,
HDMI_AHB_DMA_CONF0 = 0x3600,
HDMI_AHB_DMA_START = 0x3601,
HDMI_AHB_DMA_STOP = 0x3602,
@@ -403,7 +399,7 @@ static int dw_hdmi_prepare(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_dw_hdmi *dw = substream->private_data;
- u8 threshold, conf0, conf1, layout, ca;
+ u8 threshold, conf0, conf1, ca;
/* Setup as per 3.0.5 FSL 4.1.0 BSP */
switch (dw->revision) {
@@ -434,20 +430,12 @@ static int dw_hdmi_prepare(struct snd_pcm_substream *substream)
conf1 = default_hdmi_channel_config[runtime->channels - 2].conf1;
ca = default_hdmi_channel_config[runtime->channels - 2].ca;
- /*
- * For >2 channel PCM audio, we need to select layout 1
- * and set an appropriate channel map.
- */
- if (runtime->channels > 2)
- layout = HDMI_FC_AUDSCONF_LAYOUT1;
- else
- layout = HDMI_FC_AUDSCONF_LAYOUT0;
-
writeb_relaxed(threshold, dw->data.base + HDMI_AHB_DMA_THRSLD);
writeb_relaxed(conf0, dw->data.base + HDMI_AHB_DMA_CONF0);
writeb_relaxed(conf1, dw->data.base + HDMI_AHB_DMA_CONF1);
- writeb_relaxed(layout, dw->data.base + HDMI_FC_AUDSCONF);
- writeb_relaxed(ca, dw->data.base + HDMI_FC_AUDICONF2);
+
+ dw_hdmi_set_channel_count(dw->data.hdmi, runtime->channels);
+ dw_hdmi_set_channel_allocation(dw->data.hdmi, ca);
switch (runtime->format) {
case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE:
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
index 63b5756f463b..cb07dc0da5a7 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
@@ -14,6 +14,7 @@ struct dw_hdmi_audio_data {
struct dw_hdmi_i2s_audio_data {
struct dw_hdmi *hdmi;
+ u8 *eld;
void (*write)(struct dw_hdmi *hdmi, u8 val, int offset);
u8 (*read)(struct dw_hdmi *hdmi, int offset);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
index 0f949978d3fc..ac1e001d0882 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
@@ -256,8 +256,8 @@ static int dw_hdmi_cec_probe(struct platform_device *pdev)
dw_hdmi_write(cec, 0, HDMI_CEC_POLARITY);
cec->adap = cec_allocate_adapter(&dw_hdmi_cec_ops, cec, "dw_hdmi",
- CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT |
- CEC_CAP_RC | CEC_CAP_PASSTHROUGH,
+ CEC_CAP_DEFAULTS |
+ CEC_CAP_CONNECTOR_INFO,
CEC_MAX_LOG_ADDRS);
if (IS_ERR(cec->adap))
return PTR_ERR(cec->adap);
@@ -278,13 +278,14 @@ static int dw_hdmi_cec_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- cec->notify = cec_notifier_get(pdev->dev.parent);
+ cec->notify = cec_notifier_cec_adap_register(pdev->dev.parent,
+ NULL, cec->adap);
if (!cec->notify)
return -ENOMEM;
ret = cec_register_adapter(cec->adap, pdev->dev.parent);
if (ret < 0) {
- cec_notifier_put(cec->notify);
+ cec_notifier_cec_adap_unregister(cec->notify);
return ret;
}
@@ -294,8 +295,6 @@ static int dw_hdmi_cec_probe(struct platform_device *pdev)
*/
devm_remove_action(&pdev->dev, dw_hdmi_cec_del, cec);
- cec_register_cec_notifier(cec->adap, cec->notify);
-
return 0;
}
@@ -303,8 +302,8 @@ static int dw_hdmi_cec_remove(struct platform_device *pdev)
{
struct dw_hdmi_cec *cec = platform_get_drvdata(pdev);
+ cec_notifier_cec_adap_unregister(cec->notify);
cec_unregister_adapter(cec->adap);
- cec_notifier_put(cec->notify);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index 5cbb71a866d5..1d15cf9b6821 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <drm/bridge/dw_hdmi.h>
+#include <drm/drm_crtc.h>
#include <sound/hdmi-codec.h>
@@ -44,14 +45,30 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
u8 inputclkfs = 0;
/* it cares I2S only */
- if ((fmt->fmt != HDMI_I2S) ||
- (fmt->bit_clk_master | fmt->frame_clk_master)) {
- dev_err(dev, "unsupported format/settings\n");
+ if (fmt->bit_clk_master | fmt->frame_clk_master) {
+ dev_err(dev, "unsupported clock settings\n");
return -EINVAL;
}
+ /* Reset the FIFOs before applying new params */
+ hdmi_write(audio, HDMI_AUD_CONF0_SW_RESET, HDMI_AUD_CONF0);
+ hdmi_write(audio, (u8)~HDMI_MC_SWRSTZ_I2SSWRST_REQ, HDMI_MC_SWRSTZ);
+
inputclkfs = HDMI_AUD_INPUTCLKFS_64FS;
- conf0 = HDMI_AUD_CONF0_I2S_ALL_ENABLE;
+ conf0 = (HDMI_AUD_CONF0_I2S_SELECT | HDMI_AUD_CONF0_I2S_EN0);
+
+ /* Enable the required i2s lanes */
+ switch (hparms->channels) {
+ case 7 ... 8:
+ conf0 |= HDMI_AUD_CONF0_I2S_EN3;
+ /* Fall-thru */
+ case 5 ... 6:
+ conf0 |= HDMI_AUD_CONF0_I2S_EN2;
+ /* Fall-thru */
+ case 3 ... 4:
+ conf0 |= HDMI_AUD_CONF0_I2S_EN1;
+ /* Fall-thru */
+ }
switch (hparms->sample_width) {
case 16:
@@ -63,7 +80,30 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
break;
}
+ switch (fmt->fmt) {
+ case HDMI_I2S:
+ conf1 |= HDMI_AUD_CONF1_MODE_I2S;
+ break;
+ case HDMI_RIGHT_J:
+ conf1 |= HDMI_AUD_CONF1_MODE_RIGHT_J;
+ break;
+ case HDMI_LEFT_J:
+ conf1 |= HDMI_AUD_CONF1_MODE_LEFT_J;
+ break;
+ case HDMI_DSP_A:
+ conf1 |= HDMI_AUD_CONF1_MODE_BURST_1;
+ break;
+ case HDMI_DSP_B:
+ conf1 |= HDMI_AUD_CONF1_MODE_BURST_2;
+ break;
+ default:
+ dev_err(dev, "unsupported format\n");
+ return -EINVAL;
+ }
+
dw_hdmi_set_sample_rate(hdmi, hparms->sample_rate);
+ dw_hdmi_set_channel_count(hdmi, hparms->channels);
+ dw_hdmi_set_channel_allocation(hdmi, hparms->cea.channel_allocation);
hdmi_write(audio, inputclkfs, HDMI_AUD_INPUTCLKFS);
hdmi_write(audio, conf0, HDMI_AUD_CONF0);
@@ -80,8 +120,15 @@ static void dw_hdmi_i2s_audio_shutdown(struct device *dev, void *data)
struct dw_hdmi *hdmi = audio->hdmi;
dw_hdmi_audio_disable(hdmi);
+}
- hdmi_write(audio, HDMI_AUD_CONF0_SW_RESET, HDMI_AUD_CONF0);
+static int dw_hdmi_i2s_get_eld(struct device *dev, void *data, uint8_t *buf,
+ size_t len)
+{
+ struct dw_hdmi_i2s_audio_data *audio = data;
+
+ memcpy(buf, audio->eld, min_t(size_t, MAX_ELD_BYTES, len));
+ return 0;
}
static int dw_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
@@ -107,6 +154,7 @@ static int dw_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
static struct hdmi_codec_ops dw_hdmi_i2s_ops = {
.hw_params = dw_hdmi_i2s_hw_params,
.audio_shutdown = dw_hdmi_i2s_audio_shutdown,
+ .get_eld = dw_hdmi_i2s_get_eld,
.get_dai_id = dw_hdmi_i2s_get_dai_id,
};
@@ -119,7 +167,7 @@ static int snd_dw_hdmi_probe(struct platform_device *pdev)
pdata.ops = &dw_hdmi_i2s_ops;
pdata.i2s = 1;
- pdata.max_i2s_channels = 6;
+ pdata.max_i2s_channels = 8;
pdata.data = audio;
memset(&pdevinfo, 0, sizeof(pdevinfo));
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 83b94b66e464..521d689413c8 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -27,7 +27,6 @@
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drm_encoder_slave.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -190,6 +189,7 @@ struct dw_hdmi {
void (*enable_audio)(struct dw_hdmi *hdmi);
void (*disable_audio)(struct dw_hdmi *hdmi);
+ struct mutex cec_notifier_mutex;
struct cec_notifier *cec_notifier;
};
@@ -645,6 +645,42 @@ void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate)
}
EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_rate);
+void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt)
+{
+ u8 layout;
+
+ mutex_lock(&hdmi->audio_mutex);
+
+ /*
+ * For >2 channel PCM audio, we need to select layout 1
+ * and set an appropriate channel map.
+ */
+ if (cnt > 2)
+ layout = HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT1;
+ else
+ layout = HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT0;
+
+ hdmi_modb(hdmi, layout, HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_MASK,
+ HDMI_FC_AUDSCONF);
+
+ /* Set the audio infoframes channel count */
+ hdmi_modb(hdmi, (cnt - 1) << HDMI_FC_AUDICONF0_CC_OFFSET,
+ HDMI_FC_AUDICONF0_CC_MASK, HDMI_FC_AUDICONF0);
+
+ mutex_unlock(&hdmi->audio_mutex);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_channel_count);
+
+void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca)
+{
+ mutex_lock(&hdmi->audio_mutex);
+
+ hdmi_writeb(hdmi, ca, HDMI_FC_AUDICONF2);
+
+ mutex_unlock(&hdmi->audio_mutex);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_channel_allocation);
+
static void hdmi_enable_audio_clk(struct dw_hdmi *hdmi, bool enable)
{
if (enable)
@@ -2194,6 +2230,8 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
struct dw_hdmi *hdmi = bridge->driver_private;
struct drm_encoder *encoder = bridge->encoder;
struct drm_connector *connector = &hdmi->connector;
+ struct cec_connector_info conn_info;
+ struct cec_notifier *notifier;
connector->interlace_allowed = 1;
connector->polled = DRM_CONNECTOR_POLL_HPD;
@@ -2207,9 +2245,29 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
drm_connector_attach_encoder(connector, encoder);
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+
+ notifier = cec_notifier_conn_register(hdmi->dev, NULL, &conn_info);
+ if (!notifier)
+ return -ENOMEM;
+
+ mutex_lock(&hdmi->cec_notifier_mutex);
+ hdmi->cec_notifier = notifier;
+ mutex_unlock(&hdmi->cec_notifier_mutex);
+
return 0;
}
+static void dw_hdmi_bridge_detach(struct drm_bridge *bridge)
+{
+ struct dw_hdmi *hdmi = bridge->driver_private;
+
+ mutex_lock(&hdmi->cec_notifier_mutex);
+ cec_notifier_conn_unregister(hdmi->cec_notifier);
+ hdmi->cec_notifier = NULL;
+ mutex_unlock(&hdmi->cec_notifier_mutex);
+}
+
static enum drm_mode_status
dw_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_mode *mode)
@@ -2266,6 +2324,7 @@ static void dw_hdmi_bridge_enable(struct drm_bridge *bridge)
static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
.attach = dw_hdmi_bridge_attach,
+ .detach = dw_hdmi_bridge_detach,
.enable = dw_hdmi_bridge_enable,
.disable = dw_hdmi_bridge_disable,
.mode_set = dw_hdmi_bridge_mode_set,
@@ -2373,9 +2432,11 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
phy_stat & HDMI_PHY_HPD,
phy_stat & HDMI_PHY_RX_SENSE);
- if ((phy_stat & (HDMI_PHY_RX_SENSE | HDMI_PHY_HPD)) == 0)
- cec_notifier_set_phys_addr(hdmi->cec_notifier,
- CEC_PHYS_ADDR_INVALID);
+ if ((phy_stat & (HDMI_PHY_RX_SENSE | HDMI_PHY_HPD)) == 0) {
+ mutex_lock(&hdmi->cec_notifier_mutex);
+ cec_notifier_phys_addr_invalidate(hdmi->cec_notifier);
+ mutex_unlock(&hdmi->cec_notifier_mutex);
+ }
}
if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
@@ -2561,6 +2622,7 @@ __dw_hdmi_probe(struct platform_device *pdev,
mutex_init(&hdmi->mutex);
mutex_init(&hdmi->audio_mutex);
+ mutex_init(&hdmi->cec_notifier_mutex);
spin_lock_init(&hdmi->audio_lock);
ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
@@ -2693,12 +2755,6 @@ __dw_hdmi_probe(struct platform_device *pdev,
if (ret)
goto err_iahb;
- hdmi->cec_notifier = cec_notifier_get(dev);
- if (!hdmi->cec_notifier) {
- ret = -ENOMEM;
- goto err_iahb;
- }
-
/*
* To prevent overflows in HDMI_IH_FC_STAT2, set the clk regenerator
* N and cts values before enabling phy
@@ -2763,6 +2819,7 @@ __dw_hdmi_probe(struct platform_device *pdev,
struct dw_hdmi_i2s_audio_data audio;
audio.hdmi = hdmi;
+ audio.eld = hdmi->connector.eld;
audio.write = hdmi_writeb;
audio.read = hdmi_readb;
hdmi->enable_audio = dw_hdmi_i2s_audio_enable;
@@ -2796,9 +2853,6 @@ err_iahb:
hdmi->ddc = NULL;
}
- if (hdmi->cec_notifier)
- cec_notifier_put(hdmi->cec_notifier);
-
clk_disable_unprepare(hdmi->iahb_clk);
if (hdmi->cec_clk)
clk_disable_unprepare(hdmi->cec_clk);
@@ -2820,9 +2874,6 @@ static void __dw_hdmi_remove(struct dw_hdmi *hdmi)
/* Disable all interrupts */
hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
- if (hdmi->cec_notifier)
- cec_notifier_put(hdmi->cec_notifier);
-
clk_disable_unprepare(hdmi->iahb_clk);
clk_disable_unprepare(hdmi->isfr_clk);
if (hdmi->cec_clk)
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
index 4e3ec09d3ca4..6988f12d89d9 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
@@ -865,12 +865,18 @@ enum {
/* AUD_CONF0 field values */
HDMI_AUD_CONF0_SW_RESET = 0x80,
- HDMI_AUD_CONF0_I2S_ALL_ENABLE = 0x2F,
+ HDMI_AUD_CONF0_I2S_SELECT = 0x20,
+ HDMI_AUD_CONF0_I2S_EN3 = 0x08,
+ HDMI_AUD_CONF0_I2S_EN2 = 0x04,
+ HDMI_AUD_CONF0_I2S_EN1 = 0x02,
+ HDMI_AUD_CONF0_I2S_EN0 = 0x01,
/* AUD_CONF1 field values */
HDMI_AUD_CONF1_MODE_I2S = 0x00,
- HDMI_AUD_CONF1_MODE_RIGHT_J = 0x02,
- HDMI_AUD_CONF1_MODE_LEFT_J = 0x04,
+ HDMI_AUD_CONF1_MODE_RIGHT_J = 0x20,
+ HDMI_AUD_CONF1_MODE_LEFT_J = 0x40,
+ HDMI_AUD_CONF1_MODE_BURST_1 = 0x60,
+ HDMI_AUD_CONF1_MODE_BURST_2 = 0x80,
HDMI_AUD_CONF1_WIDTH_16 = 0x10,
HDMI_AUD_CONF1_WIDTH_24 = 0x18,
@@ -938,6 +944,7 @@ enum {
HDMI_MC_CLKDIS_PIXELCLK_DISABLE = 0x1,
/* MC_SWRSTZ field values */
+ HDMI_MC_SWRSTZ_I2SSWRST_REQ = 0x08,
HDMI_MC_SWRSTZ_TMDSSWRST_REQ = 0x02,
/* MC_FLOWCTRL field values */
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 42f03a985ac0..cebc8e620820 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1312,7 +1312,7 @@ static int tc_connector_get_modes(struct drm_connector *connector)
{
struct tc_data *tc = connector_to_tc(connector);
struct edid *edid;
- unsigned int count;
+ int count;
int ret;
ret = tc_get_display_props(tc);
@@ -1321,11 +1321,9 @@ static int tc_connector_get_modes(struct drm_connector *connector)
return 0;
}
- if (tc->panel && tc->panel->funcs && tc->panel->funcs->get_modes) {
- count = tc->panel->funcs->get_modes(tc->panel);
- if (count > 0)
- return count;
- }
+ count = drm_panel_get_modes(tc->panel);
+ if (count > 0)
+ return count;
edid = drm_get_edid(connector, &tc->aux.ddc);
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 19ae119f1a5d..5a5b42db6f2a 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -1037,7 +1037,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
* As a contrast, with implicit fencing the kernel keeps track of any
* ongoing rendering, and automatically ensures that the atomic update waits
* for any pending rendering to complete. For shared buffers represented with
- * a &struct dma_buf this is tracked in &struct reservation_object.
+ * a &struct dma_buf this is tracked in &struct dma_resv.
* Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
* whereas explicit fencing is what Android wants.
*
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 354798bad576..4c766624b20d 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -986,12 +986,14 @@ static const struct drm_prop_enum_list hdmi_colorspaces[] = {
* - Kernel sends uevent with the connector id and property id through
* @drm_hdcp_update_content_protection, upon below kernel triggered
* scenarios:
- * DESIRED -> ENABLED (authentication success)
- * ENABLED -> DESIRED (termination of authentication)
+ *
+ * - DESIRED -> ENABLED (authentication success)
+ * - ENABLED -> DESIRED (termination of authentication)
* - Please note no uevents for userspace triggered property state changes,
* which can't fail, such as
- * DESIRED/ENABLED -> UNDESIRED
- * UNDESIRED -> DESIRED
+ *
+ * - DESIRED/ENABLED -> UNDESIRED
+ * - UNDESIRED -> DESIRED
* - Userspace is responsible for polling the property or listen to uevents
* to determine when the value transitions from ENABLED to DESIRED.
* This signifies the link is no longer protected and userspace should
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 6dd49a60deac..80ddf13ad996 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -159,14 +159,10 @@ drm_encoder_disable(struct drm_encoder *encoder)
if (!encoder_funcs)
return;
- drm_bridge_disable(encoder->bridge);
-
if (encoder_funcs->disable)
(*encoder_funcs->disable)(encoder);
else if (encoder_funcs->dpms)
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
-
- drm_bridge_post_disable(encoder->bridge);
}
static void __drm_helper_disable_unused_functions(struct drm_device *dev)
@@ -326,13 +322,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!encoder_funcs)
continue;
- ret = drm_bridge_mode_fixup(encoder->bridge,
- mode, adjusted_mode);
- if (!ret) {
- DRM_DEBUG_KMS("Bridge fixup failed\n");
- goto done;
- }
-
encoder_funcs = encoder->helper_private;
if (encoder_funcs->mode_fixup) {
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
@@ -364,13 +353,9 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!encoder_funcs)
continue;
- drm_bridge_disable(encoder->bridge);
-
/* Disable the encoders as the first thing we do. */
if (encoder_funcs->prepare)
encoder_funcs->prepare(encoder);
-
- drm_bridge_post_disable(encoder->bridge);
}
drm_crtc_prepare_encoders(dev);
@@ -397,8 +382,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
encoder->base.id, encoder->name, mode->name);
if (encoder_funcs->mode_set)
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
-
- drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -413,12 +396,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!encoder_funcs)
continue;
- drm_bridge_pre_enable(encoder->bridge);
-
if (encoder_funcs->commit)
encoder_funcs->commit(encoder);
-
- drm_bridge_enable(encoder->bridge);
}
/* Calculate and store various constants which
@@ -817,25 +796,14 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
/* Helper which handles bridge ordering around encoder dpms */
static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode)
{
- struct drm_bridge *bridge = encoder->bridge;
const struct drm_encoder_helper_funcs *encoder_funcs;
encoder_funcs = encoder->helper_private;
if (!encoder_funcs)
return;
- if (mode == DRM_MODE_DPMS_ON)
- drm_bridge_pre_enable(bridge);
- else
- drm_bridge_disable(bridge);
-
if (encoder_funcs->dpms)
encoder_funcs->dpms(encoder, mode);
-
- if (mode == DRM_MODE_DPMS_ON)
- drm_bridge_enable(bridge);
- else
- drm_bridge_post_disable(bridge);
}
static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index e652305d8f98..c456c3d3def2 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -328,11 +328,9 @@ void drm_minor_release(struct drm_minor *minor)
* struct drm_device *drm;
* int ret;
*
- * [
- * devm_kzalloc() can't be used here because the drm_device
- * lifetime can exceed the device lifetime if driver unbind
- * happens when userspace still has open file descriptors.
- * ]
+ * // devm_kzalloc() can't be used here because the drm_device
+ * // lifetime can exceed the device lifetime if driver unbind
+ * // happens when userspace still has open file descriptors.
* priv = kzalloc(sizeof(*priv), GFP_KERNEL);
* if (!priv)
* return -ENOMEM;
@@ -355,7 +353,7 @@ void drm_minor_release(struct drm_minor *minor)
* if (IS_ERR(priv->pclk))
* return PTR_ERR(priv->pclk);
*
- * [ Further setup, display pipeline etc ]
+ * // Further setup, display pipeline etc
*
* platform_set_drvdata(pdev, drm);
*
@@ -370,7 +368,7 @@ void drm_minor_release(struct drm_minor *minor)
* return 0;
* }
*
- * [ This function is called before the devm_ resources are released ]
+ * // This function is called before the devm_ resources are released
* static int driver_remove(struct platform_device *pdev)
* {
* struct drm_device *drm = platform_get_drvdata(pdev);
@@ -381,7 +379,7 @@ void drm_minor_release(struct drm_minor *minor)
* return 0;
* }
*
- * [ This function is called on kernel restart and shutdown ]
+ * // This function is called on kernel restart and shutdown
* static void driver_shutdown(struct platform_device *pdev)
* {
* drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index afc38cece3f5..6854f5867d51 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -159,7 +159,7 @@ void drm_gem_private_object_init(struct drm_device *dev,
kref_init(&obj->refcount);
obj->handle_count = 0;
obj->size = size;
- reservation_object_init(&obj->_resv);
+ dma_resv_init(&obj->_resv);
if (!obj->resv)
obj->resv = &obj->_resv;
@@ -633,6 +633,9 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
pagevec_init(&pvec);
for (i = 0; i < npages; i++) {
+ if (!pages[i])
+ continue;
+
if (dirty)
set_page_dirty(pages[i]);
@@ -752,7 +755,7 @@ drm_gem_object_lookup(struct drm_file *filp, u32 handle)
EXPORT_SYMBOL(drm_gem_object_lookup);
/**
- * drm_gem_reservation_object_wait - Wait on GEM object's reservation's objects
+ * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
* shared and/or exclusive fences.
* @filep: DRM file private data
* @handle: userspace handle
@@ -764,7 +767,7 @@ EXPORT_SYMBOL(drm_gem_object_lookup);
* Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out, or
* greater than 0 on success.
*/
-long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
+long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
bool wait_all, unsigned long timeout)
{
long ret;
@@ -776,7 +779,7 @@ long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
return -EINVAL;
}
- ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all,
+ ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
true, timeout);
if (ret == 0)
ret = -ETIME;
@@ -787,7 +790,7 @@ long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
return ret;
}
-EXPORT_SYMBOL(drm_gem_reservation_object_wait);
+EXPORT_SYMBOL(drm_gem_dma_resv_wait);
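A hedged sketch, not part of this patch, of how a driver wait ioctl might call the renamed helper; struct drv_wait_args is hypothetical. Note the body above converts a 0 (timeout) result to -ETIME before returning:

static int drv_gem_wait_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file)
{
	struct drv_wait_args *args = data;	/* hypothetical uapi struct */
	long ret;

	/* Wait on all fences, shared and exclusive, of the BO. */
	ret = drm_gem_dma_resv_wait(file, args->handle, true,
				    msecs_to_jiffies(args->timeout_ms));

	return ret < 0 ? ret : 0;	/* -ETIME, -ERESTARTSYS, ... */
}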
/**
* drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
@@ -953,7 +956,7 @@ drm_gem_object_release(struct drm_gem_object *obj)
if (obj->filp)
fput(obj->filp);
- reservation_object_fini(&obj->_resv);
+ dma_resv_fini(&obj->_resv);
drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);
@@ -1288,7 +1291,7 @@ retry:
if (contended != -1) {
struct drm_gem_object *obj = objs[contended];
- ret = reservation_object_lock_slow_interruptible(obj->resv,
+ ret = dma_resv_lock_slow_interruptible(obj->resv,
acquire_ctx);
if (ret) {
ww_acquire_done(acquire_ctx);
@@ -1300,16 +1303,16 @@ retry:
if (i == contended)
continue;
- ret = reservation_object_lock_interruptible(objs[i]->resv,
+ ret = dma_resv_lock_interruptible(objs[i]->resv,
acquire_ctx);
if (ret) {
int j;
for (j = 0; j < i; j++)
- reservation_object_unlock(objs[j]->resv);
+ dma_resv_unlock(objs[j]->resv);
if (contended != -1 && contended >= i)
- reservation_object_unlock(objs[contended]->resv);
+ dma_resv_unlock(objs[contended]->resv);
if (ret == -EDEADLK) {
contended = i;
@@ -1334,7 +1337,7 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
int i;
for (i = 0; i < count; i++)
- reservation_object_unlock(objs[i]->resv);
+ dma_resv_unlock(objs[i]->resv);
ww_acquire_fini(acquire_ctx);
}
@@ -1410,12 +1413,12 @@ int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
if (!write) {
struct dma_fence *fence =
- reservation_object_get_excl_rcu(obj->resv);
+ dma_resv_get_excl_rcu(obj->resv);
return drm_gem_fence_array_add(fence_array, fence);
}
- ret = reservation_object_get_fences_rcu(obj->resv, NULL,
+ ret = dma_resv_get_fences_rcu(obj->resv, NULL,
&fence_count, &fences);
if (ret || !fence_count)
return ret;
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index f61304054786..b9bcd310ca2d 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -7,7 +7,7 @@
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/slab.h>
#include <drm/drm_atomic.h>
@@ -294,7 +294,7 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
return 0;
obj = drm_gem_fb_get_obj(state->fb, 0);
- fence = reservation_object_get_excl_rcu(obj->resv);
+ fence = dma_resv_get_excl_rcu(obj->resv);
drm_atomic_set_fence_for_plane(state, fence);
return 0;
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 2f64667ac805..df8f2c8adb2b 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -75,6 +75,7 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
shmem = to_drm_gem_shmem_obj(obj);
mutex_init(&shmem->pages_lock);
mutex_init(&shmem->vmap_lock);
+ INIT_LIST_HEAD(&shmem->madv_list);
/*
* Our buffers are kept pinned, so allocating them
@@ -118,11 +119,11 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj)
if (shmem->sgt) {
dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
shmem->sgt->nents, DMA_BIDIRECTIONAL);
-
- drm_gem_shmem_put_pages(shmem);
sg_free_table(shmem->sgt);
kfree(shmem->sgt);
}
+ if (shmem->pages)
+ drm_gem_shmem_put_pages(shmem);
}
WARN_ON(shmem->pages_use_count);
@@ -362,6 +363,62 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);
+/* Update madvise status; returns true if the object has not been
+ * purged, false if it has.
+ */
+int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ mutex_lock(&shmem->pages_lock);
+
+ if (shmem->madv >= 0)
+ shmem->madv = madv;
+
+ madv = shmem->madv;
+
+ mutex_unlock(&shmem->pages_lock);
+
+ return (madv >= 0);
+}
+EXPORT_SYMBOL(drm_gem_shmem_madvise);
+
+void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
+
+ drm_gem_shmem_put_pages_locked(shmem);
+
+ shmem->madv = -1;
+
+ drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
+ drm_gem_free_mmap_offset(obj);
+
+ /* Our goal here is to return as much of the memory as
+ * is possible back to the system as we are called from OOM.
+ * To do this we must instruct the shmfs to drop all of its
+ * backing pages, *now*.
+ */
+ shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
+
+ invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
+ 0, (loff_t)-1);
+}
+EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
+
+void drm_gem_shmem_purge(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ mutex_lock(&shmem->pages_lock);
+ drm_gem_shmem_purge_locked(obj);
+ mutex_unlock(&shmem->pages_lock);
+}
+EXPORT_SYMBOL(drm_gem_shmem_purge);
+
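A hedged sketch of the intended wiring for the new helpers: an ioctl marks a BO purgeable and a shrinker later reclaims it. The args struct and ioctl plumbing are hypothetical; the helpers are the ones added above:

static int drv_madvise_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file)
{
	struct drv_madvise_args *args = data;	/* hypothetical uapi struct */
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Mark "don't need"; the result says whether the backing
	 * pages are still resident. */
	args->retained = drm_gem_shmem_madvise(obj, args->madv);

	drm_gem_object_put_unlocked(obj);
	return 0;
}

/* Shrinker side: only purge objects still marked purgeable. */
static void drv_try_purge(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	if (drm_gem_shmem_is_purgeable(shmem))
		drm_gem_shmem_purge(obj);
}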
/**
* drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
* @file: DRM file structure to create the dumb buffer for
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index d9a5ac81949e..221a8528c993 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -40,7 +40,7 @@ MODULE_LICENSE("GPL and additional rights");
/* Backward compatibility for drm_kms_helper.edid_firmware */
static int edid_firmware_set(const char *val, const struct kernel_param *kp)
{
- DRM_NOTE("drm_kms_firmware.edid_firmware is deprecated, please use drm.edid_firmware instead.\n");
+ DRM_NOTE("drm_kms_helper.edid_firmware is deprecated, please use drm.edid_firmware instead.\n");
return __drm_set_edid_firmware_path(val);
}
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index dbd5b873e8f2..6b0bf42039cf 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -123,18 +123,110 @@ EXPORT_SYMBOL(drm_panel_attach);
*
* This function should not be called by the panel device itself. It
* is only for the drm device that called drm_panel_attach().
- *
- * Return: 0 on success or a negative error code on failure.
*/
-int drm_panel_detach(struct drm_panel *panel)
+void drm_panel_detach(struct drm_panel *panel)
{
panel->connector = NULL;
panel->drm = NULL;
-
- return 0;
}
EXPORT_SYMBOL(drm_panel_detach);
+/**
+ * drm_panel_prepare - power on a panel
+ * @panel: DRM panel
+ *
+ * Calling this function will enable power and deassert any reset signals to
+ * the panel. After this has completed it is possible to communicate with any
+ * integrated circuitry via a command bus.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_panel_prepare(struct drm_panel *panel)
+{
+ if (panel && panel->funcs && panel->funcs->prepare)
+ return panel->funcs->prepare(panel);
+
+ return panel ? -ENOSYS : -EINVAL;
+}
+EXPORT_SYMBOL(drm_panel_prepare);
+
+/**
+ * drm_panel_unprepare - power off a panel
+ * @panel: DRM panel
+ *
+ * Calling this function will completely power off a panel (assert the panel's
+ * reset, turn off power supplies, ...). After this function has completed, it
+ * is usually no longer possible to communicate with the panel until another
+ * call to drm_panel_prepare().
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_panel_unprepare(struct drm_panel *panel)
+{
+ if (panel && panel->funcs && panel->funcs->unprepare)
+ return panel->funcs->unprepare(panel);
+
+ return panel ? -ENOSYS : -EINVAL;
+}
+EXPORT_SYMBOL(drm_panel_unprepare);
+
+/**
+ * drm_panel_enable - enable a panel
+ * @panel: DRM panel
+ *
+ * Calling this function will cause the panel display drivers to be turned on
+ * and the backlight to be enabled. Content will be visible on screen after
+ * this call completes.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_panel_enable(struct drm_panel *panel)
+{
+ if (panel && panel->funcs && panel->funcs->enable)
+ return panel->funcs->enable(panel);
+
+ return panel ? -ENOSYS : -EINVAL;
+}
+EXPORT_SYMBOL(drm_panel_enable);
+
+/**
+ * drm_panel_disable - disable a panel
+ * @panel: DRM panel
+ *
+ * This will typically turn off the panel's backlight or disable the display
+ * drivers. For smart panels it should still be possible to communicate with
+ * the integrated circuitry via any command bus after this call.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int drm_panel_disable(struct drm_panel *panel)
+{
+ if (panel && panel->funcs && panel->funcs->disable)
+ return panel->funcs->disable(panel);
+
+ return panel ? -ENOSYS : -EINVAL;
+}
+EXPORT_SYMBOL(drm_panel_disable);
+
+/**
+ * drm_panel_get_modes - probe the available display modes of a panel
+ * @panel: DRM panel
+ *
+ * The modes probed from the panel are automatically added to the connector
+ * that the panel is attached to.
+ *
+ * Return: The number of modes available from the panel on success or a
+ * negative error code on failure.
+ */
+int drm_panel_get_modes(struct drm_panel *panel)
+{
+ if (panel && panel->funcs && panel->funcs->get_modes)
+ return panel->funcs->get_modes(panel);
+
+ return panel ? -ENOSYS : -EINVAL;
+}
+EXPORT_SYMBOL(drm_panel_get_modes);
+
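Taken together, the kerneldoc above encodes the expected power sequence. A minimal sketch of the call order from an encoder or bridge driver, with generic names and error handling trimmed:

static void drv_output_on(struct drm_panel *panel)
{
	drm_panel_prepare(panel);	/* power up, deassert reset */
	/* ... start scanout / the video stream here ... */
	drm_panel_enable(panel);	/* backlight on, image visible */
}

static void drv_output_off(struct drm_panel *panel)
{
	drm_panel_disable(panel);	/* backlight off */
	/* ... stop scanout / the video stream here ... */
	drm_panel_unprepare(panel);	/* power down, assert reset */
}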
#ifdef CONFIG_OF
/**
* of_drm_find_panel - look up a panel using a device tree node
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 1438dcb3ebb1..4b5c7b0ed714 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -29,21 +29,97 @@
/**
* DOC: Overview
*
- * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are
- * persistent objects that contain an optional fence. The fence can be updated
- * with a new fence, or be NULL.
+ * DRM synchronisation objects (syncobj, see struct &drm_syncobj) provide a
+ * container for a synchronization primitive which can be used by userspace
+ * to explicitly synchronize GPU commands, can be shared between userspace
+ * processes, and can be shared between different DRM drivers.
+ * Their primary use-case is to implement Vulkan fences and semaphores.
+ * The syncobj userspace API provides ioctls for several operations:
*
- * syncobj's can be waited upon, where it will wait for the underlying
- * fence.
+ * - Creation and destruction of syncobjs
+ * - Import and export of syncobjs to/from a syncobj file descriptor
+ * - Import and export a syncobj's underlying fence to/from a sync file
+ * - Reset a syncobj (set its fence to NULL)
+ * - Signal a syncobj (set a trivially signaled fence)
+ * - Wait for a syncobj's fence to appear and be signaled
*
- * syncobj's can be export to fd's and back, these fd's are opaque and
- * have no other use case, except passing the syncobj between processes.
+ * At its core, a syncobj is simply a wrapper around a pointer to a struct
+ * &dma_fence which may be NULL.
+ * When a syncobj is first created, its pointer is either NULL or a pointer
+ * to an already signaled fence depending on whether the
+ * &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
+ * &DRM_IOCTL_SYNCOBJ_CREATE.
+ * When GPU work which signals a syncobj is enqueued in a DRM driver,
+ * the syncobj fence is replaced with a fence which will be signaled by the
+ * completion of that work.
+ * When GPU work which waits on a syncobj is enqueued in a DRM driver, the
+ * driver retrieves the syncobj's current fence at the time the work is
+ * enqueued and waits on that fence before submitting the work to hardware.
+ * If the syncobj's fence is NULL, the enqueue operation is expected to fail.
+ * All manipulation of the syncobj's fence happens in terms of the current
+ * fence at the time the ioctl is called by userspace regardless of whether
+ * that operation is an immediate host-side operation (signal or reset) or
+ * an operation which is enqueued in some driver queue.
+ * &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used to
+ * manipulate a syncobj from the host by resetting its pointer to NULL or
+ * setting its pointer to a fence which is already signaled.
*
- * Their primary use-case is to implement Vulkan fences and semaphores.
*
- * syncobj have a kref reference count, but also have an optional file.
- * The file is only created once the syncobj is exported.
- * The file takes a reference on the kref.
+ * Host-side wait on syncobjs
+ * --------------------------
+ *
+ * &DRM_IOCTL_SYNCOBJ_WAIT takes an array of syncobj handles and does a
+ * host-side wait on all of the syncobj fences simultaneously.
+ * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL is set, the wait ioctl will wait for
+ * all of the syncobj fences to be signaled before it returns.
+ * Otherwise, it returns once at least one syncobj fence has been signaled
+ * and the index of a signaled fence is written back to the client.
+ *
+ * Unlike enqueued GPU work dependencies, which fail if they see a NULL
+ * fence in a syncobj, if &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set,
+ * the host-side wait will first wait for the syncobj to receive a non-NULL
+ * fence and then wait on that fence.
+ * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is not set and any one of the
+ * syncobjs in the array has a NULL fence, -EINVAL will be returned.
+ * Assuming the syncobj starts off with a NULL fence, this allows a client
+ * to do a host wait in one thread (or process) which waits on GPU work
+ * submitted in another thread (or process) without having to manually
+ * synchronize between the two.
+ * This requirement is inherited from the Vulkan fence API.
+ *
+ *
+ * Import/export of syncobjs
+ * -------------------------
+ *
+ * &DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE and &DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
+ * provide two mechanisms for import/export of syncobjs.
+ *
+ * The first lets the client import or export an entire syncobj to a file
+ * descriptor.
+ * These fds are opaque and have no use other than passing the syncobj
+ * between processes.
+ * All exported file descriptors and any syncobj handles created as a
+ * result of importing those file descriptors own a reference to the
+ * same underlying struct &drm_syncobj and the syncobj can be used
+ * persistently across all the processes with which it is shared.
+ * The syncobj is freed only once the last reference is dropped.
+ * Unlike dma-buf, importing a syncobj creates a new handle (with its own
+ * reference) for every import instead of de-duplicating.
+ * The primary use-case of this persistent import/export is for shared
+ * Vulkan fences and semaphores.
+ *
+ * The second import/export mechanism, which is indicated by
+ * &DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE or
+ * &DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE, lets the client
+ * import/export the syncobj's current fence from/to a &sync_file.
+ * When a syncobj is exported to a sync file, that sync file wraps the
+ * syncobj's fence at the time of export, and any later signal or reset
+ * operations on the syncobj will not affect the exported sync file.
+ * When a sync file is imported into a syncobj, the syncobj's fence is set
+ * to the fence wrapped by that sync file.
+ * Because sync files are immutable, resetting or signaling the syncobj
+ * will not affect any sync files whose fences have been imported into the
+ * syncobj.
*/
#include <linux/anon_inodes.h>
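To make the ioctl semantics above concrete, here is a minimal userspace sketch against the drm.h uapi; the helper and its flow are illustrative, not part of this patch, and assume an already-open DRM fd:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>	/* kernel uapi header */

	/* Create a syncobj and host-wait on it. WAIT_FOR_SUBMIT makes
	 * the wait first block until a fence appears, matching the
	 * Vulkan fence semantics described above. */
	static int syncobj_create_and_wait(int drm_fd)
	{
		struct drm_syncobj_create create = { .flags = 0 };
		struct drm_syncobj_wait wait = { 0 };

		if (ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_CREATE, &create))
			return -1;

		/* ... hand create.handle to a GPU submission ioctl ... */

		wait.handles = (uintptr_t)&create.handle;
		wait.count_handles = 1;
		wait.timeout_nsec = INT64_MAX;
		wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
			     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;

		return ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
	}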
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 160ce3c060a5..7e4e2959bf4f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -3,6 +3,8 @@
* Copyright (C) 2014-2018 Etnaviv Project
*/
+#include <drm/drm_drv.h>
+
#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
@@ -116,7 +118,9 @@ static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
u32 *ptr = buf->vaddr + off;
dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
- ptr, etnaviv_cmdbuf_get_va(buf) + off, size - len * 4 - off);
+ ptr, etnaviv_cmdbuf_get_va(buf,
+ &gpu->mmu_context->cmdbuf_mapping) +
+ off, size - len * 4 - off);
print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
ptr, len * 4, 0);
@@ -149,7 +153,9 @@ static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
buffer->user_size = 0;
- return etnaviv_cmdbuf_get_va(buffer) + buffer->user_size;
+ return etnaviv_cmdbuf_get_va(buffer,
+ &gpu->mmu_context->cmdbuf_mapping) +
+ buffer->user_size;
}
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
@@ -162,8 +168,9 @@ u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
buffer->user_size = 0;
CMD_WAIT(buffer);
- CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
- buffer->user_size - 4);
+ CMD_LINK(buffer, 2,
+ etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
+ + buffer->user_size - 4);
return buffer->user_size / 8;
}
@@ -203,7 +210,7 @@ u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe
return buffer->user_size / 8;
}
-u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu)
+u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id)
{
struct etnaviv_cmdbuf *buffer = &gpu->buffer;
@@ -212,7 +219,7 @@ u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu)
buffer->user_size = 0;
CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
- VIVS_MMUv2_PTA_CONFIG_INDEX(0));
+ VIVS_MMUv2_PTA_CONFIG_INDEX(id));
CMD_END(buffer);
@@ -289,8 +296,9 @@ void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
/* Append waitlink */
CMD_WAIT(buffer);
- CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
- buffer->user_size - 4);
+ CMD_LINK(buffer, 2,
+ etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
+ + buffer->user_size - 4);
/*
* Kick off the 'sync point' command by replacing the previous
@@ -304,36 +312,41 @@ void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
- unsigned int event, struct etnaviv_cmdbuf *cmdbuf)
+ struct etnaviv_iommu_context *mmu_context, unsigned int event,
+ struct etnaviv_cmdbuf *cmdbuf)
{
struct etnaviv_cmdbuf *buffer = &gpu->buffer;
unsigned int waitlink_offset = buffer->user_size - 16;
u32 return_target, return_dwords;
u32 link_target, link_dwords;
bool switch_context = gpu->exec_state != exec_state;
+ bool switch_mmu_context = gpu->mmu_context != mmu_context;
+ unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
+ bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
lockdep_assert_held(&gpu->lock);
if (drm_debug & DRM_UT_DRIVER)
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
- link_target = etnaviv_cmdbuf_get_va(cmdbuf);
+ link_target = etnaviv_cmdbuf_get_va(cmdbuf,
+ &gpu->mmu_context->cmdbuf_mapping);
link_dwords = cmdbuf->size / 8;
/*
- * If we need maintanence prior to submitting this buffer, we will
+ * If we need maintenance prior to submitting this buffer, we will
* need to append a mmu flush load state, followed by a new
* link to this buffer - a total of four additional words.
*/
- if (gpu->mmu->need_flush || switch_context) {
+ if (need_flush || switch_context) {
u32 target, extra_dwords;
/* link command */
extra_dwords = 1;
/* flush command */
- if (gpu->mmu->need_flush) {
- if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
+ if (need_flush) {
+ if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1)
extra_dwords += 1;
else
extra_dwords += 3;
@@ -343,11 +356,28 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
if (switch_context)
extra_dwords += 4;
+ /* PTA load command */
+ if (switch_mmu_context && gpu->sec_mode == ETNA_SEC_KERNEL)
+ extra_dwords += 1;
+
target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
+ /*
+ * Switch MMU context if necessary. Must be done after the
+ * link target has been calculated, as the jump forward in the
+ * kernel ring still uses the last active MMU context before
+ * the switch.
+ */
+ if (switch_mmu_context) {
+ struct etnaviv_iommu_context *old_context = gpu->mmu_context;
+
+ etnaviv_iommu_context_get(mmu_context);
+ gpu->mmu_context = mmu_context;
+ etnaviv_iommu_context_put(old_context);
+ }
- if (gpu->mmu->need_flush) {
+ if (need_flush) {
/* Add the MMU flush */
- if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
+ if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1) {
CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
@@ -355,17 +385,30 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
} else {
+ u32 flush = VIVS_MMUv2_CONFIGURATION_MODE_MASK |
+ VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH;
+
+ if (switch_mmu_context &&
+ gpu->sec_mode == ETNA_SEC_KERNEL) {
+ unsigned short id =
+ etnaviv_iommuv2_get_pta_id(gpu->mmu_context);
+ CMD_LOAD_STATE(buffer,
+ VIVS_MMUv2_PTA_CONFIG,
+ VIVS_MMUv2_PTA_CONFIG_INDEX(id));
+ }
+
+ if (gpu->sec_mode == ETNA_SEC_NONE)
+ flush |= etnaviv_iommuv2_get_mtlb_addr(gpu->mmu_context);
+
CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
- VIVS_MMUv2_CONFIGURATION_MODE_MASK |
- VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK |
- VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH);
+ flush);
CMD_SEM(buffer, SYNC_RECIPIENT_FE,
SYNC_RECIPIENT_PE);
CMD_STALL(buffer, SYNC_RECIPIENT_FE,
SYNC_RECIPIENT_PE);
}
- gpu->mmu->need_flush = false;
+ gpu->flush_seq = new_flush_seq;
}
if (switch_context) {
@@ -374,6 +417,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
}
/* And the link to the submitted buffer */
+ link_target = etnaviv_cmdbuf_get_va(cmdbuf,
+ &gpu->mmu_context->cmdbuf_mapping);
CMD_LINK(buffer, link_dwords, link_target);
/* Update the link target to point to above instructions */
@@ -410,12 +455,14 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
VIVS_GL_EVENT_FROM_PE);
CMD_WAIT(buffer);
- CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
- buffer->user_size - 4);
+ CMD_LINK(buffer, 2,
+ etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
+ + buffer->user_size - 4);
if (drm_debug & DRM_UT_DRIVER)
pr_info("stream link to 0x%08x @ 0x%08x %p\n",
- return_target, etnaviv_cmdbuf_get_va(cmdbuf),
+ return_target,
+ etnaviv_cmdbuf_get_va(cmdbuf, &gpu->mmu_context->cmdbuf_mapping),
cmdbuf->vaddr);
if (drm_debug & DRM_UT_DRIVER) {
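The need_flush logic above replaces the old gpu->mmu->need_flush flag with a per-context sequence number, so a flush happens on any context switch or whenever the current context's mappings changed since the last flush. A condensed sketch of that decision, with names taken from this patch but the helper itself purely illustrative:

	/* Illustrative only: the flush decision etnaviv_buffer_queue()
	 * now makes. A context bumps its flush_seq whenever mappings
	 * change; the GPU remembers the last sequence it flushed. */
	static bool gpu_needs_mmu_flush(struct etnaviv_gpu *gpu,
					struct etnaviv_iommu_context *ctx)
	{
		/* Switching to a different context always flushes ... */
		if (gpu->mmu_context != ctx)
			return true;

		/* ... as does any mapping change since the last flush. */
		return gpu->flush_seq != READ_ONCE(ctx->flush_seq);
	}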
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
index a3c44f145c1d..9dc20d892c15 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
@@ -3,27 +3,26 @@
* Copyright (C) 2017-2018 Etnaviv Project
*/
+#include <linux/dma-mapping.h>
+
#include <drm/drm_mm.h>
#include "etnaviv_cmdbuf.h"
+#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"
-#define SUBALLOC_SIZE SZ_256K
+#define SUBALLOC_SIZE SZ_512K
#define SUBALLOC_GRANULE SZ_4K
#define SUBALLOC_GRANULES (SUBALLOC_SIZE / SUBALLOC_GRANULE)
struct etnaviv_cmdbuf_suballoc {
/* suballocated dma buffer properties */
- struct etnaviv_gpu *gpu;
+ struct device *dev;
void *vaddr;
dma_addr_t paddr;
- /* GPU mapping */
- u32 iova;
- struct drm_mm_node vram_node; /* only used on MMUv2 */
-
/* allocation management */
struct mutex lock;
DECLARE_BITMAP(granule_map, SUBALLOC_GRANULES);
@@ -32,7 +31,7 @@ struct etnaviv_cmdbuf_suballoc {
};
struct etnaviv_cmdbuf_suballoc *
-etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu * gpu)
+etnaviv_cmdbuf_suballoc_new(struct device *dev)
{
struct etnaviv_cmdbuf_suballoc *suballoc;
int ret;
@@ -41,36 +40,44 @@ etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu * gpu)
if (!suballoc)
return ERR_PTR(-ENOMEM);
- suballoc->gpu = gpu;
+ suballoc->dev = dev;
mutex_init(&suballoc->lock);
init_waitqueue_head(&suballoc->free_event);
- suballoc->vaddr = dma_alloc_wc(gpu->dev, SUBALLOC_SIZE,
+ BUILD_BUG_ON(ETNAVIV_SOFTPIN_START_ADDRESS < SUBALLOC_SIZE);
+ suballoc->vaddr = dma_alloc_wc(dev, SUBALLOC_SIZE,
&suballoc->paddr, GFP_KERNEL);
- if (!suballoc->vaddr)
+ if (!suballoc->vaddr) {
+ ret = -ENOMEM;
goto free_suballoc;
-
- ret = etnaviv_iommu_get_suballoc_va(gpu, suballoc->paddr,
- &suballoc->vram_node, SUBALLOC_SIZE,
- &suballoc->iova);
- if (ret)
- goto free_dma;
+ }
return suballoc;
-free_dma:
- dma_free_wc(gpu->dev, SUBALLOC_SIZE, suballoc->vaddr, suballoc->paddr);
free_suballoc:
kfree(suballoc);
- return NULL;
+ return ERR_PTR(ret);
+}
+
+int etnaviv_cmdbuf_suballoc_map(struct etnaviv_cmdbuf_suballoc *suballoc,
+ struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping,
+ u32 memory_base)
+{
+ return etnaviv_iommu_get_suballoc_va(context, mapping, memory_base,
+ suballoc->paddr, SUBALLOC_SIZE);
+}
+
+void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping)
+{
+ etnaviv_iommu_put_suballoc_va(context, mapping);
}
void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
{
- etnaviv_iommu_put_suballoc_va(suballoc->gpu, &suballoc->vram_node,
- SUBALLOC_SIZE, suballoc->iova);
- dma_free_wc(suballoc->gpu->dev, SUBALLOC_SIZE, suballoc->vaddr,
+ dma_free_wc(suballoc->dev, SUBALLOC_SIZE, suballoc->vaddr,
suballoc->paddr);
kfree(suballoc);
}
@@ -95,7 +102,7 @@ retry:
suballoc->free_space,
msecs_to_jiffies(10 * 1000));
if (!ret) {
- dev_err(suballoc->gpu->dev,
+ dev_err(suballoc->dev,
"Timeout waiting for cmdbuf space\n");
return -ETIMEDOUT;
}
@@ -123,9 +130,10 @@ void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
wake_up_all(&suballoc->free_event);
}
-u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf)
+u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf,
+ struct etnaviv_vram_mapping *mapping)
{
- return buf->suballoc->iova + buf->suballoc_offset;
+ return mapping->iova + buf->suballoc_offset;
}
dma_addr_t etnaviv_cmdbuf_get_pa(struct etnaviv_cmdbuf *buf)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
index 4d5d1a77eb2a..ad6fd8eb0378 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
@@ -8,7 +8,9 @@
#include <linux/types.h>
-struct etnaviv_gpu;
+struct device;
+struct etnaviv_iommu_context;
+struct etnaviv_vram_mapping;
struct etnaviv_cmdbuf_suballoc;
struct etnaviv_perfmon_request;
@@ -23,15 +25,22 @@ struct etnaviv_cmdbuf {
};
struct etnaviv_cmdbuf_suballoc *
-etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu * gpu);
+etnaviv_cmdbuf_suballoc_new(struct device *dev);
void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc);
+int etnaviv_cmdbuf_suballoc_map(struct etnaviv_cmdbuf_suballoc *suballoc,
+ struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping,
+ u32 memory_base);
+void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping);
int etnaviv_cmdbuf_init(struct etnaviv_cmdbuf_suballoc *suballoc,
struct etnaviv_cmdbuf *cmdbuf, u32 size);
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
-u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf);
+u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf,
+ struct etnaviv_vram_mapping *mapping);
dma_addr_t etnaviv_cmdbuf_get_pa(struct etnaviv_cmdbuf *buf);
#endif /* __ETNAVIV_CMDBUF_H__ */
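With the suballocator now created once per DRM device and mapped into each MMU context on demand, the intended lifecycle of the API above looks roughly like this; a sketch under the signatures in this header, with error handling mostly elided:

	/* Sketch, not verbatim driver code: one suballoc per device,
	 * mapped on demand into an MMU context, then used for cmdbufs. */
	static int example_cmdbuf_setup(struct drm_device *drm,
					struct etnaviv_iommu_context *context,
					u32 memory_base)
	{
		struct etnaviv_cmdbuf_suballoc *suballoc;
		struct etnaviv_vram_mapping mapping = {};
		struct etnaviv_cmdbuf cmdbuf = {};
		u32 va;

		suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
		if (IS_ERR(suballoc))
			return PTR_ERR(suballoc);

		etnaviv_cmdbuf_suballoc_map(suballoc, context, &mapping,
					    memory_base);
		etnaviv_cmdbuf_init(suballoc, &cmdbuf, PAGE_SIZE);

		/* The GPU address is now a property of the per-context
		 * mapping rather than of the suballoc itself: */
		va = etnaviv_cmdbuf_get_va(&cmdbuf, &mapping);
		(void)va;

		etnaviv_cmdbuf_free(&cmdbuf);
		etnaviv_cmdbuf_suballoc_unmap(context, &mapping);
		etnaviv_cmdbuf_suballoc_destroy(suballoc);
		return 0;
	}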
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 08e033c1758d..1f9c01be40d7 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -4,8 +4,17 @@
*/
#include <linux/component.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
#include <linux/of_platform.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_of.h>
+#include <drm/drm_prime.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
@@ -41,12 +50,19 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
{
struct etnaviv_drm_private *priv = dev->dev_private;
struct etnaviv_file_private *ctx;
- int i;
+ int ret, i;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ ctx->mmu = etnaviv_iommu_context_init(priv->mmu_global,
+ priv->cmdbuf_suballoc);
+ if (!ctx->mmu) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
for (i = 0; i < ETNA_MAX_PIPES; i++) {
struct etnaviv_gpu *gpu = priv->gpu[i];
struct drm_sched_rq *rq;
@@ -61,6 +77,10 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
file->driver_priv = ctx;
return 0;
+
+out_free:
+ kfree(ctx);
+ return ret;
}
static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
@@ -76,6 +96,8 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
drm_sched_entity_destroy(&ctx->sched_entity[i]);
}
+ etnaviv_iommu_context_put(ctx->mmu);
+
kfree(ctx);
}
@@ -107,12 +129,29 @@ static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
struct drm_printer p = drm_seq_file_printer(m);
+ struct etnaviv_iommu_context *mmu_context;
seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));
- mutex_lock(&gpu->mmu->lock);
- drm_mm_print(&gpu->mmu->mm, &p);
- mutex_unlock(&gpu->mmu->lock);
+ /*
+ * Lock the GPU to avoid an MMU context switch just now and elevate
+ * the refcount of the current context to avoid it disappearing from
+ * under our feet.
+ */
+ mutex_lock(&gpu->lock);
+ mmu_context = gpu->mmu_context;
+ if (mmu_context)
+ etnaviv_iommu_context_get(mmu_context);
+ mutex_unlock(&gpu->lock);
+
+ if (!mmu_context)
+ return 0;
+
+ mutex_lock(&mmu_context->lock);
+ drm_mm_print(&mmu_context->mm, &p);
+ mutex_unlock(&mmu_context->lock);
+
+ etnaviv_iommu_context_put(mmu_context);
return 0;
}
@@ -486,7 +525,7 @@ static struct drm_driver etnaviv_drm_driver = {
.desc = "etnaviv DRM",
.date = "20151214",
.major = 1,
- .minor = 2,
+ .minor = 3,
};
/*
@@ -517,23 +556,32 @@ static int etnaviv_bind(struct device *dev)
INIT_LIST_HEAD(&priv->gem_list);
priv->num_gpus = 0;
+ priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
+ if (IS_ERR(priv->cmdbuf_suballoc)) {
+ dev_err(drm->dev, "Failed to create cmdbuf suballocator\n");
+ ret = PTR_ERR(priv->cmdbuf_suballoc);
+ goto out_free_priv;
+ }
+
dev_set_drvdata(dev, drm);
ret = component_bind_all(dev, drm);
if (ret < 0)
- goto out_bind;
+ goto out_destroy_suballoc;
load_gpu(drm);
ret = drm_dev_register(drm, 0);
if (ret)
- goto out_register;
+ goto out_unbind;
return 0;
-out_register:
+out_unbind:
component_unbind_all(dev, drm);
-out_bind:
+out_destroy_suballoc:
+ etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);
+out_free_priv:
kfree(priv);
out_put:
drm_dev_put(drm);
@@ -552,6 +600,8 @@ static void etnaviv_unbind(struct device *dev)
dev->dma_parms = NULL;
+ etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);
+
drm->dev_private = NULL;
kfree(priv);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 8798423705e1..32cfa5a48d42 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -6,21 +6,12 @@
#ifndef __ETNAVIV_DRV_H__
#define __ETNAVIV_DRV_H__
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/cpufreq.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
#include <linux/list.h>
+#include <linux/mm_types.h>
+#include <linux/sizes.h>
#include <linux/time64.h>
#include <linux/types.h>
-#include <linux/sizes.h>
-#include <linux/mm_types.h>
-#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/etnaviv_drm.h>
@@ -31,12 +22,12 @@ struct etnaviv_gpu;
struct etnaviv_mmu;
struct etnaviv_gem_object;
struct etnaviv_gem_submit;
+struct etnaviv_iommu_global;
+
+#define ETNAVIV_SOFTPIN_START_ADDRESS SZ_4M /* must be >= SUBALLOC_SIZE */
struct etnaviv_file_private {
- /*
- * When per-context address spaces are supported we'd keep track of
- * the context's page-tables here.
- */
+ struct etnaviv_iommu_context *mmu;
struct drm_sched_entity sched_entity[ETNA_MAX_PIPES];
};
@@ -45,6 +36,9 @@ struct etnaviv_drm_private {
struct device_dma_parameters dma_parms;
struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
+ struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
+ struct etnaviv_iommu_global *mmu_global;
+
/* list of GEM objects: */
struct mutex gem_lock;
struct list_head gem_list;
@@ -76,10 +70,11 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
uintptr_t ptr, u32 size, u32 flags, u32 *handle);
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
-u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu);
+u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id);
void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event);
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
+ struct etnaviv_iommu_context *mmu,
unsigned int event, struct etnaviv_cmdbuf *cmdbuf);
void etnaviv_validate_init(void);
bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 9a6f5b65488f..698db540972c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -4,6 +4,8 @@
*/
#include <linux/devcoredump.h>
+#include <linux/moduleparam.h>
+
#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
@@ -91,9 +93,9 @@ static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
}
static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
- struct etnaviv_gpu *gpu, size_t mmu_size)
+ struct etnaviv_iommu_context *mmu, size_t mmu_size)
{
- etnaviv_iommu_dump(gpu->mmu, iter->data);
+ etnaviv_iommu_dump(mmu, iter->data);
etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
}
@@ -108,46 +110,35 @@ static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
etnaviv_core_dump_header(iter, type, iter->data + size);
}
-void etnaviv_core_dump(struct etnaviv_gpu *gpu)
+void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
{
+ struct etnaviv_gpu *gpu = submit->gpu;
struct core_dump_iterator iter;
- struct etnaviv_vram_mapping *vram;
struct etnaviv_gem_object *obj;
- struct etnaviv_gem_submit *submit;
- struct drm_sched_job *s_job;
unsigned int n_obj, n_bomap_pages;
size_t file_size, mmu_size;
__le64 *bomap, *bomap_start;
+ int i;
/* Only catch the first event, or when manually re-armed */
if (!etnaviv_dump_core)
return;
etnaviv_dump_core = false;
- mutex_lock(&gpu->mmu->lock);
+ mutex_lock(&gpu->mmu_context->lock);
- mmu_size = etnaviv_iommu_dump_size(gpu->mmu);
+ mmu_size = etnaviv_iommu_dump_size(gpu->mmu_context);
- /* We always dump registers, mmu, ring and end marker */
- n_obj = 4;
+ /* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
+ n_obj = 5;
n_bomap_pages = 0;
file_size = ARRAY_SIZE(etnaviv_dump_registers) *
sizeof(struct etnaviv_dump_registers) +
- mmu_size + gpu->buffer.size;
-
- /* Add in the active command buffers */
- list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
- submit = to_etnaviv_submit(s_job);
- file_size += submit->cmdbuf.size;
- n_obj++;
- }
+ mmu_size + gpu->buffer.size + submit->cmdbuf.size;
/* Add in the active buffer objects */
- list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
- if (!vram->use)
- continue;
-
- obj = vram->object;
+ for (i = 0; i < submit->nr_bos; i++) {
+ obj = submit->bos[i].obj;
file_size += obj->base.size;
n_bomap_pages += obj->base.size >> PAGE_SHIFT;
n_obj++;
@@ -166,7 +157,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
PAGE_KERNEL);
if (!iter.start) {
- mutex_unlock(&gpu->mmu->lock);
+ mutex_unlock(&gpu->mmu_context->lock);
dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
return;
}
@@ -178,17 +169,16 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
memset(iter.hdr, 0, iter.data - iter.start);
etnaviv_core_dump_registers(&iter, gpu);
- etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
+ etnaviv_core_dump_mmu(&iter, gpu->mmu_context, mmu_size);
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
gpu->buffer.size,
- etnaviv_cmdbuf_get_va(&gpu->buffer));
+ etnaviv_cmdbuf_get_va(&gpu->buffer,
+ &gpu->mmu_context->cmdbuf_mapping));
- list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
- submit = to_etnaviv_submit(s_job);
- etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
- submit->cmdbuf.vaddr, submit->cmdbuf.size,
- etnaviv_cmdbuf_get_va(&submit->cmdbuf));
- }
+ etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
+ submit->cmdbuf.vaddr, submit->cmdbuf.size,
+ etnaviv_cmdbuf_get_va(&submit->cmdbuf,
+ &gpu->mmu_context->cmdbuf_mapping));
/* Reserve space for the bomap */
if (n_bomap_pages) {
@@ -201,14 +191,13 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
bomap_start = bomap = NULL;
}
- list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct etnaviv_vram_mapping *vram;
struct page **pages;
void *vaddr;
- if (vram->use == 0)
- continue;
-
- obj = vram->object;
+ obj = submit->bos[i].obj;
+ vram = submit->bos[i].mapping;
mutex_lock(&obj->lock);
pages = etnaviv_gem_get_pages(obj);
@@ -232,7 +221,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
obj->base.size);
}
- mutex_unlock(&gpu->mmu->lock);
+ mutex_unlock(&gpu->mmu_context->lock);
etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.h b/drivers/gpu/drm/etnaviv/etnaviv_dump.h
index 2d916c2667ee..a125c46b895b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.h
@@ -35,8 +35,8 @@ struct etnaviv_dump_registers {
};
#ifdef __KERNEL__
-struct etnaviv_gpu;
-void etnaviv_core_dump(struct etnaviv_gpu *gpu);
+struct etnaviv_gem_submit;
+void etnaviv_core_dump(struct etnaviv_gem_submit *submit);
#endif
#endif
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 17ca602db60a..cb1faaac380a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -3,10 +3,11 @@
* Copyright (C) 2015-2018 Etnaviv Project
*/
-#include <linux/spinlock.h>
+#include <drm/drm_prime.h>
+#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
-#include <linux/sched/mm.h>
-#include <linux/sched/task.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
@@ -222,30 +223,18 @@ int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
- struct etnaviv_iommu *mmu)
+ struct etnaviv_iommu_context *context)
{
struct etnaviv_vram_mapping *mapping;
list_for_each_entry(mapping, &obj->vram_list, obj_node) {
- if (mapping->mmu == mmu)
+ if (mapping->context == context)
return mapping;
}
return NULL;
}
-void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
-{
- struct etnaviv_gem_object *etnaviv_obj = mapping->object;
-
- drm_gem_object_get(&etnaviv_obj->base);
-
- mutex_lock(&etnaviv_obj->lock);
- WARN_ON(mapping->use == 0);
- mapping->use += 1;
- mutex_unlock(&etnaviv_obj->lock);
-}
-
void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
struct etnaviv_gem_object *etnaviv_obj = mapping->object;
@@ -259,7 +248,8 @@ void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
}
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
- struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
+ struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
+ u64 va)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct etnaviv_vram_mapping *mapping;
@@ -267,7 +257,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
int ret = 0;
mutex_lock(&etnaviv_obj->lock);
- mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
+ mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
if (mapping) {
/*
* Holding the object lock prevents the use count changing
@@ -276,12 +266,12 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
* the MMU owns this mapping to close this race.
*/
if (mapping->use == 0) {
- mutex_lock(&gpu->mmu->lock);
- if (mapping->mmu == gpu->mmu)
+ mutex_lock(&mmu_context->lock);
+ if (mapping->context == mmu_context)
mapping->use += 1;
else
mapping = NULL;
- mutex_unlock(&gpu->mmu->lock);
+ mutex_unlock(&mmu_context->lock);
if (mapping)
goto out;
} else {
@@ -314,15 +304,19 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
list_del(&mapping->obj_node);
}
- mapping->mmu = gpu->mmu;
+ etnaviv_iommu_context_get(mmu_context);
+ mapping->context = mmu_context;
mapping->use = 1;
- ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
- mapping);
- if (ret < 0)
+ ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
+ mmu_context->global->memory_base,
+ mapping, va);
+ if (ret < 0) {
+ etnaviv_iommu_context_put(mmu_context);
kfree(mapping);
- else
+ } else {
list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
+ }
out:
mutex_unlock(&etnaviv_obj->lock);
@@ -397,13 +391,13 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
}
if (op & ETNA_PREP_NOSYNC) {
- if (!reservation_object_test_signaled_rcu(obj->resv,
+ if (!dma_resv_test_signaled_rcu(obj->resv,
write))
return -EBUSY;
} else {
unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
- ret = reservation_object_wait_timeout_rcu(obj->resv,
+ ret = dma_resv_wait_timeout_rcu(obj->resv,
write, true, remain);
if (ret <= 0)
return ret == 0 ? -ETIMEDOUT : ret;
@@ -459,8 +453,8 @@ static void etnaviv_gem_describe_fence(struct dma_fence *fence,
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
- struct reservation_object *robj = obj->resv;
- struct reservation_object_list *fobj;
+ struct dma_resv *robj = obj->resv;
+ struct dma_resv_list *fobj;
struct dma_fence *fence;
unsigned long off = drm_vma_node_start(&obj->vma_node);
@@ -536,12 +530,14 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
obj_node) {
- struct etnaviv_iommu *mmu = mapping->mmu;
+ struct etnaviv_iommu_context *context = mapping->context;
WARN_ON(mapping->use);
- if (mmu)
- etnaviv_iommu_unmap_gem(mmu, mapping);
+ if (context) {
+ etnaviv_iommu_unmap_gem(context, mapping);
+ etnaviv_iommu_context_put(context);
+ }
list_del(&mapping->obj_node);
kfree(mapping);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index fcd5d71b502f..d6270acce619 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -6,7 +6,7 @@
#ifndef __ETNAVIV_GEM_H__
#define __ETNAVIV_GEM_H__
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
@@ -25,7 +25,7 @@ struct etnaviv_vram_mapping {
struct list_head scan_node;
struct list_head mmu_node;
struct etnaviv_gem_object *object;
- struct etnaviv_iommu *mmu;
+ struct etnaviv_iommu_context *context;
struct drm_mm_node vram_node;
unsigned int use;
u32 iova;
@@ -77,6 +77,7 @@ static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj)
struct etnaviv_gem_submit_bo {
u32 flags;
+ u64 va;
struct etnaviv_gem_object *obj;
struct etnaviv_vram_mapping *mapping;
struct dma_fence *excl;
@@ -93,6 +94,7 @@ struct etnaviv_gem_submit {
struct kref refcount;
struct etnaviv_file_private *ctx;
struct etnaviv_gpu *gpu;
+ struct etnaviv_iommu_context *mmu_context, *prev_mmu_context;
struct dma_fence *out_fence, *in_fence;
int out_fence_id;
struct list_head node; /* GPU active submit list */
@@ -118,8 +120,8 @@ struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
- struct drm_gem_object *obj, struct etnaviv_gpu *gpu);
-void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping);
+ struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
+ u64 va);
void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping);
#endif /* __ETNAVIV_GEM_H__ */
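etnaviv_gem_mapping_get() now resolves a buffer's mapping within one specific MMU context, optionally at a userspace-chosen softpin address. A sketch of the pin/use/unpin pattern the submit path relies on (illustrative helper, not from this patch):

	/* Sketch: pin a BO into a given context. 'va' is 0 for a
	 * kernel-chosen address, or the softpin address from userspace. */
	static int example_pin(struct drm_gem_object *obj,
			       struct etnaviv_iommu_context *ctx, u64 va)
	{
		struct etnaviv_vram_mapping *mapping;

		mapping = etnaviv_gem_mapping_get(obj, ctx, va);
		if (IS_ERR(mapping))
			return PTR_ERR(mapping);

		/* ... hand mapping->iova to the hardware ... */

		etnaviv_gem_mapping_unreference(mapping);
		return 0;
	}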
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index a05292e8ed6f..f24dd21c2363 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -3,7 +3,9 @@
* Copyright (C) 2014-2018 Etnaviv Project
*/
+#include <drm/drm_prime.h>
#include <linux/dma-buf.h>
+
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 1a636469eeda..aa3e4c3b063a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -3,9 +3,15 @@
* Copyright (C) 2015 Etnaviv Project
*/
+#include <drm/drm_file.h>
#include <linux/dma-fence-array.h>
-#include <linux/reservation.h>
+#include <linux/file.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma-resv.h>
#include <linux/sync_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
@@ -66,6 +72,14 @@ static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
}
submit->bos[i].flags = bo->flags;
+ if (submit->flags & ETNA_SUBMIT_SOFTPIN) {
+ if (bo->presumed < ETNAVIV_SOFTPIN_START_ADDRESS) {
+ DRM_ERROR("invalid softpin address\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ submit->bos[i].va = bo->presumed;
+ }
/* normally use drm_gem_object_lookup(), but for bulk lookup
* all under single table_lock just hit object_idr directly:
@@ -165,10 +179,10 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
- struct reservation_object *robj = bo->obj->base.resv;
+ struct dma_resv *robj = bo->obj->base.resv;
if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
- ret = reservation_object_reserve_shared(robj, 1);
+ ret = dma_resv_reserve_shared(robj, 1);
if (ret)
return ret;
}
@@ -177,13 +191,13 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
continue;
if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
- ret = reservation_object_get_fences_rcu(robj, &bo->excl,
+ ret = dma_resv_get_fences_rcu(robj, &bo->excl,
&bo->nr_shared,
&bo->shared);
if (ret)
return ret;
} else {
- bo->excl = reservation_object_get_excl_rcu(robj);
+ bo->excl = dma_resv_get_excl_rcu(robj);
}
}
@@ -199,10 +213,10 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
struct drm_gem_object *obj = &submit->bos[i].obj->base;
if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
- reservation_object_add_excl_fence(obj->resv,
+ dma_resv_add_excl_fence(obj->resv,
submit->out_fence);
else
- reservation_object_add_shared_fence(obj->resv,
+ dma_resv_add_shared_fence(obj->resv,
submit->out_fence);
submit_unlock_object(submit, i);
@@ -218,11 +232,17 @@ static int submit_pin_objects(struct etnaviv_gem_submit *submit)
struct etnaviv_vram_mapping *mapping;
mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
- submit->gpu);
+ submit->mmu_context,
+ submit->bos[i].va);
if (IS_ERR(mapping)) {
ret = PTR_ERR(mapping);
break;
}
+
+ if ((submit->flags & ETNA_SUBMIT_SOFTPIN) &&
+ submit->bos[i].va != mapping->iova)
+ return -EINVAL;
+
atomic_inc(&etnaviv_obj->gpu_active);
submit->bos[i].flags |= BO_PINNED;
@@ -255,6 +275,10 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
u32 *ptr = stream;
int ret;
+ /* Submits using softpin don't mix with relocs */
+ if ((submit->flags & ETNA_SUBMIT_SOFTPIN) && nr_relocs != 0)
+ return -EINVAL;
+
for (i = 0; i < nr_relocs; i++) {
const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
struct etnaviv_gem_submit_bo *bo;
@@ -355,6 +379,12 @@ static void submit_cleanup(struct kref *kref)
if (submit->cmdbuf.suballoc)
etnaviv_cmdbuf_free(&submit->cmdbuf);
+ if (submit->mmu_context)
+ etnaviv_iommu_context_put(submit->mmu_context);
+
+ if (submit->prev_mmu_context)
+ etnaviv_iommu_context_put(submit->prev_mmu_context);
+
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
@@ -433,6 +463,12 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if ((args->flags & ETNA_SUBMIT_SOFTPIN) &&
+ priv->mmu_global->version != ETNAVIV_IOMMU_V2) {
+ DRM_ERROR("softpin requested on incompatible MMU\n");
+ return -EINVAL;
+ }
+
/*
* Copy the command submission and bo array to kernel space in
* one go, and do this outside of any locks.
@@ -490,12 +526,14 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
goto err_submit_ww_acquire;
}
- ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &submit->cmdbuf,
+ ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &submit->cmdbuf,
ALIGN(args->stream_size, 8) + 8);
if (ret)
goto err_submit_objects;
submit->ctx = file->driver_priv;
+ etnaviv_iommu_context_get(submit->ctx->mmu);
+ submit->mmu_context = submit->ctx->mmu;
submit->exec_state = args->exec_state;
submit->flags = args->flags;
@@ -503,7 +541,8 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto err_submit_objects;
- if (!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
+ if ((priv->mmu_global->version != ETNAVIV_IOMMU_V2) &&
+ !etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
relocs, args->nr_relocs)) {
ret = -EINVAL;
goto err_submit_objects;
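From userspace, softpin is opt-in: query ETNAVIV_PARAM_SOFTPIN_START_ADDR (which reads back as ~0 on cores without MMUv2 support), place each BO's desired GPU address in the previously advisory 'presumed' field and set ETNA_SUBMIT_SOFTPIN. A rough sketch using libdrm's generic ioctl wrapper; the helper and its error handling are illustrative only, and the command stream setup is elided:

	#include <stdint.h>
	#include <xf86drm.h>
	#include <etnaviv_drm.h>	/* the etnaviv uapi header */

	/* Illustrative: submit one BO at a user-chosen GPU address. */
	static int example_softpin_submit(int fd, uint32_t bo_handle)
	{
		struct drm_etnaviv_param req = {
			.pipe = 0,
			.param = ETNAVIV_PARAM_SOFTPIN_START_ADDR,
		};
		struct drm_etnaviv_gem_submit_bo bo = { 0 };
		struct drm_etnaviv_gem_submit submit = { .pipe = 0 };

		if (drmCommandWriteRead(fd, DRM_ETNAVIV_GET_PARAM, &req,
					sizeof(req)))
			return -1;
		if (req.value == ~0ULL)
			return -1;	/* core without softpin (MMUv1) */

		bo.handle = bo_handle;
		bo.flags = ETNA_SUBMIT_BO_READ;
		bo.presumed = req.value;	/* becomes the BO's GPU VA */

		submit.flags = ETNA_SUBMIT_SOFTPIN;
		submit.bos = (uintptr_t)&bo;
		submit.nr_bos = 1;
		/* ... stream, stream_size etc. elided ... */

		return drmCommandWriteRead(fd, DRM_ETNAVIV_GEM_SUBMIT,
					   &submit, sizeof(submit));
	}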
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 5418a1a87b2c..d47d1a8e0219 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -5,9 +5,13 @@
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/delay.h>
#include <linux/dma-fence.h>
-#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/thermal.h>
@@ -38,6 +42,8 @@ static const struct platform_device_id gpu_ids[] = {
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
{
+ struct etnaviv_drm_private *priv = gpu->drm->dev_private;
+
switch (param) {
case ETNAVIV_PARAM_GPU_MODEL:
*value = gpu->identity.model;
@@ -143,6 +149,13 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
*value = gpu->identity.varyings_count;
break;
+ case ETNAVIV_PARAM_SOFTPIN_START_ADDR:
+ if (priv->mmu_global->version == ETNAVIV_IOMMU_V2)
+ *value = ETNAVIV_SOFTPIN_START_ADDRESS;
+ else
+ *value = ~0ULL;
+ break;
+
default:
DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
return -EINVAL;
@@ -596,6 +609,21 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
}
}
+static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu)
+{
+ u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
+ &gpu->mmu_context->cmdbuf_mapping);
+ u16 prefetch;
+
+ /* setup the MMU */
+ etnaviv_iommu_restore(gpu, gpu->mmu_context);
+
+ /* Start command processor */
+ prefetch = etnaviv_buffer_init(gpu);
+
+ etnaviv_gpu_start_fe(gpu, address, prefetch);
+}
+
static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
{
/*
@@ -629,8 +657,6 @@ static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
- u16 prefetch;
-
if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
@@ -676,19 +702,12 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
/* setup the pulse eater */
etnaviv_gpu_setup_pulse_eater(gpu);
- /* setup the MMU */
- etnaviv_iommu_restore(gpu);
-
- /* Start command processor */
- prefetch = etnaviv_buffer_init(gpu);
-
gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
- etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(&gpu->buffer),
- prefetch);
}
int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
+ struct etnaviv_drm_private *priv = gpu->drm->dev_private;
int ret, i;
ret = pm_runtime_get_sync(gpu->dev);
@@ -714,6 +733,24 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
}
/*
+ * On cores that support security features, we claim control over the
+ * security states.
+ */
+ if ((gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) &&
+ (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
+ gpu->sec_mode = ETNA_SEC_KERNEL;
+
+ ret = etnaviv_hw_reset(gpu);
+ if (ret) {
+ dev_err(gpu->dev, "GPU reset failed\n");
+ goto fail;
+ }
+
+ ret = etnaviv_iommu_global_init(gpu);
+ if (ret)
+ goto fail;
+
+ /*
* Set the GPU linear window to be at the end of the DMA window, where
* the CMA area is likely to reside. This ensures that we are able to
* map the command buffers while having the linear window overlap as
@@ -726,57 +763,21 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
(gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
if (dma_mask < PHYS_OFFSET + SZ_2G)
- gpu->memory_base = PHYS_OFFSET;
+ priv->mmu_global->memory_base = PHYS_OFFSET;
else
- gpu->memory_base = dma_mask - SZ_2G + 1;
+ priv->mmu_global->memory_base = dma_mask - SZ_2G + 1;
} else if (PHYS_OFFSET >= SZ_2G) {
dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n");
- gpu->memory_base = PHYS_OFFSET;
+ priv->mmu_global->memory_base = PHYS_OFFSET;
gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
}
- /*
- * On cores with security features supported, we claim control over the
- * security states.
- */
- if ((gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) &&
- (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
- gpu->sec_mode = ETNA_SEC_KERNEL;
-
- ret = etnaviv_hw_reset(gpu);
- if (ret) {
- dev_err(gpu->dev, "GPU reset failed\n");
- goto fail;
- }
-
- gpu->mmu = etnaviv_iommu_new(gpu);
- if (IS_ERR(gpu->mmu)) {
- dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
- ret = PTR_ERR(gpu->mmu);
- goto fail;
- }
-
- gpu->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(gpu);
- if (IS_ERR(gpu->cmdbuf_suballoc)) {
- dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n");
- ret = PTR_ERR(gpu->cmdbuf_suballoc);
- goto destroy_iommu;
- }
-
/* Create buffer: */
- ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &gpu->buffer,
+ ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
PAGE_SIZE);
if (ret) {
dev_err(gpu->dev, "could not create command buffer\n");
- goto destroy_suballoc;
- }
-
- if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
- etnaviv_cmdbuf_get_va(&gpu->buffer) > 0x80000000) {
- ret = -EINVAL;
- dev_err(gpu->dev,
- "command buffer outside valid memory window\n");
- goto free_buffer;
+ goto fail;
}
/* Setup event management */
@@ -795,17 +796,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
pm_runtime_mark_last_busy(gpu->dev);
pm_runtime_put_autosuspend(gpu->dev);
+ gpu->initialized = true;
+
return 0;
-free_buffer:
- etnaviv_cmdbuf_free(&gpu->buffer);
- gpu->buffer.suballoc = NULL;
-destroy_suballoc:
- etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
- gpu->cmdbuf_suballoc = NULL;
-destroy_iommu:
- etnaviv_iommu_destroy(gpu->mmu);
- gpu->mmu = NULL;
fail:
pm_runtime_mark_last_busy(gpu->dev);
pm_runtime_put_autosuspend(gpu->dev);
@@ -999,6 +993,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
etnaviv_gpu_hw_init(gpu);
gpu->exec_state = -1;
+ gpu->mmu_context = NULL;
mutex_unlock(&gpu->lock);
pm_runtime_mark_last_busy(gpu->dev);
@@ -1305,6 +1300,15 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
goto out_unlock;
}
+ if (!gpu->mmu_context) {
+ etnaviv_iommu_context_get(submit->mmu_context);
+ gpu->mmu_context = submit->mmu_context;
+ etnaviv_gpu_start_fe_idleloop(gpu);
+ } else {
+ etnaviv_iommu_context_get(gpu->mmu_context);
+ submit->prev_mmu_context = gpu->mmu_context;
+ }
+
if (submit->nr_pmrs) {
gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
kref_get(&submit->refcount);
@@ -1314,8 +1318,8 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
gpu->event[event[0]].fence = gpu_fence;
submit->cmdbuf.user_size = submit->cmdbuf.size - 8;
- etnaviv_buffer_queue(gpu, submit->exec_state, event[0],
- &submit->cmdbuf);
+ etnaviv_buffer_queue(gpu, submit->exec_state, submit->mmu_context,
+ event[0], &submit->cmdbuf);
if (submit->nr_pmrs) {
gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
@@ -1517,7 +1521,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
- if (gpu->buffer.suballoc) {
+ if (gpu->initialized && gpu->mmu_context) {
/* Replace the last WAIT with END */
mutex_lock(&gpu->lock);
etnaviv_buffer_end(gpu);
@@ -1529,8 +1533,13 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
* we fail, just warn and continue.
*/
etnaviv_gpu_wait_idle(gpu, 100);
+
+ etnaviv_iommu_context_put(gpu->mmu_context);
+ gpu->mmu_context = NULL;
}
+ gpu->exec_state = -1;
+
return etnaviv_gpu_clk_disable(gpu);
}
@@ -1546,8 +1555,6 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
etnaviv_gpu_update_clock(gpu);
etnaviv_gpu_hw_init(gpu);
- gpu->exec_state = -1;
-
mutex_unlock(&gpu->lock);
return 0;
@@ -1676,17 +1683,10 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
etnaviv_gpu_hw_suspend(gpu);
#endif
- if (gpu->buffer.suballoc)
+ if (gpu->initialized) {
etnaviv_cmdbuf_free(&gpu->buffer);
-
- if (gpu->cmdbuf_suballoc) {
- etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
- gpu->cmdbuf_suballoc = NULL;
- }
-
- if (gpu->mmu) {
- etnaviv_iommu_destroy(gpu->mmu);
- gpu->mmu = NULL;
+ etnaviv_iommu_global_fini(gpu);
+ gpu->initialized = false;
}
gpu->drm = NULL;
@@ -1714,7 +1714,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct etnaviv_gpu *gpu;
- struct resource *res;
int err;
gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
@@ -1726,8 +1725,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
mutex_init(&gpu->fence_lock);
/* Map registers: */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpu->mmio = devm_ioremap_resource(&pdev->dev, res);
+ gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpu->mmio))
return PTR_ERR(gpu->mmio);
@@ -1825,7 +1823,7 @@ static int etnaviv_gpu_rpm_resume(struct device *dev)
return ret;
/* Re-initialise the basic hardware state */
- if (gpu->drm && gpu->buffer.suballoc) {
+ if (gpu->drm && gpu->initialized) {
ret = etnaviv_gpu_hw_resume(gpu);
if (ret) {
etnaviv_gpu_clk_disable(gpu);
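Taken together with the hw_suspend() change above, the GPU now attaches an MMU context lazily on the first submit after idle and drops its reference again once the core is suspended. Condensed from the submit-path hunk earlier in this file (illustrative, locking elided):

	/* Illustrative condensation of etnaviv_gpu_submit(), showing
	 * only the context refcounting added by this patch. */
	static void example_attach_context(struct etnaviv_gpu *gpu,
					   struct etnaviv_gem_submit *submit)
	{
		if (!gpu->mmu_context) {
			/* FE idle: adopt the submit's context; starting
			 * the FE also restores the MMU state. */
			etnaviv_iommu_context_get(submit->mmu_context);
			gpu->mmu_context = submit->mmu_context;
			etnaviv_gpu_start_fe_idleloop(gpu);
		} else {
			/* Keep the active context alive until the
			 * in-ring switch for this submit has executed. */
			etnaviv_iommu_context_get(gpu->mmu_context);
			submit->prev_mmu_context = gpu->mmu_context;
		}
	}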
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 9bcf151f706b..8f9bd4edc96a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -7,6 +7,8 @@
#define __ETNAVIV_GPU_H__
#include "etnaviv_cmdbuf.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_mmu.h"
#include "etnaviv_drv.h"
struct etnaviv_gem_submit;
@@ -84,7 +86,6 @@ struct etnaviv_event {
};
struct etnaviv_cmdbuf_suballoc;
-struct etnaviv_cmdbuf;
struct regulator;
struct clk;
@@ -99,14 +100,12 @@ struct etnaviv_gpu {
enum etnaviv_sec_mode sec_mode;
struct workqueue_struct *wq;
struct drm_gpu_scheduler sched;
+ bool initialized;
/* 'ring'-buffer: */
struct etnaviv_cmdbuf buffer;
int exec_state;
- /* bus base address of memory */
- u32 memory_base;
-
/* event management: */
DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
struct etnaviv_event event[ETNA_NR_EVENTS];
@@ -134,8 +133,8 @@ struct etnaviv_gpu {
void __iomem *mmio;
int irq;
- struct etnaviv_iommu *mmu;
- struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
+ struct etnaviv_iommu_context *mmu_context;
+ unsigned int flush_seq;
/* Power Control: */
struct clk *clk_bus;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index b163bdbcb880..aac8dbf3ea56 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -3,15 +3,14 @@
* Copyright (C) 2014-2018 Etnaviv Project
*/
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/bitops.h>
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
-#include "etnaviv_iommu.h"
#include "state_hi.xml.h"
#define PT_SIZE SZ_2M
@@ -19,124 +18,89 @@
#define GPU_MEM_START 0x80000000
-struct etnaviv_iommuv1_domain {
- struct etnaviv_iommu_domain base;
+struct etnaviv_iommuv1_context {
+ struct etnaviv_iommu_context base;
u32 *pgtable_cpu;
dma_addr_t pgtable_dma;
};
-static struct etnaviv_iommuv1_domain *
-to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
+static struct etnaviv_iommuv1_context *
+to_v1_context(struct etnaviv_iommu_context *context)
{
- return container_of(domain, struct etnaviv_iommuv1_domain, base);
+ return container_of(context, struct etnaviv_iommuv1_context, base);
}
-static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
+static void etnaviv_iommuv1_free(struct etnaviv_iommu_context *context)
{
- u32 *p;
- int i;
-
- etnaviv_domain->base.bad_page_cpu =
- dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
- &etnaviv_domain->base.bad_page_dma,
- GFP_KERNEL);
- if (!etnaviv_domain->base.bad_page_cpu)
- return -ENOMEM;
-
- p = etnaviv_domain->base.bad_page_cpu;
- for (i = 0; i < SZ_4K / 4; i++)
- *p++ = 0xdead55aa;
-
- etnaviv_domain->pgtable_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
- PT_SIZE,
- &etnaviv_domain->pgtable_dma,
- GFP_KERNEL);
- if (!etnaviv_domain->pgtable_cpu) {
- dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->base.bad_page_cpu,
- etnaviv_domain->base.bad_page_dma);
- return -ENOMEM;
- }
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
- memset32(etnaviv_domain->pgtable_cpu, etnaviv_domain->base.bad_page_dma,
- PT_ENTRIES);
+ drm_mm_takedown(&context->mm);
- return 0;
-}
+ dma_free_wc(context->global->dev, PT_SIZE, v1_context->pgtable_cpu,
+ v1_context->pgtable_dma);
-static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
-{
- struct etnaviv_iommuv1_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
+ context->global->v1.shared_context = NULL;
- dma_free_wc(etnaviv_domain->base.dev, PT_SIZE,
- etnaviv_domain->pgtable_cpu, etnaviv_domain->pgtable_dma);
-
- dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->base.bad_page_cpu,
- etnaviv_domain->base.bad_page_dma);
-
- kfree(etnaviv_domain);
+ kfree(v1_context);
}
-static int etnaviv_iommuv1_map(struct etnaviv_iommu_domain *domain,
+static int etnaviv_iommuv1_map(struct etnaviv_iommu_context *context,
unsigned long iova, phys_addr_t paddr,
size_t size, int prot)
{
- struct etnaviv_iommuv1_domain *etnaviv_domain = to_etnaviv_domain(domain);
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
if (size != SZ_4K)
return -EINVAL;
- etnaviv_domain->pgtable_cpu[index] = paddr;
+ v1_context->pgtable_cpu[index] = paddr;
return 0;
}
-static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_domain *domain,
+static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_context *context,
unsigned long iova, size_t size)
{
- struct etnaviv_iommuv1_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
if (size != SZ_4K)
return -EINVAL;
- etnaviv_domain->pgtable_cpu[index] = etnaviv_domain->base.bad_page_dma;
+ v1_context->pgtable_cpu[index] = context->global->bad_page_dma;
return SZ_4K;
}
-static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_domain *domain)
+static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_context *context)
{
return PT_SIZE;
}
-static void etnaviv_iommuv1_dump(struct etnaviv_iommu_domain *domain, void *buf)
+static void etnaviv_iommuv1_dump(struct etnaviv_iommu_context *context,
+ void *buf)
{
- struct etnaviv_iommuv1_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
- memcpy(buf, etnaviv_domain->pgtable_cpu, PT_SIZE);
+ memcpy(buf, v1_context->pgtable_cpu, PT_SIZE);
}
-void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
+static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
{
- struct etnaviv_iommuv1_domain *etnaviv_domain =
- to_etnaviv_domain(gpu->mmu->domain);
+ struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
u32 pgtable;
/* set base addresses */
- gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
- gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
- gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
- gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
- gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, context->global->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, context->global->memory_base);
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, context->global->memory_base);
/* set page table address in MC */
- pgtable = (u32)etnaviv_domain->pgtable_dma;
+ pgtable = (u32)v1_context->pgtable_dma;
gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
@@ -145,39 +109,62 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}
-static const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
- .free = etnaviv_iommuv1_domain_free,
+
+const struct etnaviv_iommu_ops etnaviv_iommuv1_ops = {
+ .free = etnaviv_iommuv1_free,
.map = etnaviv_iommuv1_map,
.unmap = etnaviv_iommuv1_unmap,
.dump_size = etnaviv_iommuv1_dump_size,
.dump = etnaviv_iommuv1_dump,
+ .restore = etnaviv_iommuv1_restore,
};
-struct etnaviv_iommu_domain *
-etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
+struct etnaviv_iommu_context *
+etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global)
{
- struct etnaviv_iommuv1_domain *etnaviv_domain;
- struct etnaviv_iommu_domain *domain;
- int ret;
+ struct etnaviv_iommuv1_context *v1_context;
+ struct etnaviv_iommu_context *context;
+
+ mutex_lock(&global->lock);
+
+ /*
+ * MMUv1 does not support switching between different contexts without
+ * a stop-the-world operation, so we only support a single shared
+ * context with this version.
+ */
+ if (global->v1.shared_context) {
+ context = global->v1.shared_context;
+ etnaviv_iommu_context_get(context);
+ mutex_unlock(&global->lock);
+ return context;
+ }
- etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
- if (!etnaviv_domain)
+ v1_context = kzalloc(sizeof(*v1_context), GFP_KERNEL);
+ if (!v1_context)
return NULL;
- domain = &etnaviv_domain->base;
+ v1_context->pgtable_cpu = dma_alloc_wc(global->dev, PT_SIZE,
+ &v1_context->pgtable_dma,
+ GFP_KERNEL);
+ if (!v1_context->pgtable_cpu)
+ goto out_free;
- domain->dev = gpu->dev;
- domain->base = GPU_MEM_START;
- domain->size = PT_ENTRIES * SZ_4K;
- domain->ops = &etnaviv_iommuv1_ops;
+ memset32(v1_context->pgtable_cpu, global->bad_page_dma, PT_ENTRIES);
- ret = __etnaviv_iommu_init(etnaviv_domain);
- if (ret)
- goto out_free;
+ context = &v1_context->base;
+ context->global = global;
+ kref_init(&context->refcount);
+ mutex_init(&context->lock);
+ INIT_LIST_HEAD(&context->mappings);
+ drm_mm_init(&context->mm, GPU_MEM_START, PT_ENTRIES * SZ_4K);
+ context->global->v1.shared_context = context;
+
+ mutex_unlock(&global->lock);
- return &etnaviv_domain->base;
+ return context;
out_free:
- kfree(etnaviv_domain);
+ mutex_unlock(&global->lock);
+ kfree(v1_context);
return NULL;
}
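Because of the shared-context rule in the comment above, repeated allocations on MMUv1 hand back the same refcounted object. A short sketch of the resulting usage contract (names from this patch, flow illustrative):

	/* Sketch: every MMUv1 caller receives the same shared context. */
	struct etnaviv_iommu_context *a, *b;

	a = etnaviv_iommuv1_context_alloc(global);
	b = etnaviv_iommuv1_context_alloc(global);
	/* a == b here; the second call only took another reference. */

	etnaviv_iommu_context_put(b);
	etnaviv_iommu_context_put(a);	/* last put frees the page table */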
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
deleted file mode 100644
index b279404ce91a..000000000000
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2014-2018 Etnaviv Project
- */
-
-#ifndef __ETNAVIV_IOMMU_H__
-#define __ETNAVIV_IOMMU_H__
-
-struct etnaviv_gpu;
-struct etnaviv_iommu_domain;
-
-struct etnaviv_iommu_domain *
-etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
-void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu);
-
-struct etnaviv_iommu_domain *
-etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
-void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu);
-
-#endif /* __ETNAVIV_IOMMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index f794e04be9e6..043111a1d60c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -3,16 +3,16 @@
* Copyright (C) 2016-2018 Etnaviv Project
*/
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/bitops.h>
+#include <linux/vmalloc.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
-#include "etnaviv_iommu.h"
#include "state.xml.h"
#include "state_hi.xml.h"
@@ -27,11 +27,9 @@
#define MMUv2_MAX_STLB_ENTRIES 1024
-struct etnaviv_iommuv2_domain {
- struct etnaviv_iommu_domain base;
- /* P(age) T(able) A(rray) */
- u64 *pta_cpu;
- dma_addr_t pta_dma;
+struct etnaviv_iommuv2_context {
+ struct etnaviv_iommu_context base;
+ unsigned short id;
/* M(aster) TLB aka first level pagetable */
u32 *mtlb_cpu;
dma_addr_t mtlb_dma;
@@ -40,41 +38,62 @@ struct etnaviv_iommuv2_domain {
dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
};
-static struct etnaviv_iommuv2_domain *
-to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
+static struct etnaviv_iommuv2_context *
+to_v2_context(struct etnaviv_iommu_context *context)
{
- return container_of(domain, struct etnaviv_iommuv2_domain, base);
+ return container_of(context, struct etnaviv_iommuv2_context, base);
}
+static void etnaviv_iommuv2_free(struct etnaviv_iommu_context *context)
+{
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+ int i;
+
+ drm_mm_takedown(&context->mm);
+
+ for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
+ if (v2_context->stlb_cpu[i])
+ dma_free_wc(context->global->dev, SZ_4K,
+ v2_context->stlb_cpu[i],
+ v2_context->stlb_dma[i]);
+ }
+
+ dma_free_wc(context->global->dev, SZ_4K, v2_context->mtlb_cpu,
+ v2_context->mtlb_dma);
+
+ clear_bit(v2_context->id, context->global->v2.pta_alloc);
+
+ vfree(v2_context);
+}
static int
-etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_domain *etnaviv_domain,
+etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_context *v2_context,
int stlb)
{
- if (etnaviv_domain->stlb_cpu[stlb])
+ if (v2_context->stlb_cpu[stlb])
return 0;
- etnaviv_domain->stlb_cpu[stlb] =
- dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
- &etnaviv_domain->stlb_dma[stlb],
+ v2_context->stlb_cpu[stlb] =
+ dma_alloc_wc(v2_context->base.global->dev, SZ_4K,
+ &v2_context->stlb_dma[stlb],
GFP_KERNEL);
- if (!etnaviv_domain->stlb_cpu[stlb])
+ if (!v2_context->stlb_cpu[stlb])
return -ENOMEM;
- memset32(etnaviv_domain->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
+ memset32(v2_context->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
SZ_4K / sizeof(u32));
- etnaviv_domain->mtlb_cpu[stlb] = etnaviv_domain->stlb_dma[stlb] |
- MMUv2_PTE_PRESENT;
+ v2_context->mtlb_cpu[stlb] =
+ v2_context->stlb_dma[stlb] | MMUv2_PTE_PRESENT;
+
return 0;
}
-static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
+static int etnaviv_iommuv2_map(struct etnaviv_iommu_context *context,
unsigned long iova, phys_addr_t paddr,
size_t size, int prot)
{
- struct etnaviv_iommuv2_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
int mtlb_entry, stlb_entry, ret;
u32 entry = lower_32_bits(paddr) | MMUv2_PTE_PRESENT;
@@ -90,20 +109,19 @@ static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
- ret = etnaviv_iommuv2_ensure_stlb(etnaviv_domain, mtlb_entry);
+ ret = etnaviv_iommuv2_ensure_stlb(v2_context, mtlb_entry);
if (ret)
return ret;
- etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;
+ v2_context->stlb_cpu[mtlb_entry][stlb_entry] = entry;
return 0;
}
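
For reference, the mask/shift pair in the map path above splits a GPU virtual address into a first-level (MTLB) and second-level (STLB) index. A standalone sketch of that decomposition, assuming 4 KiB pages and 1024-entry tables, so shifts of 22 and 12 (treat the exact constants as an assumption; the real values live in the MMUv2_* defines):

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed layout: 4 GiB space, 1024 MTLB entries of 4 MiB each,
     * every one pointing at an STLB with 1024 4 KiB entries. */
    #define MTLB_SHIFT 22
    #define STLB_SHIFT 12
    #define ENTRY_MASK 0x3ff

    int main(void)
    {
        uint32_t iova = 0x12345000;
        unsigned mtlb = (iova >> MTLB_SHIFT) & ENTRY_MASK;
        unsigned stlb = (iova >> STLB_SHIFT) & ENTRY_MASK;

        printf("mtlb=%#x stlb=%#x\n", mtlb, stlb);  /* mtlb=0x48 stlb=0x345 */
        return 0;
    }
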
-static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
+static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_context *context,
unsigned long iova, size_t size)
{
- struct etnaviv_iommuv2_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
+ struct etnaviv_iommuv2_context *etnaviv_domain = to_v2_context(context);
int mtlb_entry, stlb_entry;
if (size != SZ_4K)
@@ -117,118 +135,35 @@ static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
return SZ_4K;
}
-static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
-{
- int ret;
-
- /* allocate scratch page */
- etnaviv_domain->base.bad_page_cpu =
- dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
- &etnaviv_domain->base.bad_page_dma,
- GFP_KERNEL);
- if (!etnaviv_domain->base.bad_page_cpu) {
- ret = -ENOMEM;
- goto fail_mem;
- }
-
- memset32(etnaviv_domain->base.bad_page_cpu, 0xdead55aa,
- SZ_4K / sizeof(u32));
-
- etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
- SZ_4K, &etnaviv_domain->pta_dma,
- GFP_KERNEL);
- if (!etnaviv_domain->pta_cpu) {
- ret = -ENOMEM;
- goto fail_mem;
- }
-
- etnaviv_domain->mtlb_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
- SZ_4K, &etnaviv_domain->mtlb_dma,
- GFP_KERNEL);
- if (!etnaviv_domain->mtlb_cpu) {
- ret = -ENOMEM;
- goto fail_mem;
- }
-
- memset32(etnaviv_domain->mtlb_cpu, MMUv2_PTE_EXCEPTION,
- MMUv2_MAX_STLB_ENTRIES);
-
- return 0;
-
-fail_mem:
- if (etnaviv_domain->base.bad_page_cpu)
- dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->base.bad_page_cpu,
- etnaviv_domain->base.bad_page_dma);
-
- if (etnaviv_domain->pta_cpu)
- dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);
-
- if (etnaviv_domain->mtlb_cpu)
- dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
-
- return ret;
-}
-
-static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
-{
- struct etnaviv_iommuv2_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
- int i;
-
- dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->base.bad_page_cpu,
- etnaviv_domain->base.bad_page_dma);
-
- dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);
-
- dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
-
- for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
- if (etnaviv_domain->stlb_cpu[i])
- dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
- etnaviv_domain->stlb_cpu[i],
- etnaviv_domain->stlb_dma[i]);
- }
-
- vfree(etnaviv_domain);
-}
-
-static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
+static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_context *context)
{
- struct etnaviv_iommuv2_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
size_t dump_size = SZ_4K;
int i;
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
- if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
+ if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
dump_size += SZ_4K;
return dump_size;
}
-static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
+static void etnaviv_iommuv2_dump(struct etnaviv_iommu_context *context, void *buf)
{
- struct etnaviv_iommuv2_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
int i;
- memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
+ memcpy(buf, v2_context->mtlb_cpu, SZ_4K);
buf += SZ_4K;
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
- if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
- memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
+ if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
+ memcpy(buf, v2_context->stlb_cpu[i], SZ_4K);
}
-static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
+static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
{
- struct etnaviv_iommuv2_domain *etnaviv_domain =
- to_etnaviv_domain(gpu->mmu->domain);
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
u16 prefetch;
/* If the MMU is already enabled the state is still there. */
@@ -236,8 +171,8 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
return;
prefetch = etnaviv_buffer_config_mmuv2(gpu,
- (u32)etnaviv_domain->mtlb_dma,
- (u32)etnaviv_domain->base.bad_page_dma);
+ (u32)v2_context->mtlb_dma,
+ (u32)context->global->bad_page_dma);
etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
prefetch);
etnaviv_gpu_wait_idle(gpu, 100);
@@ -245,10 +180,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}
-static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
+static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
{
- struct etnaviv_iommuv2_domain *etnaviv_domain =
- to_etnaviv_domain(gpu->mmu->domain);
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
u16 prefetch;
/* If the MMU is already enabled the state is still there. */
@@ -256,26 +191,26 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
return;
gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
- lower_32_bits(etnaviv_domain->pta_dma));
+ lower_32_bits(context->global->v2.pta_dma));
gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
- upper_32_bits(etnaviv_domain->pta_dma));
+ upper_32_bits(context->global->v2.pta_dma));
gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);
gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
- lower_32_bits(etnaviv_domain->base.bad_page_dma));
+ lower_32_bits(context->global->bad_page_dma));
gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
- lower_32_bits(etnaviv_domain->base.bad_page_dma));
+ lower_32_bits(context->global->bad_page_dma));
gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
- upper_32_bits(etnaviv_domain->base.bad_page_dma)) |
+ upper_32_bits(context->global->bad_page_dma)) |
VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
- upper_32_bits(etnaviv_domain->base.bad_page_dma)));
+ upper_32_bits(context->global->bad_page_dma)));
- etnaviv_domain->pta_cpu[0] = etnaviv_domain->mtlb_dma |
- VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;
+ context->global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma |
+ VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;
/* trigger a PTA load through the FE */
- prefetch = etnaviv_buffer_config_pta(gpu);
+ prefetch = etnaviv_buffer_config_pta(gpu, v2_context->id);
etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
prefetch);
etnaviv_gpu_wait_idle(gpu, 100);
@@ -283,14 +218,28 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
}
-void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
+u32 etnaviv_iommuv2_get_mtlb_addr(struct etnaviv_iommu_context *context)
+{
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+
+ return v2_context->mtlb_dma;
+}
+
+unsigned short etnaviv_iommuv2_get_pta_id(struct etnaviv_iommu_context *context)
+{
+ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
+
+ return v2_context->id;
+}
+static void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
{
switch (gpu->sec_mode) {
case ETNA_SEC_NONE:
- etnaviv_iommuv2_restore_nonsec(gpu);
+ etnaviv_iommuv2_restore_nonsec(gpu, context);
break;
case ETNA_SEC_KERNEL:
- etnaviv_iommuv2_restore_sec(gpu);
+ etnaviv_iommuv2_restore_sec(gpu, context);
break;
default:
WARN(1, "unhandled GPU security mode\n");
@@ -298,39 +247,58 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
}
}
-static const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
- .free = etnaviv_iommuv2_domain_free,
+const struct etnaviv_iommu_ops etnaviv_iommuv2_ops = {
+ .free = etnaviv_iommuv2_free,
.map = etnaviv_iommuv2_map,
.unmap = etnaviv_iommuv2_unmap,
.dump_size = etnaviv_iommuv2_dump_size,
.dump = etnaviv_iommuv2_dump,
+ .restore = etnaviv_iommuv2_restore,
};
-struct etnaviv_iommu_domain *
-etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
+struct etnaviv_iommu_context *
+etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global)
{
- struct etnaviv_iommuv2_domain *etnaviv_domain;
- struct etnaviv_iommu_domain *domain;
- int ret;
+ struct etnaviv_iommuv2_context *v2_context;
+ struct etnaviv_iommu_context *context;
- etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
- if (!etnaviv_domain)
+ v2_context = vzalloc(sizeof(*v2_context));
+ if (!v2_context)
return NULL;
- domain = &etnaviv_domain->base;
+ mutex_lock(&global->lock);
+ v2_context->id = find_first_zero_bit(global->v2.pta_alloc,
+ ETNAVIV_PTA_ENTRIES);
+ if (v2_context->id < ETNAVIV_PTA_ENTRIES) {
+ set_bit(v2_context->id, global->v2.pta_alloc);
+ } else {
+ mutex_unlock(&global->lock);
+ goto out_free;
+ }
+ mutex_unlock(&global->lock);
- domain->dev = gpu->dev;
- domain->base = SZ_4K;
- domain->size = (u64)SZ_1G * 4 - SZ_4K;
- domain->ops = &etnaviv_iommuv2_ops;
+ v2_context->mtlb_cpu = dma_alloc_wc(global->dev, SZ_4K,
+ &v2_context->mtlb_dma, GFP_KERNEL);
+ if (!v2_context->mtlb_cpu)
+ goto out_free_id;
- ret = etnaviv_iommuv2_init(etnaviv_domain);
- if (ret)
- goto out_free;
+ memset32(v2_context->mtlb_cpu, MMUv2_PTE_EXCEPTION,
+ MMUv2_MAX_STLB_ENTRIES);
+
+ global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma;
+
+ context = &v2_context->base;
+ context->global = global;
+ kref_init(&context->refcount);
+ mutex_init(&context->lock);
+ INIT_LIST_HEAD(&context->mappings);
+ drm_mm_init(&context->mm, SZ_4K, (u64)SZ_1G * 4 - SZ_4K);
- return &etnaviv_domain->base;
+ return context;
+out_free_id:
+ clear_bit(v2_context->id, global->v2.pta_alloc);
out_free:
- vfree(etnaviv_domain);
+ vfree(v2_context);
return NULL;
}
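
The allocation above hands each v2 context a PTA slot by scanning a bitmap under the global lock (find_first_zero_bit + set_bit, released again with clear_bit on free and on the error path). The same idea in plain C, with a 64-bit word standing in for the bitmap:

    #include <stdio.h>

    #define NR_IDS 64
    static unsigned long long id_map;   /* stands in for global->v2.pta_alloc */

    /* Lowest free id, or -1: mirrors find_first_zero_bit() + set_bit(). */
    static int id_alloc(void)
    {
        for (int i = 0; i < NR_IDS; i++) {
            if (!(id_map & (1ULL << i))) {
                id_map |= 1ULL << i;
                return i;
            }
        }
        return -1;
    }

    static void id_free(int i)          /* clear_bit() */
    {
        id_map &= ~(1ULL << i);
    }

    int main(void)
    {
        int a = id_alloc(), b = id_alloc();
        id_free(a);
        printf("%d %d %d\n", a, b, id_alloc());   /* 0 1 0 */
        return 0;
    }
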
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 8069f9f36a2e..35ebae6a1be7 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -3,15 +3,17 @@
* Copyright (C) 2015-2018 Etnaviv Project
*/
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+
#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
-#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"
-static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
+static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
unsigned long iova, size_t size)
{
size_t unmapped_page, unmapped = 0;
@@ -24,7 +26,8 @@ static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
}
while (unmapped < size) {
- unmapped_page = domain->ops->unmap(domain, iova, pgsize);
+ unmapped_page = context->global->ops->unmap(context, iova,
+ pgsize);
if (!unmapped_page)
break;
@@ -33,7 +36,7 @@ static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
}
}
-static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
+static int etnaviv_context_map(struct etnaviv_iommu_context *context,
unsigned long iova, phys_addr_t paddr,
size_t size, int prot)
{
@@ -49,7 +52,8 @@ static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
}
while (size) {
- ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+ ret = context->global->ops->map(context, iova, paddr, pgsize,
+ prot);
if (ret)
break;
@@ -60,21 +64,19 @@ static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
/* unroll mapping in case something went wrong */
if (ret)
- etnaviv_domain_unmap(domain, orig_iova, orig_size - size);
+ etnaviv_context_unmap(context, orig_iova, orig_size - size);
return ret;
}
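
etnaviv_context_map() above walks the range one page at a time and, if any page fails, unmaps what it already mapped so the caller never sees a half-mapped range. A compact sketch of that unroll-on-failure loop (map_page/unmap_page are stand-ins for the per-version ops):

    #include <stdio.h>
    #include <stddef.h>

    #define PGSIZE 4096

    static int map_page(unsigned long iova)
    {
        return iova == 0x3000 ? -1 : 0;     /* fail on the third page */
    }

    static void unmap_page(unsigned long iova)
    {
        printf("unmap %#lx\n", iova);
    }

    static int range_map(unsigned long iova, size_t size)
    {
        unsigned long orig = iova;
        size_t done = 0;
        int ret = 0;

        while (done < size) {
            ret = map_page(iova);
            if (ret)
                break;
            iova += PGSIZE;
            done += PGSIZE;
        }

        /* unroll the mapping in case something went wrong */
        while (ret && done) {
            done -= PGSIZE;
            unmap_page(orig + done);
        }
        return ret;
    }

    int main(void)
    {
        range_map(0x1000, 4 * PGSIZE);  /* maps 2 pages, then unmaps both */
        return 0;
    }
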
-static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
+static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
struct sg_table *sgt, unsigned len, int prot)
{
- struct etnaviv_iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
unsigned int da = iova;
unsigned int i, j;
int ret;
- if (!domain || !sgt)
+ if (!context || !sgt)
return -EINVAL;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
@@ -83,7 +85,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
- ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
+ ret = etnaviv_context_map(context, da, pa, bytes, prot);
if (ret)
goto fail;
@@ -98,16 +100,15 @@ fail:
for_each_sg(sgt->sgl, sg, i, j) {
size_t bytes = sg_dma_len(sg) + sg->offset;
- etnaviv_domain_unmap(domain, da, bytes);
+ etnaviv_context_unmap(context, da, bytes);
da += bytes;
}
return ret;
}
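
One level up, etnaviv_iommu_map() lays the scatterlist segments out back to back in the GPU address space, advancing the device address by each segment's length. A toy version with a plain array in place of the sg_table:

    #include <stdio.h>
    #include <stddef.h>

    struct seg { unsigned long pa; size_t len; };   /* one sg_table entry */

    static int map_seg(unsigned long da, const struct seg *s)
    {
        printf("map da=%#lx pa=%#lx len=%zu\n", da, s->pa, s->len);
        return 0;
    }

    /* Lay the segments out contiguously in the device address space,
     * starting at iova, as etnaviv_iommu_map() does with for_each_sg(). */
    static int table_map(unsigned long iova, const struct seg *segs, int n)
    {
        unsigned long da = iova;

        for (int i = 0; i < n; i++) {
            if (map_seg(da, &segs[i]))
                return -1;              /* the real code unwinds here */
            da += segs[i].len;
        }
        return 0;
    }

    int main(void)
    {
        const struct seg segs[] = { { 0x80000000, 8192 }, { 0x90000000, 4096 } };
        return table_map(0x1000, segs, 2);
    }
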
-static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
+static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
struct sg_table *sgt, unsigned len)
{
- struct etnaviv_iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
unsigned int da = iova;
int i;
@@ -115,7 +116,7 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
size_t bytes = sg_dma_len(sg) + sg->offset;
- etnaviv_domain_unmap(domain, da, bytes);
+ etnaviv_context_unmap(context, da, bytes);
VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
@@ -125,24 +126,24 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
}
}
-static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
+static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
struct etnaviv_vram_mapping *mapping)
{
struct etnaviv_gem_object *etnaviv_obj = mapping->object;
- etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
+ etnaviv_iommu_unmap(context, mapping->vram_node.start,
etnaviv_obj->sgt, etnaviv_obj->base.size);
drm_mm_remove_node(&mapping->vram_node);
}
-static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
+static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
struct drm_mm_node *node, size_t size)
{
struct etnaviv_vram_mapping *free = NULL;
enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
int ret;
- lockdep_assert_held(&mmu->lock);
+ lockdep_assert_held(&context->lock);
while (1) {
struct etnaviv_vram_mapping *m, *n;
@@ -150,17 +151,17 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
struct list_head list;
bool found;
- ret = drm_mm_insert_node_in_range(&mmu->mm, node,
+ ret = drm_mm_insert_node_in_range(&context->mm, node,
size, 0, 0, 0, U64_MAX, mode);
if (ret != -ENOSPC)
break;
/* Try to retire some entries */
- drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);
+ drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);
found = 0;
INIT_LIST_HEAD(&list);
- list_for_each_entry(free, &mmu->mappings, mmu_node) {
+ list_for_each_entry(free, &context->mappings, mmu_node) {
/* If this vram node has not been used, skip this. */
if (!free->vram_node.mm)
continue;
@@ -202,8 +203,8 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
* this mapping.
*/
list_for_each_entry_safe(m, n, &list, scan_node) {
- etnaviv_iommu_remove_mapping(mmu, m);
- m->mmu = NULL;
+ etnaviv_iommu_remove_mapping(context, m);
+ m->context = NULL;
list_del_init(&m->mmu_node);
list_del_init(&m->scan_node);
}
@@ -219,9 +220,16 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
return ret;
}
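
The find_iova path above is an allocate-or-evict loop: try to insert into the drm_mm range manager, and on -ENOSPC retire idle mappings and retry until the node fits or nothing more can be freed. A toy allocator showing the retry shape (the slot states are invented for the example: 1 = busy, 2 = idle/retirable, 0 = free):

    #include <stdio.h>

    #define SLOTS 4
    static int in_use[SLOTS];   /* 0 = free, 1 = busy, 2 = idle/retirable */

    static int try_insert(void)
    {
        for (int i = 0; i < SLOTS; i++)
            if (!in_use[i]) { in_use[i] = 1; return i; }
        return -1;                      /* -ENOSPC */
    }

    static int evict_one(void)
    {
        for (int i = 0; i < SLOTS; i++)
            if (in_use[i] == 2) { in_use[i] = 0; return 0; }
        return -1;                      /* nothing evictable */
    }

    /* Insert, and on "ENOSPC" retire an idle mapping and retry, the way
     * etnaviv_iommu_find_iova() loops with the drm_mm eviction scan. */
    static int find_slot(void)
    {
        for (;;) {
            int s = try_insert();
            if (s >= 0)
                return s;
            if (evict_one())
                return -1;
        }
    }

    int main(void)
    {
        for (int i = 0; i < SLOTS; i++)
            in_use[i] = (i == 2) ? 2 : 1;
        printf("got slot %d\n", find_slot());  /* retires slot 2, returns 2 */
        return 0;
    }
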
-int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
+ struct drm_mm_node *node, size_t size, u64 va)
+{
+ return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
+ va + size, DRM_MM_INSERT_LOWEST);
+}
+
+int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
- struct etnaviv_vram_mapping *mapping)
+ struct etnaviv_vram_mapping *mapping, u64 va)
{
struct sg_table *sgt = etnaviv_obj->sgt;
struct drm_mm_node *node;
@@ -229,17 +237,17 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
lockdep_assert_held(&etnaviv_obj->lock);
- mutex_lock(&mmu->lock);
+ mutex_lock(&context->lock);
/* v1 MMU can optimize single entry (contiguous) scatterlists */
- if (mmu->version == ETNAVIV_IOMMU_V1 &&
+ if (context->global->version == ETNAVIV_IOMMU_V1 &&
sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
u32 iova;
iova = sg_dma_address(sgt->sgl) - memory_base;
if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
mapping->iova = iova;
- list_add_tail(&mapping->mmu_node, &mmu->mappings);
+ list_add_tail(&mapping->mmu_node, &context->mappings);
ret = 0;
goto unlock;
}
@@ -247,12 +255,17 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
node = &mapping->vram_node;
- ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
+ if (va)
+ ret = etnaviv_iommu_insert_exact(context, node,
+ etnaviv_obj->base.size, va);
+ else
+ ret = etnaviv_iommu_find_iova(context, node,
+ etnaviv_obj->base.size);
if (ret < 0)
goto unlock;
mapping->iova = node->start;
- ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
+ ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
if (ret < 0) {
@@ -260,130 +273,233 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
goto unlock;
}
- list_add_tail(&mapping->mmu_node, &mmu->mappings);
- mmu->need_flush = true;
+ list_add_tail(&mapping->mmu_node, &context->mappings);
+ context->flush_seq++;
unlock:
- mutex_unlock(&mmu->lock);
+ mutex_unlock(&context->lock);
return ret;
}
-void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
struct etnaviv_vram_mapping *mapping)
{
WARN_ON(mapping->use);
- mutex_lock(&mmu->lock);
+ mutex_lock(&context->lock);
/* If the vram node is on the mm, unmap and remove the node */
- if (mapping->vram_node.mm == &mmu->mm)
- etnaviv_iommu_remove_mapping(mmu, mapping);
+ if (mapping->vram_node.mm == &context->mm)
+ etnaviv_iommu_remove_mapping(context, mapping);
list_del(&mapping->mmu_node);
- mmu->need_flush = true;
- mutex_unlock(&mmu->lock);
+ context->flush_seq++;
+ mutex_unlock(&context->lock);
}
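
Note that the bool need_flush became a flush_seq counter in both hunks above: a bool can only answer one consumer, while a monotonic sequence number lets every submit path remember the last value it synced against. Roughly:

    #include <stdio.h>

    static unsigned int flush_seq;          /* bumped on every map/unmap */

    static void mapping_changed(void)
    {
        flush_seq++;
    }

    /* Each consumer keeps its own last-seen value, so any number of
     * submit paths can independently detect a stale TLB. */
    static int need_flush(unsigned int *last_seen)
    {
        if (*last_seen == flush_seq)
            return 0;
        *last_seen = flush_seq;
        return 1;
    }

    int main(void)
    {
        unsigned int gpu_seen = 0;

        mapping_changed();
        printf("%d", need_flush(&gpu_seen));    /* 1: flush the TLB */
        printf("%d\n", need_flush(&gpu_seen));  /* 0: already synced */
        return 0;
    }
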
-void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
+static void etnaviv_iommu_context_free(struct kref *kref)
{
- drm_mm_takedown(&mmu->mm);
- mmu->domain->ops->free(mmu->domain);
- kfree(mmu);
+ struct etnaviv_iommu_context *context =
+ container_of(kref, struct etnaviv_iommu_context, refcount);
+
+ etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);
+
+ context->global->ops->free(context);
+}
+void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
+{
+ kref_put(&context->refcount, etnaviv_iommu_context_free);
}
-struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
+struct etnaviv_iommu_context *
+etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
+ struct etnaviv_cmdbuf_suballoc *suballoc)
{
- enum etnaviv_iommu_version version;
- struct etnaviv_iommu *mmu;
+ struct etnaviv_iommu_context *ctx;
+ int ret;
- mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
- if (!mmu)
- return ERR_PTR(-ENOMEM);
+ if (global->version == ETNAVIV_IOMMU_V1)
+ ctx = etnaviv_iommuv1_context_alloc(global);
+ else
+ ctx = etnaviv_iommuv2_context_alloc(global);
- if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
- mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
- version = ETNAVIV_IOMMU_V1;
- } else {
- mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
- version = ETNAVIV_IOMMU_V2;
- }
+ if (!ctx)
+ return NULL;
- if (!mmu->domain) {
- dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
- kfree(mmu);
- return ERR_PTR(-ENOMEM);
+ ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
+ global->memory_base);
+ if (ret) {
+ global->ops->free(ctx);
+ return NULL;
}
- mmu->gpu = gpu;
- mmu->version = version;
- mutex_init(&mmu->lock);
- INIT_LIST_HEAD(&mmu->mappings);
-
- drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);
-
- return mmu;
+ return ctx;
}
-void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
+void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *context)
{
- if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
- etnaviv_iommuv1_restore(gpu);
- else
- etnaviv_iommuv2_restore(gpu);
+ context->global->ops->restore(gpu, context);
}
-int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
- struct drm_mm_node *vram_node, size_t size,
- u32 *iova)
+int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping,
+ u32 memory_base, dma_addr_t paddr,
+ size_t size)
{
- struct etnaviv_iommu *mmu = gpu->mmu;
+ mutex_lock(&context->lock);
- if (mmu->version == ETNAVIV_IOMMU_V1) {
- *iova = paddr - gpu->memory_base;
+ if (mapping->use > 0) {
+ mapping->use++;
+ mutex_unlock(&context->lock);
return 0;
+ }
+
+ /*
+ * For MMUv1 we don't add the suballoc region to the pagetables, as
+ * those GPUs can only work with cmdbufs accessed through the linear
+ * window. Instead we manufacture a mapping to make it look uniform
+ * to the upper layers.
+ */
+ if (context->global->version == ETNAVIV_IOMMU_V1) {
+ mapping->iova = paddr - memory_base;
} else {
+ struct drm_mm_node *node = &mapping->vram_node;
int ret;
- mutex_lock(&mmu->lock);
- ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
+ ret = etnaviv_iommu_find_iova(context, node, size);
if (ret < 0) {
- mutex_unlock(&mmu->lock);
+ mutex_unlock(&context->lock);
return ret;
}
- ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
- size, ETNAVIV_PROT_READ);
+
+ mapping->iova = node->start;
+ ret = etnaviv_context_map(context, node->start, paddr, size,
+ ETNAVIV_PROT_READ);
if (ret < 0) {
- drm_mm_remove_node(vram_node);
- mutex_unlock(&mmu->lock);
+ drm_mm_remove_node(node);
+ mutex_unlock(&context->lock);
return ret;
}
- gpu->mmu->need_flush = true;
- mutex_unlock(&mmu->lock);
- *iova = (u32)vram_node->start;
- return 0;
+ context->flush_seq++;
}
+
+ list_add_tail(&mapping->mmu_node, &context->mappings);
+ mapping->use = 1;
+
+ mutex_unlock(&context->lock);
+
+ return 0;
}
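
etnaviv_iommu_get_suballoc_va() now counts users: the first caller pays for finding an iova and mapping the pages, later callers only bump mapping->use, and only the last put (below) tears the mapping down. A sketch of that lazy map plus use counting (the iova value is a placeholder):

    #include <stdio.h>

    /* First user pays for the mapping; later users only bump a use count,
     * and only the last put tears the mapping down again. */
    struct mapping { int use; unsigned int iova; };

    static void suballoc_get(struct mapping *m)
    {
        if (m->use++ > 0)
            return;                     /* already mapped, just count */
        m->iova = 0xc0000000;           /* pretend: find iova + map pages */
        printf("mapped at %#x\n", m->iova);
    }

    static void suballoc_put(struct mapping *m)
    {
        if (--m->use > 0)
            return;
        printf("unmapped %#x\n", m->iova);  /* last user: tear down */
    }

    int main(void)
    {
        struct mapping m = { 0, 0 };

        suballoc_get(&m);               /* maps */
        suballoc_get(&m);               /* just counts */
        suballoc_put(&m);               /* still in use */
        suballoc_put(&m);               /* unmaps */
        return 0;
    }
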
-void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
- struct drm_mm_node *vram_node, size_t size,
- u32 iova)
+void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
+ struct etnaviv_vram_mapping *mapping)
{
- struct etnaviv_iommu *mmu = gpu->mmu;
+ struct drm_mm_node *node = &mapping->vram_node;
- if (mmu->version == ETNAVIV_IOMMU_V2) {
- mutex_lock(&mmu->lock);
- etnaviv_domain_unmap(mmu->domain, iova, size);
- drm_mm_remove_node(vram_node);
- mutex_unlock(&mmu->lock);
+ mutex_lock(&context->lock);
+ mapping->use--;
+
+ if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
+ mutex_unlock(&context->lock);
+ return;
}
+
+ etnaviv_context_unmap(context, node->start, node->size);
+ drm_mm_remove_node(node);
+ mutex_unlock(&context->lock);
+}
+
+size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
+{
+ return context->global->ops->dump_size(context);
+}
+
+void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
+{
+ context->global->ops->dump(context, buf);
}
-size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
+
+int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
- return iommu->domain->ops->dump_size(iommu->domain);
+ enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
+ struct etnaviv_drm_private *priv = gpu->drm->dev_private;
+ struct etnaviv_iommu_global *global;
+ struct device *dev = gpu->drm->dev;
+
+ if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
+ version = ETNAVIV_IOMMU_V2;
+
+ if (priv->mmu_global) {
+ if (priv->mmu_global->version != version) {
+ dev_err(gpu->dev,
+ "MMU version doesn't match global version\n");
+ return -ENXIO;
+ }
+
+ priv->mmu_global->use++;
+ return 0;
+ }
+
+ global = kzalloc(sizeof(*global), GFP_KERNEL);
+ if (!global)
+ return -ENOMEM;
+
+ global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
+ GFP_KERNEL);
+ if (!global->bad_page_cpu)
+ goto free_global;
+
+ memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));
+
+ if (version == ETNAVIV_IOMMU_V2) {
+ global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
+ &global->v2.pta_dma, GFP_KERNEL);
+ if (!global->v2.pta_cpu)
+ goto free_bad_page;
+ }
+
+ global->dev = dev;
+ global->version = version;
+ global->use = 1;
+ mutex_init(&global->lock);
+
+ if (version == ETNAVIV_IOMMU_V1)
+ global->ops = &etnaviv_iommuv1_ops;
+ else
+ global->ops = &etnaviv_iommuv2_ops;
+
+ priv->mmu_global = global;
+
+ return 0;
+
+free_bad_page:
+ dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
+free_global:
+ kfree(global);
+
+ return -ENOMEM;
}
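
The error path of etnaviv_iommu_global_init() uses the usual kernel goto-unwind ladder: each later allocation jumps to a label that frees everything set up before it, in reverse order. The shape, reduced to two allocations:

    #include <stdlib.h>

    static void *bad_page, *pta;

    /* Each later allocation jumps to a label that frees what was already
     * set up, mirroring free_bad_page/free_global above. */
    static int global_init(void)
    {
        bad_page = malloc(4096);
        if (!bad_page)
            goto err;

        pta = malloc(4096);
        if (!pta)
            goto err_free_bad_page;

        return 0;

    err_free_bad_page:
        free(bad_page);
    err:
        return -1;                      /* -ENOMEM */
    }

    int main(void)
    {
        return global_init();
    }
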
-void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
+void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
- iommu->domain->ops->dump(iommu->domain, buf);
+ struct etnaviv_drm_private *priv = gpu->drm->dev_private;
+ struct etnaviv_iommu_global *global = priv->mmu_global;
+
+ if (--global->use > 0)
+ return;
+
+ if (global->v2.pta_cpu)
+ dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
+ global->v2.pta_cpu, global->v2.pta_dma);
+
+ if (global->bad_page_cpu)
+ dma_free_wc(global->dev, SZ_4K,
+ global->bad_page_cpu, global->bad_page_dma);
+
+ mutex_destroy(&global->lock);
+ kfree(global);
+
+ priv->mmu_global = NULL;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
index a0db17ffb686..d1d6902fd13b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -16,61 +16,109 @@ enum etnaviv_iommu_version {
struct etnaviv_gpu;
struct etnaviv_vram_mapping;
-struct etnaviv_iommu_domain;
+struct etnaviv_iommu_global;
+struct etnaviv_iommu_context;
-struct etnaviv_iommu_domain_ops {
- void (*free)(struct etnaviv_iommu_domain *);
- int (*map)(struct etnaviv_iommu_domain *domain, unsigned long iova,
+struct etnaviv_iommu_ops {
+ struct etnaviv_iommu_context *(*init)(struct etnaviv_iommu_global *);
+ void (*free)(struct etnaviv_iommu_context *);
+ int (*map)(struct etnaviv_iommu_context *context, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
- size_t (*unmap)(struct etnaviv_iommu_domain *domain, unsigned long iova,
+ size_t (*unmap)(struct etnaviv_iommu_context *context, unsigned long iova,
size_t size);
- size_t (*dump_size)(struct etnaviv_iommu_domain *);
- void (*dump)(struct etnaviv_iommu_domain *, void *);
+ size_t (*dump_size)(struct etnaviv_iommu_context *);
+ void (*dump)(struct etnaviv_iommu_context *, void *);
+ void (*restore)(struct etnaviv_gpu *, struct etnaviv_iommu_context *);
};
-struct etnaviv_iommu_domain {
+extern const struct etnaviv_iommu_ops etnaviv_iommuv1_ops;
+extern const struct etnaviv_iommu_ops etnaviv_iommuv2_ops;
+
+#define ETNAVIV_PTA_SIZE SZ_4K
+#define ETNAVIV_PTA_ENTRIES (ETNAVIV_PTA_SIZE / sizeof(u64))
+
+struct etnaviv_iommu_global {
struct device *dev;
+ enum etnaviv_iommu_version version;
+ const struct etnaviv_iommu_ops *ops;
+ unsigned int use;
+ struct mutex lock;
+
void *bad_page_cpu;
dma_addr_t bad_page_dma;
- u64 base;
- u64 size;
- const struct etnaviv_iommu_domain_ops *ops;
+ u32 memory_base;
+
+ /*
+ * This union holds members needed by either MMUv1 or MMUv2, which
+ * can not exist at the same time.
+ */
+ union {
+ struct {
+ struct etnaviv_iommu_context *shared_context;
+ } v1;
+ struct {
+ /* P(age) T(able) A(rray) */
+ u64 *pta_cpu;
+ dma_addr_t pta_dma;
+ struct spinlock pta_lock;
+ DECLARE_BITMAP(pta_alloc, ETNAVIV_PTA_ENTRIES);
+ } v2;
+ };
};
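
The union above is effectively version-tagged: only the arm matching global->version is ever valid, which is safe because a given GPU is either MMUv1 or MMUv2, never both. A minimal illustration of the access discipline (C11 anonymous union; names are illustrative):

    #include <stdio.h>

    enum mmu_version { V1, V2 };

    struct global {
        enum mmu_version version;
        union {                 /* only the arm matching 'version' is valid */
            struct { void *shared_context; } v1;
            struct { unsigned long long *pta_cpu; } v2;
        };
    };

    int main(void)
    {
        struct global g = { .version = V1 };

        if (g.version == V1)
            g.v1.shared_context = NULL; /* never touch .v2 on this path */
        printf("version-tagged union ok\n");
        return 0;
    }
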
-struct etnaviv_iommu {
- struct etnaviv_gpu *gpu;
- struct etnaviv_iommu_domain *domain;
-
- enum etnaviv_iommu_version version;
+struct etnaviv_iommu_context {
+ struct kref refcount;
+ struct etnaviv_iommu_global *global;
/* memory manager for GPU address area */
struct mutex lock;
struct list_head mappings;
struct drm_mm mm;
- bool need_flush;
+ unsigned int flush_seq;
+
+ /* Not part of the context, but needs to have the same lifetime */
+ struct etnaviv_vram_mapping cmdbuf_mapping;
};
+int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu);
+void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu);
+
struct etnaviv_gem_object;
-int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
+ struct etnaviv_vram_mapping *mapping, u64 va);
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
struct etnaviv_vram_mapping *mapping);
-void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
- struct etnaviv_vram_mapping *mapping);
-
-int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
- struct drm_mm_node *vram_node, size_t size,
- u32 *iova);
-void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
- struct drm_mm_node *vram_node, size_t size,
- u32 iova);
-
-size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
-void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
-struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu);
-void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
-void etnaviv_iommu_restore(struct etnaviv_gpu *gpu);
+int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *ctx,
+ struct etnaviv_vram_mapping *mapping,
+ u32 memory_base, dma_addr_t paddr,
+ size_t size);
+void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *ctx,
+ struct etnaviv_vram_mapping *mapping);
+
+size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *ctx);
+void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
+
+struct etnaviv_iommu_context *
+etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
+ struct etnaviv_cmdbuf_suballoc *suballoc);
+static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
+{
+ kref_get(&ctx->refcount);
+}
+void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
+void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
+ struct etnaviv_iommu_context *ctx);
+
+struct etnaviv_iommu_context *
+etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global);
+struct etnaviv_iommu_context *
+etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global);
+
+u32 etnaviv_iommuv2_get_mtlb_addr(struct etnaviv_iommu_context *context);
+unsigned short etnaviv_iommuv2_get_pta_id(struct etnaviv_iommu_context *context);
#endif /* __ETNAVIV_MMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
index 4227a4006c34..8adbf2861bff 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -4,6 +4,7 @@
* Copyright (C) 2017 Zodiac Inflight Innovations
*/
+#include "common.xml.h"
#include "etnaviv_gpu.h"
#include "etnaviv_perfmon.h"
#include "state_hi.xml.h"
@@ -15,8 +16,8 @@ struct etnaviv_pm_signal {
u32 data;
u32 (*sample)(struct etnaviv_gpu *gpu,
- const struct etnaviv_pm_domain *domain,
- const struct etnaviv_pm_signal *signal);
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal);
};
struct etnaviv_pm_domain {
@@ -35,13 +36,6 @@ struct etnaviv_pm_domain_meta {
u32 nr_domains;
};
-static u32 simple_reg_read(struct etnaviv_gpu *gpu,
- const struct etnaviv_pm_domain *domain,
- const struct etnaviv_pm_signal *signal)
-{
- return gpu_read(gpu, signal->data);
-}
-
static u32 perf_reg_read(struct etnaviv_gpu *gpu,
const struct etnaviv_pm_domain *domain,
const struct etnaviv_pm_signal *signal)
@@ -75,6 +69,34 @@ static u32 pipe_reg_read(struct etnaviv_gpu *gpu,
return value;
}
+static u32 hi_total_cycle_read(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal)
+{
+ u32 reg = VIVS_HI_PROFILE_TOTAL_CYCLES;
+
+ if (gpu->identity.model == chipModel_GC880 ||
+ gpu->identity.model == chipModel_GC2000 ||
+ gpu->identity.model == chipModel_GC2100)
+ reg = VIVS_MC_PROFILE_CYCLE_COUNTER;
+
+ return gpu_read(gpu, reg);
+}
+
+static u32 hi_total_idle_cycle_read(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal)
+{
+ u32 reg = VIVS_HI_PROFILE_IDLE_CYCLES;
+
+ if (gpu->identity.model == chipModel_GC880 ||
+ gpu->identity.model == chipModel_GC2000 ||
+ gpu->identity.model == chipModel_GC2100)
+ reg = VIVS_HI_PROFILE_TOTAL_CYCLES;
+
+ return gpu_read(gpu, reg);
+}
+
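
The two hooks above replace a fixed register with a per-model lookup: some GC models report the cycle counters in different registers, so the sample callback picks the register at read time. A sketch of the selection (register values here are placeholders, not the real VIVS_* constants, and the model numbers are assumed to match the chipModel_* defines):

    #include <stdio.h>
    #include <stdint.h>

    /* Placeholder register numbers, standing in for the VIVS_* constants. */
    #define REG_HI_TOTAL_CYCLES  0x0078
    #define REG_MC_CYCLE_COUNTER 0x0438

    /* Pick the register by GPU model at sample time, as the new
     * hi_total_cycle_read() hook does. */
    static uint32_t total_cycles_reg(unsigned int model)
    {
        switch (model) {
        case 0x880:
        case 0x2000:
        case 0x2100:
            return REG_MC_CYCLE_COUNTER;
        default:
            return REG_HI_TOTAL_CYCLES;
        }
    }

    int main(void)
    {
        printf("%#x %#x\n", total_cycles_reg(0x2000), total_cycles_reg(0x3000));
        return 0;
    }
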
static const struct etnaviv_pm_domain doms_3d[] = {
{
.name = "HI",
@@ -84,13 +106,13 @@ static const struct etnaviv_pm_domain doms_3d[] = {
.signal = (const struct etnaviv_pm_signal[]) {
{
"TOTAL_CYCLES",
- VIVS_HI_PROFILE_TOTAL_CYCLES,
- &simple_reg_read
+ 0,
+ &hi_total_cycle_read
},
{
"IDLE_CYCLES",
- VIVS_HI_PROFILE_IDLE_CYCLES,
- &simple_reg_read
+ 0,
+ &hi_total_idle_cycle_read
},
{
"AXI_CYCLES_READ_REQUEST_STALLED",
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index a813c824e154..4e3e95dce6d8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -3,7 +3,7 @@
* Copyright (C) 2017 Etnaviv Project
*/
-#include <linux/kthread.h>
+#include <linux/moduleparam.h>
#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
@@ -115,7 +115,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
drm_sched_increase_karma(sched_job);
/* get the GPU back into the init state */
- etnaviv_core_dump(gpu);
+ etnaviv_core_dump(submit);
etnaviv_gpu_recover_hang(gpu);
drm_sched_resubmit_jobs(&gpu->sched);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index cc53dcad25e4..8a03a33c32cb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -8,12 +8,20 @@
*/
#include <linux/component.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 164d914cbe9a..8ea2e1d77802 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -17,6 +17,8 @@
#include <linux/regmap.h>
#include <linux/spinlock.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 1c524db9570f..7ae087b0504d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -16,6 +16,8 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index d45bfab6fe40..4f2b7551b251 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -16,7 +16,10 @@
* all copies or substantial portions of the Software.
*/
-#include <drm/drmP.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_mode.h>
#include <drm/exynos_drm.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
index 9cbbc301bec9..67a0805ee009 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -6,8 +6,6 @@
#ifndef _EXYNOS_DRM_IPP_H_
#define _EXYNOS_DRM_IPP_H_
-#include <drm/drmP.h>
-
struct exynos_drm_ipp;
struct exynos_drm_ipp_task;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 8ebad2740ad5..b98482990d1a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -15,7 +15,9 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+#include <drm/drm_fourcc.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index b24ba948b725..497973e9b2c5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_fourcc.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 279d83eaffc0..a92fd6c70b09 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -65,17 +65,9 @@ static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = {
static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
{
struct fsl_dcu_drm_connector *fsl_connector;
- int (*get_modes)(struct drm_panel *panel);
- int num_modes = 0;
fsl_connector = to_fsl_dcu_connector(connector);
- if (fsl_connector->panel && fsl_connector->panel->funcs &&
- fsl_connector->panel->funcs->get_modes) {
- get_modes = fsl_connector->panel->funcs->get_modes;
- num_modes = get_modes(fsl_connector->panel);
- }
-
- return num_modes;
+ return drm_panel_get_modes(fsl_connector->panel);
}
static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
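
The connector hunk above swaps the open-coded NULL checks for the drm_panel_get_modes() helper. A userspace sketch of such a NULL-tolerant wrapper (the return values are illustrative; the real helper's error codes may differ):

    #include <stdio.h>

    struct panel;
    struct panel_funcs { int (*get_modes)(struct panel *p); };
    struct panel { const struct panel_funcs *funcs; };

    /* Centralize the NULL checks the old connector code open-coded. */
    static int panel_get_modes(struct panel *p)
    {
        if (p && p->funcs && p->funcs->get_modes)
            return p->funcs->get_modes(p);
        return 0;
    }

    int main(void)
    {
        printf("%d\n", panel_get_modes(NULL));  /* no crash, 0 modes */
        return 0;
    }
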
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index f20eedf0073a..35a3c5f0c38c 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI && MMU && ARM64
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 2ae538835781..c103005b0a33 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -66,16 +66,14 @@ static struct drm_driver hibmc_driver = {
static int __maybe_unused hibmc_pm_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
return drm_mode_config_helper_suspend(drm_dev);
}
static int __maybe_unused hibmc_pm_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
return drm_mode_config_helper_resume(drm_dev);
}
@@ -394,18 +392,7 @@ static struct pci_driver hibmc_pci_driver = {
.driver.pm = &hibmc_pm_ops,
};
-static int __init hibmc_init(void)
-{
- return pci_register_driver(&hibmc_pci_driver);
-}
-
-static void __exit hibmc_exit(void)
-{
- return pci_unregister_driver(&hibmc_pci_driver);
-}
-
-module_init(hibmc_init);
-module_exit(hibmc_exit);
+module_pci_driver(hibmc_pci_driver);
MODULE_DEVICE_TABLE(pci, hibmc_pci_table);
MODULE_AUTHOR("RongrongZou <[email protected]>");
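
module_pci_driver() generates exactly the init/exit boilerplate deleted above. A userspace analogue of that kind of registration macro (register_drv/unregister_drv are invented stand-ins, not kernel API):

    #include <stdio.h>

    struct drv { const char *name; };

    static int register_drv(struct drv *d)
    {
        printf("register %s\n", d->name);
        return 0;
    }

    static void unregister_drv(struct drv *d)
    {
        printf("unregister %s\n", d->name);
    }

    /* One macro expanding to the init/exit pair, like module_pci_driver(). */
    #define module_drv(__drv) \
        static int drv_init(void) { return register_drv(&__drv); } \
        static void drv_exit(void) { unregister_drv(&__drv); }

    static struct drv hibmc = { "hibmc" };
    module_drv(hibmc)

    int main(void)
    {
        drv_init();
        drv_exit();
        return 0;
    }
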
diff --git a/drivers/gpu/drm/hisilicon/kirin/Kconfig b/drivers/gpu/drm/hisilicon/kirin/Kconfig
index 0fa29af08ad0..290553e2f6b4 100644
--- a/drivers/gpu/drm/hisilicon/kirin/Kconfig
+++ b/drivers/gpu/drm/hisilicon/kirin/Kconfig
@@ -5,16 +5,8 @@ config DRM_HISI_KIRIN
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
- select HISI_KIRIN_DW_DSI
+ select DRM_MIPI_DSI
help
Choose this option if you have a HiSilicon Kirin chipset (hi6220).
If M is selected the module will be called kirin-drm.
-config HISI_KIRIN_DW_DSI
- tristate "HiSilicon Kirin specific extensions for Synopsys DW MIPI DSI"
- depends on DRM_HISI_KIRIN
- select DRM_MIPI_DSI
- help
- This selects support for HiSilicon Kirin SoC specific extensions for
- the Synopsys DesignWare DSI driver. If you want to enable MIPI DSI on
- hi6220 based SoC, you should selet this option.
diff --git a/drivers/gpu/drm/hisilicon/kirin/Makefile b/drivers/gpu/drm/hisilicon/kirin/Makefile
index c0501fa3fe53..d9323f66a7d4 100644
--- a/drivers/gpu/drm/hisilicon/kirin/Makefile
+++ b/drivers/gpu/drm/hisilicon/kirin/Makefile
@@ -2,6 +2,5 @@
kirin-drm-y := kirin_drm_drv.o \
kirin_drm_ade.o
-obj-$(CONFIG_DRM_HISI_KIRIN) += kirin-drm.o
+obj-$(CONFIG_DRM_HISI_KIRIN) += kirin-drm.o dw_drm_dsi.o
-obj-$(CONFIG_HISI_KIRIN_DW_DSI) += dw_drm_dsi.o
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
index e2ac09894a6d..0da860200410 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
@@ -83,6 +83,7 @@
#define VSIZE_OFST 20
#define LDI_INT_EN 0x741C
#define FRAME_END_INT_EN_OFST 1
+#define UNDERFLOW_INT_EN_OFST 2
#define LDI_CTRL 0x7420
#define BPP_OFST 3
#define DATA_GATE_EN BIT(2)
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 0df1afdf319d..73cd28a6ea07 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -30,19 +30,14 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "kirin_drm_drv.h"
#include "kirin_ade_reg.h"
-#define PRIMARY_CH ADE_CH1 /* primary plane */
#define OUT_OVLY ADE_OVLY2 /* output overlay compositor */
#define ADE_DEBUG 1
-#define to_ade_crtc(crtc) \
- container_of(crtc, struct ade_crtc, base)
-
-#define to_ade_plane(plane) \
- container_of(plane, struct ade_plane, base)
struct ade_hw_ctx {
void __iomem *base;
@@ -51,36 +46,14 @@ struct ade_hw_ctx {
struct clk *media_noc_clk;
struct clk *ade_pix_clk;
struct reset_control *reset;
+ struct work_struct display_reset_wq;
bool power_on;
int irq;
-};
-
-struct ade_crtc {
- struct drm_crtc base;
- struct ade_hw_ctx *ctx;
- bool enable;
- u32 out_format;
-};
-
-struct ade_plane {
- struct drm_plane base;
- void *ctx;
- u8 ch; /* channel */
-};
-struct ade_data {
- struct ade_crtc acrtc;
- struct ade_plane aplane[ADE_CH_NUM];
- struct ade_hw_ctx ctx;
+ struct drm_crtc *crtc;
};
-/* ade-format info: */
-struct ade_format {
- u32 pixel_format;
- enum ade_fb_format ade_format;
-};
-
-static const struct ade_format ade_formats[] = {
+static const struct kirin_format ade_formats[] = {
/* 16bpp RGB: */
{ DRM_FORMAT_RGB565, ADE_RGB_565 },
{ DRM_FORMAT_BGR565, ADE_BGR_565 },
@@ -96,7 +69,7 @@ static const struct ade_format ade_formats[] = {
{ DRM_FORMAT_ABGR8888, ADE_ABGR_8888 },
};
-static const u32 channel_formats1[] = {
+static const u32 channel_formats[] = {
/* channel 1,2,3,4 */
DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, DRM_FORMAT_RGB888,
DRM_FORMAT_BGR888, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888,
@@ -104,19 +77,6 @@ static const u32 channel_formats1[] = {
DRM_FORMAT_ABGR8888
};
-u32 ade_get_channel_formats(u8 ch, const u32 **formats)
-{
- switch (ch) {
- case ADE_CH1:
- *formats = channel_formats1;
- return ARRAY_SIZE(channel_formats1);
- default:
- DRM_ERROR("no this channel %d\n", ch);
- *formats = NULL;
- return 0;
- }
-}
-
/* convert from fourcc format to ade format */
static u32 ade_get_format(u32 pixel_format)
{
@@ -124,7 +84,7 @@ static u32 ade_get_format(u32 pixel_format)
for (i = 0; i < ARRAY_SIZE(ade_formats); i++)
if (ade_formats[i].pixel_format == pixel_format)
- return ade_formats[i].ade_format;
+ return ade_formats[i].hw_format;
/* not found */
DRM_ERROR("Not found pixel format!!fourcc_format= %d\n",
@@ -176,14 +136,15 @@ static void ade_init(struct ade_hw_ctx *ctx)
*/
ade_update_bits(base + ADE_CTRL, FRM_END_START_OFST,
FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND);
+ ade_update_bits(base + LDI_INT_EN, UNDERFLOW_INT_EN_OFST, MASK(1), 1);
}
static bool ade_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct ade_crtc *acrtc = to_ade_crtc(crtc);
- struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
+ struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
adjusted_mode->clock =
clk_round_rate(ctx->ade_pix_clk, mode->clock * 1000) / 1000;
@@ -208,11 +169,10 @@ static void ade_set_pix_clk(struct ade_hw_ctx *ctx,
adj_mode->clock = clk_get_rate(ctx->ade_pix_clk) / 1000;
}
-static void ade_ldi_set_mode(struct ade_crtc *acrtc,
+static void ade_ldi_set_mode(struct ade_hw_ctx *ctx,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- struct ade_hw_ctx *ctx = acrtc->ctx;
void __iomem *base = ctx->base;
u32 width = mode->hdisplay;
u32 height = mode->vdisplay;
@@ -299,9 +259,8 @@ static void ade_power_down(struct ade_hw_ctx *ctx)
ctx->power_on = false;
}
-static void ade_set_medianoc_qos(struct ade_crtc *acrtc)
+static void ade_set_medianoc_qos(struct ade_hw_ctx *ctx)
{
- struct ade_hw_ctx *ctx = acrtc->ctx;
struct regmap *map = ctx->noc_regmap;
regmap_update_bits(map, ADE0_QOSGENERATOR_MODE,
@@ -317,8 +276,8 @@ static void ade_set_medianoc_qos(struct ade_crtc *acrtc)
static int ade_crtc_enable_vblank(struct drm_crtc *crtc)
{
- struct ade_crtc *acrtc = to_ade_crtc(crtc);
- struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
+ struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
void __iomem *base = ctx->base;
if (!ctx->power_on)
@@ -332,8 +291,8 @@ static int ade_crtc_enable_vblank(struct drm_crtc *crtc)
static void ade_crtc_disable_vblank(struct drm_crtc *crtc)
{
- struct ade_crtc *acrtc = to_ade_crtc(crtc);
- struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
+ struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
void __iomem *base = ctx->base;
if (!ctx->power_on) {
@@ -345,11 +304,21 @@ static void ade_crtc_disable_vblank(struct drm_crtc *crtc)
MASK(1), 0);
}
+static void drm_underflow_wq(struct work_struct *work)
+{
+ struct ade_hw_ctx *ctx = container_of(work, struct ade_hw_ctx,
+ display_reset_wq);
+ struct drm_device *drm_dev = ctx->crtc->dev;
+ struct drm_atomic_state *state;
+
+ state = drm_atomic_helper_suspend(drm_dev);
+ drm_atomic_helper_resume(drm_dev, state);
+}
+
static irqreturn_t ade_irq_handler(int irq, void *data)
{
- struct ade_crtc *acrtc = data;
- struct ade_hw_ctx *ctx = acrtc->ctx;
- struct drm_crtc *crtc = &acrtc->base;
+ struct ade_hw_ctx *ctx = data;
+ struct drm_crtc *crtc = ctx->crtc;
void __iomem *base = ctx->base;
u32 status;
@@ -362,15 +331,20 @@ static irqreturn_t ade_irq_handler(int irq, void *data)
MASK(1), 1);
drm_crtc_handle_vblank(crtc);
}
+ if (status & BIT(UNDERFLOW_INT_EN_OFST)) {
+ ade_update_bits(base + LDI_INT_CLR, UNDERFLOW_INT_EN_OFST,
+ MASK(1), 1);
+ DRM_ERROR("LDI underflow!");
+ schedule_work(&ctx->display_reset_wq);
+ }
return IRQ_HANDLED;
}
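
The underflow handling above is the classic top-half/bottom-half split: the IRQ handler only clears the status bit and schedules work, and the worker performs the heavy display reset (suspend + resume) in process context. A pthread sketch of deferring work out of the "interrupt" path (the condition variable plays the role of schedule_work()):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int pending;

    static void *worker(void *arg)          /* drm_underflow_wq() */
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!pending)
            pthread_cond_wait(&cond, &lock);
        pending = 0;
        pthread_mutex_unlock(&lock);
        printf("resetting display pipeline\n");  /* suspend + resume */
        return NULL;
    }

    static void irq_handler(void)           /* runs in "interrupt" context */
    {
        pthread_mutex_lock(&lock);
        pending = 1;                        /* just mark the work... */
        pthread_cond_signal(&cond);         /* ...and wake the worker */
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        irq_handler();
        pthread_join(t, NULL);
        return 0;
    }
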
-static void ade_display_enable(struct ade_crtc *acrtc)
+static void ade_display_enable(struct ade_hw_ctx *ctx)
{
- struct ade_hw_ctx *ctx = acrtc->ctx;
void __iomem *base = ctx->base;
- u32 out_fmt = acrtc->out_format;
+ u32 out_fmt = LDI_OUT_RGB_888;
/* enable output overlay compositor */
writel(ADE_ENABLE, base + ADE_OVLYX_CTL(OUT_OVLY));
@@ -483,11 +457,11 @@ static void ade_dump_regs(void __iomem *base) { }
static void ade_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- struct ade_crtc *acrtc = to_ade_crtc(crtc);
- struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
+ struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
int ret;
- if (acrtc->enable)
+ if (kcrtc->enable)
return;
if (!ctx->power_on) {
@@ -496,63 +470,63 @@ static void ade_crtc_atomic_enable(struct drm_crtc *crtc,
return;
}
- ade_set_medianoc_qos(acrtc);
- ade_display_enable(acrtc);
+ ade_set_medianoc_qos(ctx);
+ ade_display_enable(ctx);
ade_dump_regs(ctx->base);
drm_crtc_vblank_on(crtc);
- acrtc->enable = true;
+ kcrtc->enable = true;
}
static void ade_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- struct ade_crtc *acrtc = to_ade_crtc(crtc);
- struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
+ struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
- if (!acrtc->enable)
+ if (!kcrtc->enable)
return;
drm_crtc_vblank_off(crtc);
ade_power_down(ctx);
- acrtc->enable = false;
+ kcrtc->enable = false;
}
static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
- struct ade_crtc *acrtc = to_ade_crtc(crtc);
- struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
+ struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
struct drm_display_mode *mode = &crtc->state->mode;
struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode;
if (!ctx->power_on)
(void)ade_power_up(ctx);
- ade_ldi_set_mode(acrtc, mode, adj_mode);
+ ade_ldi_set_mode(ctx, mode, adj_mode);
}
static void ade_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- struct ade_crtc *acrtc = to_ade_crtc(crtc);
- struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
+ struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
struct drm_display_mode *mode = &crtc->state->mode;
struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode;
if (!ctx->power_on)
(void)ade_power_up(ctx);
- ade_ldi_set_mode(acrtc, mode, adj_mode);
+ ade_ldi_set_mode(ctx, mode, adj_mode);
}
static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- struct ade_crtc *acrtc = to_ade_crtc(crtc);
- struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
+ struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
struct drm_pending_vblank_event *event = crtc->state->event;
void __iomem *base = ctx->base;
/* only crtc is enabled regs take effect */
- if (acrtc->enable) {
+ if (kcrtc->enable) {
ade_dump_regs(base);
/* flush ade registers */
writel(ADE_ENABLE, base + ADE_EN);
@@ -590,35 +564,6 @@ static const struct drm_crtc_funcs ade_crtc_funcs = {
.disable_vblank = ade_crtc_disable_vblank,
};
-static int ade_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_plane *plane)
-{
- struct device_node *port;
- int ret;
-
- /* set crtc port so that
- * drm_of_find_possible_crtcs call works
- */
- port = of_get_child_by_name(dev->dev->of_node, "port");
- if (!port) {
- DRM_ERROR("no port node found in %pOF\n", dev->dev->of_node);
- return -EINVAL;
- }
- of_node_put(port);
- crtc->port = port;
-
- ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
- &ade_crtc_funcs, NULL);
- if (ret) {
- DRM_ERROR("failed to init crtc.\n");
- return ret;
- }
-
- drm_crtc_helper_add(crtc, &ade_crtc_helper_funcs);
-
- return 0;
-}
-
static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb,
u32 ch, u32 y, u32 in_h, u32 fmt)
{
@@ -780,16 +725,16 @@ static void ade_compositor_routing_disable(void __iomem *base, u32 ch)
/*
* Typically, a channel looks like: DMA-->clip-->scale-->ctrans-->compositor
*/
-static void ade_update_channel(struct ade_plane *aplane,
+static void ade_update_channel(struct kirin_plane *kplane,
struct drm_framebuffer *fb, int crtc_x,
int crtc_y, unsigned int crtc_w,
unsigned int crtc_h, u32 src_x,
u32 src_y, u32 src_w, u32 src_h)
{
- struct ade_hw_ctx *ctx = aplane->ctx;
+ struct ade_hw_ctx *ctx = kplane->hw_ctx;
void __iomem *base = ctx->base;
u32 fmt = ade_get_format(fb->format->format);
- u32 ch = aplane->ch;
+ u32 ch = kplane->ch;
u32 in_w;
u32 in_h;
@@ -813,11 +758,11 @@ static void ade_update_channel(struct ade_plane *aplane,
ade_compositor_routing_set(base, ch, crtc_x, crtc_y, in_w, in_h, fmt);
}
-static void ade_disable_channel(struct ade_plane *aplane)
+static void ade_disable_channel(struct kirin_plane *kplane)
{
- struct ade_hw_ctx *ctx = aplane->ctx;
+ struct ade_hw_ctx *ctx = kplane->hw_ctx;
void __iomem *base = ctx->base;
- u32 ch = aplane->ch;
+ u32 ch = kplane->ch;
DRM_DEBUG_DRIVER("disable channel%d\n", ch + 1);
@@ -879,10 +824,10 @@ static int ade_plane_atomic_check(struct drm_plane *plane,
static void ade_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct drm_plane_state *state = plane->state;
- struct ade_plane *aplane = to_ade_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ struct kirin_plane *kplane = to_kirin_plane(plane);
- ade_update_channel(aplane, state->fb, state->crtc_x, state->crtc_y,
+ ade_update_channel(kplane, state->fb, state->crtc_x, state->crtc_y,
state->crtc_w, state->crtc_h,
state->src_x >> 16, state->src_y >> 16,
state->src_w >> 16, state->src_h >> 16);
@@ -891,9 +836,9 @@ static void ade_plane_atomic_update(struct drm_plane *plane,
static void ade_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct ade_plane *aplane = to_ade_plane(plane);
+ struct kirin_plane *kplane = to_kirin_plane(plane);
- ade_disable_channel(aplane);
+ ade_disable_channel(kplane);
}
static const struct drm_plane_helper_funcs ade_plane_helper_funcs = {
@@ -911,144 +856,124 @@ static struct drm_plane_funcs ade_plane_funcs = {
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
-static int ade_plane_init(struct drm_device *dev, struct ade_plane *aplane,
- enum drm_plane_type type)
-{
- const u32 *fmts;
- u32 fmts_cnt;
- int ret = 0;
-
- /* get properties */
- fmts_cnt = ade_get_channel_formats(aplane->ch, &fmts);
- if (ret)
- return ret;
-
- ret = drm_universal_plane_init(dev, &aplane->base, 1, &ade_plane_funcs,
- fmts, fmts_cnt, NULL, type, NULL);
- if (ret) {
- DRM_ERROR("fail to init plane, ch=%d\n", aplane->ch);
- return ret;
- }
-
- drm_plane_helper_add(&aplane->base, &ade_plane_helper_funcs);
-
- return 0;
-}
-
-static int ade_dts_parse(struct platform_device *pdev, struct ade_hw_ctx *ctx)
+static void *ade_hw_ctx_alloc(struct platform_device *pdev,
+ struct drm_crtc *crtc)
{
struct resource *res;
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
+ struct ade_hw_ctx *ctx = NULL;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ DRM_ERROR("failed to alloc ade_hw_ctx\n");
+ return ERR_PTR(-ENOMEM);
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ctx->base = devm_ioremap_resource(dev, res);
if (IS_ERR(ctx->base)) {
DRM_ERROR("failed to remap ade io base\n");
- return PTR_ERR(ctx->base);
+ return ERR_PTR(-EIO);
}
ctx->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(ctx->reset))
- return PTR_ERR(ctx->reset);
+ return ERR_PTR(-ENODEV);
ctx->noc_regmap =
syscon_regmap_lookup_by_phandle(np, "hisilicon,noc-syscon");
if (IS_ERR(ctx->noc_regmap)) {
DRM_ERROR("failed to get noc regmap\n");
- return PTR_ERR(ctx->noc_regmap);
+ return ERR_PTR(-ENODEV);
}
ctx->irq = platform_get_irq(pdev, 0);
if (ctx->irq < 0) {
DRM_ERROR("failed to get irq\n");
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
}
ctx->ade_core_clk = devm_clk_get(dev, "clk_ade_core");
if (IS_ERR(ctx->ade_core_clk)) {
DRM_ERROR("failed to parse clk ADE_CORE\n");
- return PTR_ERR(ctx->ade_core_clk);
+ return ERR_PTR(-ENODEV);
}
ctx->media_noc_clk = devm_clk_get(dev, "clk_codec_jpeg");
if (IS_ERR(ctx->media_noc_clk)) {
DRM_ERROR("failed to parse clk CODEC_JPEG\n");
- return PTR_ERR(ctx->media_noc_clk);
+ return ERR_PTR(-ENODEV);
}
ctx->ade_pix_clk = devm_clk_get(dev, "clk_ade_pix");
if (IS_ERR(ctx->ade_pix_clk)) {
DRM_ERROR("failed to parse clk ADE_PIX\n");
- return PTR_ERR(ctx->ade_pix_clk);
+ return ERR_PTR(-ENODEV);
}
- return 0;
-}
-
-static int ade_drm_init(struct platform_device *pdev)
-{
- struct drm_device *dev = platform_get_drvdata(pdev);
- struct ade_data *ade;
- struct ade_hw_ctx *ctx;
- struct ade_crtc *acrtc;
- struct ade_plane *aplane;
- enum drm_plane_type type;
- int ret;
- int i;
-
- ade = devm_kzalloc(dev->dev, sizeof(*ade), GFP_KERNEL);
- if (!ade) {
- DRM_ERROR("failed to alloc ade_data\n");
- return -ENOMEM;
- }
- platform_set_drvdata(pdev, ade);
-
- ctx = &ade->ctx;
- acrtc = &ade->acrtc;
- acrtc->ctx = ctx;
- acrtc->out_format = LDI_OUT_RGB_888;
-
- ret = ade_dts_parse(pdev, ctx);
- if (ret)
- return ret;
-
- /*
- * plane init
- * TODO: Now only support primary plane, overlay planes
- * need to do.
- */
- for (i = 0; i < ADE_CH_NUM; i++) {
- aplane = &ade->aplane[i];
- aplane->ch = i;
- aplane->ctx = ctx;
- type = i == PRIMARY_CH ? DRM_PLANE_TYPE_PRIMARY :
- DRM_PLANE_TYPE_OVERLAY;
-
- ret = ade_plane_init(dev, aplane, type);
- if (ret)
- return ret;
- }
-
- /* crtc init */
- ret = ade_crtc_init(dev, &acrtc->base, &ade->aplane[PRIMARY_CH].base);
- if (ret)
- return ret;
-
/* vblank irq init */
- ret = devm_request_irq(dev->dev, ctx->irq, ade_irq_handler,
- IRQF_SHARED, dev->driver->name, acrtc);
+ ret = devm_request_irq(dev, ctx->irq, ade_irq_handler,
+ IRQF_SHARED, dev->driver->name, ctx);
if (ret)
- return ret;
+ return ERR_PTR(-EIO);
- return 0;
+ INIT_WORK(&ctx->display_reset_wq, drm_underflow_wq);
+ ctx->crtc = crtc;
+
+ return ctx;
}
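
The allocator above reports failure through the kernel's ERR_PTR convention rather than a NULL return. A minimal sketch of the caller side, using only the standard helpers from <linux/err.h> (example_use_hw_ctx is an illustrative name, not from this driver):

#include <linux/err.h>

static int example_use_hw_ctx(struct platform_device *pdev,
			      struct drm_crtc *crtc)
{
	void *ctx;

	ctx = ade_hw_ctx_alloc(pdev, crtc);
	if (IS_ERR(ctx))		/* the errno is encoded in the pointer */
		return PTR_ERR(ctx);	/* decode it back to a plain int */
	/* ... use ctx ... */
	return 0;
}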
-static void ade_drm_cleanup(struct platform_device *pdev)
+static void ade_hw_ctx_cleanup(void *hw_ctx)
{
}
-const struct kirin_dc_ops ade_dc_ops = {
- .init = ade_drm_init,
- .cleanup = ade_drm_cleanup
+static const struct drm_mode_config_funcs ade_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(ade_fops);
+
+static struct drm_driver ade_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &ade_fops,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create_internal,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+
+ .name = "kirin",
+ .desc = "Hisilicon Kirin620 SoC DRM Driver",
+ .date = "20150718",
+ .major = 1,
+ .minor = 0,
+};
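
DEFINE_DRM_GEM_CMA_FOPS(ade_fops) generates the file_operations table a CMA-backed DRM driver needs. Approximately, and quoted from memory of the drm_gem_cma_helper.h of this era rather than from this patch, the macro expands to:

static const struct file_operations ade_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
	.mmap		= drm_gem_cma_mmap,	/* CMA-specific mmap */
};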
+
+struct kirin_drm_data ade_driver_data = {
+ .register_connects = false,
+ .num_planes = ADE_CH_NUM,
+ .prim_plane = ADE_CH1,
+ .channel_formats = channel_formats,
+ .channel_formats_cnt = ARRAY_SIZE(channel_formats),
+ .config_max_width = 2048,
+ .config_max_height = 2048,
+ .driver = &ade_driver,
+ .crtc_helper_funcs = &ade_crtc_helper_funcs,
+ .crtc_funcs = &ade_crtc_funcs,
+ .plane_helper_funcs = &ade_plane_helper_funcs,
+ .plane_funcs = &ade_plane_funcs,
+ .mode_config_funcs = &ade_mode_config_funcs,
+
+ .alloc_hw_ctx = ade_hw_ctx_alloc,
+ .cleanup_hw_ctx = ade_hw_ctx_cleanup,
};
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 204c94c01e3d..d3145ae877d7 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -29,46 +29,146 @@
#include "kirin_drm_drv.h"
-static struct kirin_dc_ops *dc_ops;
+#define KIRIN_MAX_PLANE 2
-static int kirin_drm_kms_cleanup(struct drm_device *dev)
+struct kirin_drm_private {
+ struct kirin_crtc crtc;
+ struct kirin_plane planes[KIRIN_MAX_PLANE];
+ void *hw_ctx;
+};
+
+static int kirin_drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *plane,
+ const struct kirin_drm_data *driver_data)
{
- drm_kms_helper_poll_fini(dev);
- dc_ops->cleanup(to_platform_device(dev->dev));
- drm_mode_config_cleanup(dev);
+ struct device_node *port;
+ int ret;
+
+ /* set crtc port so that
+ * drm_of_find_possible_crtcs call works
+ */
+ port = of_get_child_by_name(dev->dev->of_node, "port");
+ if (!port) {
+ DRM_ERROR("no port node found in %pOF\n", dev->dev->of_node);
+ return -EINVAL;
+ }
+ of_node_put(port);
+ crtc->port = port;
+
+ ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+ driver_data->crtc_funcs, NULL);
+ if (ret) {
+ DRM_ERROR("failed to init crtc.\n");
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, driver_data->crtc_helper_funcs);
return 0;
}
-static const struct drm_mode_config_funcs kirin_drm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
-};
+static int kirin_drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ enum drm_plane_type type,
+ const struct kirin_drm_data *data)
+{
+ int ret = 0;
+
+ ret = drm_universal_plane_init(dev, plane, 1, data->plane_funcs,
+ data->channel_formats,
+ data->channel_formats_cnt,
+ NULL, type, NULL);
+ if (ret) {
+ DRM_ERROR("fail to init plane, ch=%d\n", 0);
+ return ret;
+ }
+
+ drm_plane_helper_add(plane, data->plane_helper_funcs);
-static void kirin_drm_mode_config_init(struct drm_device *dev)
+ return 0;
+}
+
+static void kirin_drm_private_cleanup(struct drm_device *dev)
{
- dev->mode_config.min_width = 0;
- dev->mode_config.min_height = 0;
+ struct kirin_drm_private *kirin_priv = dev->dev_private;
+ struct kirin_drm_data *data;
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
+ data = (struct kirin_drm_data *)of_device_get_match_data(dev->dev);
+ if (data->cleanup_hw_ctx)
+ data->cleanup_hw_ctx(kirin_priv->hw_ctx);
- dev->mode_config.funcs = &kirin_drm_mode_config_funcs;
+ devm_kfree(dev->dev, kirin_priv);
+ dev->dev_private = NULL;
}
-static int kirin_drm_kms_init(struct drm_device *dev)
+static int kirin_drm_private_init(struct drm_device *dev,
+ const struct kirin_drm_data *driver_data)
{
+ struct platform_device *pdev = to_platform_device(dev->dev);
+ struct kirin_drm_private *kirin_priv;
+ struct drm_plane *prim_plane;
+ enum drm_plane_type type;
+ void *ctx;
int ret;
+ u32 ch;
+
+ kirin_priv = devm_kzalloc(dev->dev, sizeof(*kirin_priv), GFP_KERNEL);
+ if (!kirin_priv) {
+ DRM_ERROR("failed to alloc kirin_drm_private\n");
+ return -ENOMEM;
+ }
+
+ ctx = driver_data->alloc_hw_ctx(pdev, &kirin_priv->crtc.base);
+ if (IS_ERR(ctx)) {
+ DRM_ERROR("failed to initialize kirin_priv hw ctx\n");
+ return -EINVAL;
+ }
+ kirin_priv->hw_ctx = ctx;
+
+ /*
+ * plane init
+ * TODO: only the primary plane is supported for now;
+ * overlay planes still need to be implemented.
+ */
+ for (ch = 0; ch < driver_data->num_planes; ch++) {
+ if (ch == driver_data->prim_plane)
+ type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ type = DRM_PLANE_TYPE_OVERLAY;
+ ret = kirin_drm_plane_init(dev, &kirin_priv->planes[ch].base,
+ type, driver_data);
+ if (ret)
+ return ret;
+ kirin_priv->planes[ch].ch = ch;
+ kirin_priv->planes[ch].hw_ctx = ctx;
+ }
- dev_set_drvdata(dev->dev, dev);
+ /* crtc init */
+ prim_plane = &kirin_priv->planes[driver_data->prim_plane].base;
+ ret = kirin_drm_crtc_init(dev, &kirin_priv->crtc.base,
+ prim_plane, driver_data);
+ if (ret)
+ return ret;
+ kirin_priv->crtc.hw_ctx = ctx;
+ dev->dev_private = kirin_priv;
+
+ return 0;
+}
+
+static int kirin_drm_kms_init(struct drm_device *dev,
+ const struct kirin_drm_data *driver_data)
+{
+ int ret;
/* dev->mode_config initialization */
drm_mode_config_init(dev);
- kirin_drm_mode_config_init(dev);
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = driver_data->config_max_width;
+ dev->mode_config.max_height = driver_data->config_max_width;
+ dev->mode_config.funcs = driver_data->mode_config_funcs;
/* display controller init */
- ret = dc_ops->init(to_platform_device(dev->dev));
+ ret = kirin_drm_private_init(dev, driver_data);
if (ret)
goto err_mode_config_cleanup;
@@ -76,7 +176,7 @@ static int kirin_drm_kms_init(struct drm_device *dev)
ret = component_bind_all(dev->dev, dev);
if (ret) {
DRM_ERROR("failed to bind all component.\n");
- goto err_dc_cleanup;
+ goto err_private_cleanup;
}
/* vblank init */
@@ -98,62 +198,78 @@ static int kirin_drm_kms_init(struct drm_device *dev)
err_unbind_all:
component_unbind_all(dev->dev, dev);
-err_dc_cleanup:
- dc_ops->cleanup(to_platform_device(dev->dev));
+err_private_cleanup:
+ kirin_drm_private_cleanup(dev);
err_mode_config_cleanup:
drm_mode_config_cleanup(dev);
-
return ret;
}
-DEFINE_DRM_GEM_CMA_FOPS(kirin_drm_fops);
-
-static int kirin_gem_cma_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
+static int compare_of(struct device *dev, void *data)
{
- return drm_gem_cma_dumb_create_internal(file, dev, args);
+ return dev->of_node == data;
}
-static struct drm_driver kirin_drm_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- .fops = &kirin_drm_fops,
-
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .dumb_create = kirin_gem_cma_dumb_create,
-
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
-
- .name = "kirin",
- .desc = "Hisilicon Kirin SoCs' DRM Driver",
- .date = "20150718",
- .major = 1,
- .minor = 0,
-};
+static int kirin_drm_kms_cleanup(struct drm_device *dev)
+{
+ drm_kms_helper_poll_fini(dev);
+ kirin_drm_private_cleanup(dev);
+ drm_mode_config_cleanup(dev);
-static int compare_of(struct device *dev, void *data)
+ return 0;
+}
+
+static int kirin_drm_connectors_register(struct drm_device *dev)
{
- return dev->of_node == data;
+ struct drm_connector *connector;
+ struct drm_connector *failed_connector;
+ struct drm_connector_list_iter conn_iter;
+ int ret;
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ ret = drm_connector_register(connector);
+ if (ret) {
+ failed_connector = connector;
+ goto err;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+
+err:
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (failed_connector == connector)
+ break;
+ drm_connector_unregister(connector);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
}
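
kirin_drm_connectors_register() walks the connector list twice, the second time to unwind registrations up to the failing connector. The basic iteration idiom, in isolation (a sketch; dev is assumed to be a bound drm_device):

struct drm_connector *connector;
struct drm_connector_list_iter iter;

drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
	/* each connector is referenced only for the current iteration */
	DRM_DEBUG_KMS("found connector %s\n", connector->name);
}
drm_connector_list_iter_end(&iter);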
static int kirin_drm_bind(struct device *dev)
{
- struct drm_driver *driver = &kirin_drm_driver;
+ struct kirin_drm_data *driver_data;
struct drm_device *drm_dev;
int ret;
- drm_dev = drm_dev_alloc(driver, dev);
+ driver_data = (struct kirin_drm_data *)of_device_get_match_data(dev);
+ if (!driver_data)
+ return -EINVAL;
+
+ drm_dev = drm_dev_alloc(driver_data->driver, dev);
if (IS_ERR(drm_dev))
return PTR_ERR(drm_dev);
+ dev_set_drvdata(dev, drm_dev);
- ret = kirin_drm_kms_init(drm_dev);
+ /* display controller init */
+ ret = kirin_drm_kms_init(drm_dev, driver_data);
if (ret)
goto err_drm_dev_put;
@@ -163,8 +279,17 @@ static int kirin_drm_bind(struct device *dev)
drm_fbdev_generic_setup(drm_dev, 32);
+ /* connectors should be registered after the drm device is registered */
+ if (driver_data->register_connects) {
+ ret = kirin_drm_connectors_register(drm_dev);
+ if (ret)
+ goto err_drm_dev_unregister;
+ }
+
return 0;
+err_drm_dev_unregister:
+ drm_dev_unregister(drm_dev);
err_kms_cleanup:
kirin_drm_kms_cleanup(drm_dev);
err_drm_dev_put:
@@ -194,12 +319,6 @@ static int kirin_drm_platform_probe(struct platform_device *pdev)
struct component_match *match = NULL;
struct device_node *remote;
- dc_ops = (struct kirin_dc_ops *)of_device_get_match_data(dev);
- if (!dc_ops) {
- DRM_ERROR("failed to get dt id data\n");
- return -EINVAL;
- }
-
remote = of_graph_get_remote_node(np, 0, 0);
if (!remote)
return -ENODEV;
@@ -208,20 +327,17 @@ static int kirin_drm_platform_probe(struct platform_device *pdev)
of_node_put(remote);
return component_master_add_with_match(dev, &kirin_drm_ops, match);
-
- return 0;
}
static int kirin_drm_platform_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &kirin_drm_ops);
- dc_ops = NULL;
return 0;
}
static const struct of_device_id kirin_drm_dt_ids[] = {
{ .compatible = "hisilicon,hi6220-ade",
- .data = &ade_dc_ops,
+ .data = &ade_driver_data,
},
{ /* end node */ },
};
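
With the ops table replaced by kirin_drm_data, supporting another SoC becomes a matter of adding a match entry. A hypothetical sketch (the second compatible string and data symbol are invented for illustration):

static const struct of_device_id kirin_drm_dt_ids_example[] = {
	{ .compatible = "hisilicon,hi6220-ade",
	  .data = &ade_driver_data,
	},
	/* hypothetical future entry: */
	{ .compatible = "hisilicon,example-dpe",
	  .data = &example_dpe_driver_data,	/* another struct kirin_drm_data */
	},
	{ /* end node */ },
};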
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
index 22d1291668cd..4d5c05a24065 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
@@ -7,14 +7,52 @@
#ifndef __KIRIN_DRM_DRV_H__
#define __KIRIN_DRM_DRV_H__
-#define MAX_CRTC 2
+#define to_kirin_crtc(crtc) \
+ container_of(crtc, struct kirin_crtc, base)
+
+#define to_kirin_plane(plane) \
+ container_of(plane, struct kirin_plane, base)
+
+/* kirin format translation table */
+struct kirin_format {
+ u32 pixel_format;
+ u32 hw_format;
+};
+
+struct kirin_crtc {
+ struct drm_crtc base;
+ void *hw_ctx;
+ bool enable;
+};
+
+struct kirin_plane {
+ struct drm_plane base;
+ void *hw_ctx;
+ u32 ch;
+};
/* display controller init/cleanup ops */
-struct kirin_dc_ops {
- int (*init)(struct platform_device *pdev);
- void (*cleanup)(struct platform_device *pdev);
+struct kirin_drm_data {
+ const u32 *channel_formats;
+ u32 channel_formats_cnt;
+ int config_max_width;
+ int config_max_height;
+ bool register_connects;
+ u32 num_planes;
+ u32 prim_plane;
+
+ struct drm_driver *driver;
+ const struct drm_crtc_helper_funcs *crtc_helper_funcs;
+ const struct drm_crtc_funcs *crtc_funcs;
+ const struct drm_plane_helper_funcs *plane_helper_funcs;
+ const struct drm_plane_funcs *plane_funcs;
+ const struct drm_mode_config_funcs *mode_config_funcs;
+
+ void *(*alloc_hw_ctx)(struct platform_device *pdev,
+ struct drm_crtc *crtc);
+ void (*cleanup_hw_ctx)(void *hw_ctx);
};
-extern const struct kirin_dc_ops ade_dc_ops;
+extern struct kirin_drm_data ade_driver_data;
#endif /* __KIRIN_DRM_DRV_H__ */
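
to_kirin_crtc()/to_kirin_plane() are the usual container_of() upcasts from the embedded DRM base object back to the driver wrapper. In sketch form, a plane callback would recover its hardware context like this (example_plane_cb is illustrative):

static void example_plane_cb(struct drm_plane *plane)
{
	struct kirin_plane *kplane = to_kirin_plane(plane);
	struct ade_hw_ctx *ctx = kplane->hw_ctx;	/* opaque void *, owned by the ADE backend */

	(void)ctx;	/* ... program the hardware channel ... */
}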
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 61e042918a7f..84c6d4c91c65 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -13,10 +13,10 @@
#include <sound/asoundef.h>
#include <sound/hdmi-codec.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/i2c/tda998x.h>
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 87a38c6aaa41..00786a142ff0 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -30,6 +30,7 @@ config DRM_I915_DEBUG
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
select DRM_DEBUG_SELFTEST
+ select DMABUF_SELFTESTS
select SW_SYNC # signaling validation framework (igt/syncobj*)
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 331b19cc8247..658b930d34a8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -41,13 +41,16 @@ subdir-ccflags-y += -I$(srctree)/$(src)
# core driver code
i915-y += i915_drv.o \
i915_irq.o \
+ i915_getparam.o \
i915_params.o \
i915_pci.o \
i915_scatterlist.o \
i915_suspend.o \
i915_sysfs.o \
+ i915_utils.o \
intel_csr.o \
intel_device_info.o \
+ intel_pch.o \
intel_pm.o \
intel_runtime_pm.o \
intel_sideband.o \
@@ -59,6 +62,7 @@ i915-y += \
i915_memcpy.o \
i915_mm.o \
i915_sw_fence.o \
+ i915_sw_fence_work.o \
i915_syncmap.o \
i915_user_extensions.o
@@ -72,9 +76,13 @@ gt-y += \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
gt/intel_engine_cs.o \
+ gt/intel_engine_pool.o \
gt/intel_engine_pm.o \
+ gt/intel_engine_user.o \
gt/intel_gt.o \
+ gt/intel_gt_irq.o \
gt/intel_gt_pm.o \
+ gt/intel_gt_pm_irq.o \
gt/intel_hangcheck.o \
gt/intel_lrc.o \
gt/intel_renderstate.o \
@@ -90,8 +98,6 @@ gt-y += \
gt/gen7_renderstate.o \
gt/gen8_renderstate.o \
gt/gen9_renderstate.o
-gt-$(CONFIG_DRM_I915_SELFTEST) += \
- gt/mock_engine.o
i915-y += $(gt-y)
# GEM (Graphics Execution Management) code
@@ -123,8 +129,8 @@ gem-y += \
i915-y += \
$(gem-y) \
i915_active.o \
+ i915_buddy.o \
i915_cmd_parser.o \
- i915_gem_batch_pool.o \
i915_gem_evict.o \
i915_gem_fence_reg.o \
i915_gem_gtt.o \
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c
index 602380fe74f3..0589994dde11 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c
@@ -25,7 +25,7 @@
*
*/
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo_dev.h"
#define CH7017_TV_DISPLAY_MODE 0x00
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
index e070bebee7b5..54f58ba44b9f 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
@@ -26,7 +26,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo_dev.h"
#define CH7xxx_REG_VID 0x4a
diff --git a/drivers/gpu/drm/i915/display/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c
index 09dba35f3ffa..f43d8c610d3f 100644
--- a/drivers/gpu/drm/i915/display/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/display/dvo_ivch.c
@@ -29,7 +29,7 @@
*
*/
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo_dev.h"
/*
diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
index c83a5d88d62b..a724a8755673 100644
--- a/drivers/gpu/drm/i915/display/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
@@ -28,7 +28,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo_dev.h"
#define NS2501_VID 0x1305
diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c
index 04698eaeb632..0dfa0a0209ff 100644
--- a/drivers/gpu/drm/i915/display/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/display/dvo_sil164.c
@@ -26,7 +26,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo_dev.h"
#define SIL164_VID 0x0001
diff --git a/drivers/gpu/drm/i915/display/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c
index 623114ee73cd..009d65b0f3e9 100644
--- a/drivers/gpu/drm/i915/display/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c
@@ -25,7 +25,7 @@
*
*/
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo_dev.h"
/* register definitions according to the TFP410 data sheet */
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index a42348be0438..6e398c33a524 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -403,8 +403,8 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp);
- /* For EHL set latency optimization for PCS_DW1 lanes */
- if (IS_ELKHARTLAKE(dev_priv)) {
+ /* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
+ if (IS_ELKHARTLAKE(dev_priv) || (INTEL_GEN(dev_priv) >= 12)) {
tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy));
tmp &= ~LATENCY_OPTIM_MASK;
tmp |= LATENCY_OPTIM_VAL(0);
@@ -530,18 +530,20 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
* a value '0' inside TA_PARAM_REGISTERS otherwise
* leave all fields at HW default values.
*/
- if (intel_dsi_bitrate(intel_dsi) <= 800000) {
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
- tmp &= ~TA_SURE_MASK;
- tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
- I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
-
- /* shadow register inside display core */
- tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
- tmp &= ~TA_SURE_MASK;
- tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
- I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
+ if (IS_GEN(dev_priv, 11)) {
+ if (intel_dsi_bitrate(intel_dsi) <= 800000) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
+ tmp &= ~TA_SURE_MASK;
+ tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
+ I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
+
+ /* shadow register inside display core */
+ tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
+ tmp &= ~TA_SURE_MASK;
+ tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
+ I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
+ }
}
}
@@ -605,7 +607,10 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
I915_WRITE(ICL_DPCLKA_CFGCR0, val);
for_each_dsi_phy(phy, intel_dsi->phys) {
- val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
+ if (INTEL_GEN(dev_priv) >= 12)
+ val |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
+ else
+ val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
}
I915_WRITE(ICL_DPCLKA_CFGCR0, val);
@@ -680,6 +685,11 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
break;
}
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (is_vid_mode(intel_dsi))
+ tmp |= BLANKING_PACKET_ENABLE;
+ }
+
/* program DSI operation mode */
if (is_vid_mode(intel_dsi)) {
tmp &= ~OP_MODE_MASK;
@@ -862,6 +872,15 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
dsi_trans = dsi_port_to_transcoder(port);
I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift);
}
+
+ /* program the TRANS_VBLANK register; it should match the programmed vtotal */
+ if (INTEL_GEN(dev_priv) >= 12) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ I915_WRITE(VBLANK(dsi_trans),
+ (vactive - 1) | ((vtotal - 1) << 16));
+ }
+ }
}
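
The VBLANK() value packs two fields: the low 16 bits hold (vactive - 1) and the high 16 bits (vtotal - 1), presumably the vblank start and end (the field naming is an assumption; the packing comes straight from the expression above). For a hypothetical 1080-line active, 1125-line total mode, the value written would be (1125 - 1) << 16 | (1080 - 1) = 0x04640437.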
static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
@@ -879,10 +898,8 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
I915_WRITE(PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be enabled */
- if (intel_wait_for_register(&dev_priv->uncore,
- PIPECONF(dsi_trans),
- I965_PIPECONF_ACTIVE,
- I965_PIPECONF_ACTIVE, 10))
+ if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans),
+ I965_PIPECONF_ACTIVE, 10))
DRM_ERROR("DSI transcoder not enabled\n");
}
}
@@ -940,6 +957,8 @@ static void
gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
/* step 4a: power up all lanes of the DDI used by DSI */
gen11_dsi_power_up_lanes(encoder);
@@ -962,7 +981,8 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
gen11_dsi_configure_transcoder(encoder, pipe_config);
/* Step 4l: Gate DDI clocks */
- gen11_dsi_gate_clocks(encoder);
+ if (IS_GEN(dev_priv, 11))
+ gen11_dsi_gate_clocks(encoder);
}
static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
@@ -1058,9 +1078,8 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
I915_WRITE(PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be disabled */
- if (intel_wait_for_register(&dev_priv->uncore,
- PIPECONF(dsi_trans),
- I965_PIPECONF_ACTIVE, 0, 50))
+ if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans),
+ I965_PIPECONF_ACTIVE, 50))
DRM_ERROR("DSI trancoder not disabled\n");
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 90ca11a4ae88..d3fb75bb9eb1 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -35,7 +35,7 @@
#include <drm/drm_plane_helper.h>
#include "intel_atomic.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_sprite.h"
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index ab411d5e093c..d1fcdf206da4 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -35,8 +35,9 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+#include "i915_trace.h"
#include "intel_atomic_plane.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_pm.h"
#include "intel_sprite.h"
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index c8fd35a7ca42..ddcccf4408c3 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -29,7 +29,7 @@
#include "i915_drv.h"
#include "intel_audio.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_lpe_audio.h"
/**
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index b416b394b641..efb39f350b19 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1343,21 +1343,6 @@ static const u8 cnp_ddc_pin_map[] = {
static const u8 icp_ddc_pin_map[] = {
[ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
[ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
- [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
- [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
- [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP,
- [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
-};
-
-static const u8 mcc_ddc_pin_map[] = {
- [MCC_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
- [MCC_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
- [MCC_DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP,
-};
-
-static const u8 tgp_ddc_pin_map[] = {
- [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
- [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
[TGL_DDC_BUS_DDI_C] = GMBUS_PIN_3_BXT,
[ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
[ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
@@ -1372,13 +1357,7 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
const u8 *ddc_pin_map;
int n_entries;
- if (HAS_PCH_TGP(dev_priv)) {
- ddc_pin_map = tgp_ddc_pin_map;
- n_entries = ARRAY_SIZE(tgp_ddc_pin_map);
- } else if (HAS_PCH_MCC(dev_priv)) {
- ddc_pin_map = mcc_ddc_pin_map;
- n_entries = ARRAY_SIZE(mcc_ddc_pin_map);
- } else if (HAS_PCH_ICP(dev_priv)) {
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
ddc_pin_map = icp_ddc_pin_map;
n_entries = ARRAY_SIZE(icp_ddc_pin_map);
} else if (HAS_PCH_CNP(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index ee52c5b4643b..688858ebe4d0 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -6,7 +6,7 @@
#include <drm/drm_atomic_state_helper.h>
#include "intel_bw.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_sideband.h"
/* Parameters for Qclk Geyserville (QGV) */
@@ -322,6 +322,20 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
return data_rate;
}
+static struct intel_bw_state *
+intel_atomic_get_bw_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_private_state *bw_state;
+
+ bw_state = drm_atomic_get_private_obj_state(&state->base,
+ &dev_priv->bw_obj);
+ if (IS_ERR(bw_state))
+ return ERR_CAST(bw_state);
+
+ return to_intel_bw_state(bw_state);
+}
+
int intel_bw_atomic_check(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index e9d9c6d63bc3..9db10af012f4 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -8,7 +8,6 @@
#include <drm/drm_atomic.h>
-#include "i915_drv.h"
#include "intel_display.h"
struct drm_i915_private;
@@ -24,20 +23,6 @@ struct intel_bw_state {
#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
-static inline struct intel_bw_state *
-intel_atomic_get_bw_state(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct drm_private_state *bw_state;
-
- bw_state = drm_atomic_get_private_obj_state(&state->base,
- &dev_priv->bw_obj);
- if (IS_ERR(bw_state))
- return ERR_CAST(bw_state);
-
- return to_intel_bw_state(bw_state);
-}
-
void intel_bw_init_hw(struct drm_i915_private *dev_priv);
int intel_bw_init(struct drm_i915_private *dev_priv);
int intel_bw_atomic_check(struct intel_atomic_state *state);
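
Moving intel_atomic_get_bw_state() out of the header keeps i915_drv.h out of intel_bw.h; callers inside intel_bw.c still use it the same way. A sketch of the calling pattern:

struct intel_bw_state *bw_state;

bw_state = intel_atomic_get_bw_state(state);	/* private-obj state lookup */
if (IS_ERR(bw_state))
	return PTR_ERR(bw_state);
/* ... read or update bw_state->data_rate etc. ... */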
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 93b0d190c184..d0bc42e5039c 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -22,7 +22,7 @@
*/
#include "intel_cdclk.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_sideband.h"
/**
@@ -969,9 +969,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
- if (intel_wait_for_register(&dev_priv->uncore,
- LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
- 5))
+ if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
DRM_ERROR("DPLL0 not locked\n");
dev_priv->cdclk.hw.vco = vco;
@@ -983,9 +981,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
- if (intel_wait_for_register(&dev_priv->uncore,
- LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
- 1))
+ if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
DRM_ERROR("Couldn't disable DPLL0\n");
dev_priv->cdclk.hw.vco = 0;
@@ -1309,9 +1305,8 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
I915_WRITE(BXT_DE_PLL_ENABLE, 0);
/* Timeout 200us */
- if (intel_wait_for_register(&dev_priv->uncore,
- BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
- 1))
+ if (intel_de_wait_for_clear(dev_priv,
+ BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
DRM_ERROR("timeout waiting for DE PLL unlock\n");
dev_priv->cdclk.hw.vco = 0;
@@ -1330,11 +1325,8 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
/* Timeout 200us */
- if (intel_wait_for_register(&dev_priv->uncore,
- BXT_DE_PLL_ENABLE,
- BXT_DE_PLL_LOCK,
- BXT_DE_PLL_LOCK,
- 1))
+ if (intel_de_wait_for_set(dev_priv,
+ BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
DRM_ERROR("timeout waiting for DE PLL lock\n");
dev_priv->cdclk.hw.vco = vco;
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 23a84dd7989f..71a0201437a9 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -23,7 +23,7 @@
*/
#include "intel_color.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#define CTM_COEFF_SIGN (1ULL << 63)
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index ac8218a040ab..44bbc7e74fc3 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -4,7 +4,7 @@
*/
#include "intel_combo_phy.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#define for_each_combo_phy(__dev_priv, __phy) \
for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index d0163d86c42a..308ec63207ee 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -33,7 +33,7 @@
#include "i915_drv.h"
#include "intel_connector.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_hdcp.h"
int intel_connector_init(struct intel_connector *connector)
@@ -118,7 +118,7 @@ int intel_connector_register(struct drm_connector *connector)
if (ret)
goto err;
- if (i915_inject_probe_failure()) {
+ if (i915_inject_probe_failure(to_i915(connector->dev))) {
ret = -EFAULT;
goto err_backlight;
}
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 3fcf2f84bcce..e6e8d4a82044 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -38,7 +38,7 @@
#include "intel_connector.h"
#include "intel_crt.h"
#include "intel_ddi.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug.h"
@@ -443,9 +443,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
- if (intel_wait_for_register(&dev_priv->uncore,
+ if (intel_de_wait_for_clear(dev_priv,
crt->adpa_reg,
- ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER,
1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
@@ -497,10 +497,8 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
- if (intel_wait_for_register(&dev_priv->uncore,
- crt->adpa_reg,
- ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
- 1000)) {
+ if (intel_de_wait_for_clear(dev_priv, crt->adpa_reg,
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) {
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
I915_WRITE(crt->adpa_reg, save_adpa);
}
@@ -550,9 +548,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */
- if (intel_wait_for_register(&dev_priv->uncore, PORT_HOTPLUG_EN,
- CRT_HOTPLUG_FORCE_DETECT, 0,
- 1000))
+ if (intel_de_wait_for_clear(dev_priv, PORT_HOTPLUG_EN,
+ CRT_HOTPLUG_FORCE_DETECT, 1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
}
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index cf3c3fd7089f..8eb2b3ec01ed 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -32,10 +32,10 @@
#include "intel_combo_phy.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dpio_phy.h"
-#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
@@ -1467,8 +1467,8 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
else if (intel_crtc_has_dp_encoder(pipe_config))
dotclock = intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
- else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
- dotclock = pipe_config->port_clock * 2 / 3;
+ else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
+ dotclock = pipe_config->port_clock * 24 / pipe_config->pipe_bpp;
else
dotclock = pipe_config->port_clock;
@@ -2015,6 +2015,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
for_each_pipe(dev_priv, p) {
enum transcoder cpu_transcoder = (enum transcoder)p;
unsigned int port_mask, ddi_select;
+ intel_wakeref_t trans_wakeref;
+
+ trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_TRANSCODER(cpu_transcoder));
+ if (!trans_wakeref)
+ continue;
if (INTEL_GEN(dev_priv) >= 12) {
port_mask = TGL_TRANS_DDI_PORT_MASK;
@@ -2025,6 +2031,8 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
}
tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ intel_display_power_put(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder),
+ trans_wakeref);
if ((tmp & port_mask) != ddi_select)
continue;
@@ -2921,6 +2929,12 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
if (!intel_phy_is_combo(dev_priv, phy))
I915_WRITE(DDI_CLK_SEL(port),
icl_pll_to_ddi_clk_sel(encoder, crtc_state));
+ else if (IS_ELKHARTLAKE(dev_priv) && port >= PORT_C)
+ /*
+ * MG does not exist but the programming is required
+ * to ungate DDIC and DDID
+ */
+ I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
} else if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
val = I915_READ(DPCLKA_CFGCR0);
@@ -2961,7 +2975,8 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
enum phy phy = intel_port_to_phy(dev_priv, port);
if (INTEL_GEN(dev_priv) >= 11) {
- if (!intel_phy_is_combo(dev_priv, phy))
+ if (!intel_phy_is_combo(dev_priv, phy) ||
+ (IS_ELKHARTLAKE(dev_priv) && port >= PORT_C))
I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
} else if (IS_CANNONLAKE(dev_priv)) {
I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
@@ -3124,10 +3139,8 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
val |= DP_TP_CTL_FEC_ENABLE;
I915_WRITE(DP_TP_CTL(port), val);
- if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
- DP_TP_STATUS_FEC_ENABLE_LIVE,
- DP_TP_STATUS_FEC_ENABLE_LIVE,
- 1))
+ if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
+ DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
DRM_ERROR("Timed out waiting for FEC Enable Status\n");
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 9e4ee29fd0fc..b51d1ceb8739 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -29,7 +29,7 @@
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
@@ -62,9 +62,9 @@
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
-#include "intel_color.h"
#include "intel_cdclk.h"
-#include "intel_drv.h"
+#include "intel_color.h"
+#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
@@ -1077,9 +1077,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
i915_reg_t reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
- if (intel_wait_for_register(&dev_priv->uncore,
- reg, I965_PIPECONF_ACTIVE, 0,
- 100))
+ if (intel_de_wait_for_clear(dev_priv, reg,
+ I965_PIPECONF_ACTIVE, 100))
WARN(1, "pipe_off wait timed out\n");
} else {
intel_wait_for_pipe_scanline_stopped(crtc);
@@ -1383,11 +1382,7 @@ static void _vlv_enable_pll(struct intel_crtc *crtc,
POSTING_READ(DPLL(pipe));
udelay(150);
- if (intel_wait_for_register(&dev_priv->uncore,
- DPLL(pipe),
- DPLL_LOCK_VLV,
- DPLL_LOCK_VLV,
- 1))
+ if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
@@ -1436,9 +1431,7 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
/* Check PLL is locked */
- if (intel_wait_for_register(&dev_priv->uncore,
- DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
- 1))
+ if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
DRM_ERROR("PLL %d failed to lock\n", pipe);
}
@@ -1617,9 +1610,8 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
BUG();
}
- if (intel_wait_for_register(&dev_priv->uncore,
- dpll_reg, port_mask, expected_mask,
- 1000))
+ if (intel_de_wait_for_register(dev_priv, dpll_reg,
+ port_mask, expected_mask, 1000))
WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
port_name(dport->base.port),
I915_READ(dpll_reg) & port_mask, expected_mask);
@@ -1678,9 +1670,7 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
}
I915_WRITE(reg, val | TRANS_ENABLE);
- if (intel_wait_for_register(&dev_priv->uncore,
- reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
- 100))
+ if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
@@ -1708,11 +1698,8 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val |= TRANS_PROGRESSIVE;
I915_WRITE(LPT_TRANSCONF, val);
- if (intel_wait_for_register(&dev_priv->uncore,
- LPT_TRANSCONF,
- TRANS_STATE_ENABLE,
- TRANS_STATE_ENABLE,
- 100))
+ if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
+ TRANS_STATE_ENABLE, 100))
DRM_ERROR("Failed to enable PCH transcoder\n");
}
@@ -1734,9 +1721,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
val &= ~TRANS_ENABLE;
I915_WRITE(reg, val);
/* wait for PCH transcoder off, transcoder state */
- if (intel_wait_for_register(&dev_priv->uncore,
- reg, TRANS_STATE_ENABLE, 0,
- 50))
+ if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
if (HAS_PCH_CPT(dev_priv)) {
@@ -1756,9 +1741,8 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
val &= ~TRANS_ENABLE;
I915_WRITE(LPT_TRANSCONF, val);
/* wait for PCH transcoder off, transcoder state */
- if (intel_wait_for_register(&dev_priv->uncore,
- LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
- 50))
+ if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
+ TRANS_STATE_ENABLE, 50))
DRM_ERROR("Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
@@ -3049,12 +3033,13 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *obj = NULL;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct drm_framebuffer *fb = &plane_config->fb->base;
u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
u32 size_aligned = round_up(plane_config->base + plane_config->size,
PAGE_SIZE);
+ struct drm_i915_gem_object *obj;
+ bool ret = false;
size_aligned -= base_aligned;
@@ -3096,7 +3081,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
break;
default:
MISSING_CASE(plane_config->tiling);
- return false;
+ goto out;
}
mode_cmd.pixel_format = fb->format->format;
@@ -3108,16 +3093,15 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
DRM_DEBUG_KMS("intel fb init failed\n");
- goto out_unref_obj;
+ goto out;
}
DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
- return true;
-
-out_unref_obj:
+ ret = true;
+out:
i915_gem_object_put(obj);
- return false;
+ return ret;
}
static void
@@ -3174,6 +3158,12 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_disable_plane(plane, crtc_state);
}
+static struct intel_frontbuffer *
+to_intel_frontbuffer(struct drm_framebuffer *fb)
+{
+ return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
+}
+
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct intel_initial_plane_config *plane_config)
@@ -3181,7 +3171,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *c;
- struct drm_i915_gem_object *obj;
struct drm_plane *primary = intel_crtc->base.primary;
struct drm_plane_state *plane_state = primary->state;
struct intel_plane *intel_plane = to_intel_plane(primary);
@@ -3257,8 +3246,7 @@ valid_fb:
return;
}
- obj = intel_fb_obj(fb);
- intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+ intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
plane_state->src_x = 0;
plane_state->src_y = 0;
@@ -3273,14 +3261,14 @@ valid_fb:
intel_state->base.src = drm_plane_state_src(plane_state);
intel_state->base.dst = drm_plane_state_dest(plane_state);
- if (i915_gem_object_is_tiled(obj))
+ if (plane_config->tiling)
dev_priv->preserve_bios_swizzle = true;
plane_state->fb = fb;
plane_state->crtc = &intel_crtc->base;
atomic_or(to_intel_plane(primary)->frontbuffer_bit,
- &obj->frontbuffer_bits);
+ &to_intel_frontbuffer(fb)->bits);
}
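
The new to_intel_frontbuffer() accessor is deliberately NULL-tolerant so callers can pass a plane's fb pointer without first checking for a disabled plane; the tracking helpers then presumably treat a NULL frontbuffer as a no-op on that side. A sketch of the calling pattern used throughout this patch:

/* old_fb and/or new_fb may be NULL; to_intel_frontbuffer() maps NULL to NULL */
intel_frontbuffer_track(to_intel_frontbuffer(old_fb),
			to_intel_frontbuffer(new_fb),
			plane->frontbuffer_bit);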
static int skl_max_plane_width(const struct drm_framebuffer *fb,
@@ -5693,9 +5681,7 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
* and don't wait for vblanks until the end of crtc_enable, then
* the HW state readout code will complain that the expected
* IPS_CTL value is not the one we read. */
- if (intel_wait_for_register(&dev_priv->uncore,
- IPS_CTL, IPS_ENABLE, IPS_ENABLE,
- 50))
+ if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
DRM_ERROR("Timed out waiting for IPS enable\n");
}
}
@@ -5716,9 +5702,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
* 42ms timeout value leads to occasional timeouts so use 100ms
* instead.
*/
- if (intel_wait_for_register(&dev_priv->uncore,
- IPS_CTL, IPS_ENABLE, 0,
- 100))
+ if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
DRM_ERROR("Timed out waiting for IPS disable\n");
} else {
I915_WRITE(IPS_CTL, 0);
@@ -6683,7 +6667,7 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
if (phy == PHY_NONE)
return false;
- if (IS_ELKHARTLAKE(dev_priv) || INTEL_GEN(dev_priv) >= 12)
+ if (IS_ELKHARTLAKE(dev_priv))
return phy <= PHY_C;
if (INTEL_GEN(dev_priv) >= 11)
@@ -10354,10 +10338,9 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
if (INTEL_GEN(dev_priv) >= 12)
- port = (tmp & TGL_TRANS_DDI_PORT_MASK) >>
- TGL_TRANS_DDI_PORT_SHIFT;
+ port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
else
- port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
+ port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
if (INTEL_GEN(dev_priv) >= 11)
icelake_get_ddi_pll(dev_priv, port, pipe_config);
@@ -14133,9 +14116,9 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state)
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i)
- i915_gem_track_fb(intel_fb_obj(old_plane_state->base.fb),
- intel_fb_obj(new_plane_state->base.fb),
- plane->frontbuffer_bit);
+ intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb),
+ to_intel_frontbuffer(new_plane_state->base.fb),
+ plane->frontbuffer_bit);
}
static int intel_atomic_commit(struct drm_device *dev,
@@ -14419,7 +14402,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
return ret;
fb_obj_bump_render_priority(obj);
- intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+ intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
if (!new_state->fence) { /* implicit fencing */
struct dma_fence *fence;
@@ -14431,7 +14414,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (ret < 0)
return ret;
- fence = reservation_object_get_excl_rcu(obj->base.resv);
+ fence = dma_resv_get_excl_rcu(obj->base.resv);
if (fence) {
add_rps_boost_after_vblank(new_state->crtc, fence);
dma_fence_put(fence);
@@ -14682,13 +14665,12 @@ intel_legacy_cursor_update(struct drm_plane *plane,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- int ret;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct intel_plane *intel_plane = to_intel_plane(plane);
- struct drm_framebuffer *old_fb;
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->state);
struct intel_crtc_state *new_crtc_state;
+ int ret;
/*
* When crtc is inactive or there is a modeset pending,
@@ -14756,11 +14738,10 @@ intel_legacy_cursor_update(struct drm_plane *plane,
if (ret)
goto out_unlock;
- intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
-
- old_fb = old_plane_state->fb;
- i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
- intel_plane->frontbuffer_bit);
+ intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_FLIP);
+ intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->fb),
+ to_intel_frontbuffer(fb),
+ intel_plane->frontbuffer_bit);
/* Swap plane state */
plane->state = new_plane_state;
@@ -15318,7 +15299,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
/* TODO: initialize TC ports as well */
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
- intel_ddi_init(dev_priv, PORT_C);
+ icl_dsi_init(dev_priv);
} else if (IS_ELKHARTLAKE(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
@@ -15540,15 +15521,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
drm_framebuffer_cleanup(fb);
-
- i915_gem_object_lock(obj);
- WARN_ON(!obj->framebuffer_references--);
- i915_gem_object_unlock(obj);
-
- i915_gem_object_put(obj);
+ intel_frontbuffer_put(intel_fb->frontbuffer);
kfree(intel_fb);
}
@@ -15576,7 +15551,7 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
i915_gem_object_flush_if_display(obj);
- intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
+ intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
return 0;
}
@@ -15598,8 +15573,11 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
int ret = -EINVAL;
int i;
+ intel_fb->frontbuffer = intel_frontbuffer_get(obj);
+ if (!intel_fb->frontbuffer)
+ return -ENOMEM;
+
i915_gem_object_lock(obj);
- obj->framebuffer_references++;
tiling = i915_gem_object_get_tiling(obj);
stride = i915_gem_object_get_stride(obj);
i915_gem_object_unlock(obj);
@@ -15716,9 +15694,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
return 0;
err:
- i915_gem_object_lock(obj);
- obj->framebuffer_references--;
- i915_gem_object_unlock(obj);
+ intel_frontbuffer_put(intel_fb->frontbuffer);
return ret;
}
@@ -15736,8 +15712,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
fb = intel_framebuffer_create(obj, &mode_cmd);
- if (IS_ERR(fb))
- i915_gem_object_put(obj);
+ i915_gem_object_put(obj);
return fb;
}
@@ -16126,7 +16101,6 @@ out:
int intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
enum pipe pipe;
struct intel_crtc *crtc;
int ret;
@@ -16206,8 +16180,6 @@ int intel_modeset_init(struct drm_device *dev)
dev->mode_config.cursor_height = 256;
}
- dev->mode_config.fb_base = ggtt->gmadr.start;
-
DRM_DEBUG_KMS("%d display pipe%s available.\n",
INTEL_INFO(dev_priv)->num_pipes,
INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index d2c718f25478..e57e6969051d 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -28,8 +28,30 @@
#include <drm/drm_util.h>
#include <drm/i915_drm.h>
+enum link_m_n_set;
+struct dpll;
+struct drm_connector;
+struct drm_device;
+struct drm_encoder;
+struct drm_file;
+struct drm_framebuffer;
+struct drm_i915_error_state_buf;
+struct drm_i915_gem_object;
struct drm_i915_private;
+struct drm_modeset_acquire_ctx;
+struct drm_plane;
+struct drm_plane_state;
+struct i915_ggtt_view;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_digital_port;
+struct intel_dp;
+struct intel_encoder;
+struct intel_load_detect_pipe;
+struct intel_plane;
struct intel_plane_state;
+struct intel_remapped_info;
+struct intel_rotation_info;
enum i915_gpio {
GPIOA,
@@ -400,4 +422,171 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
bool intel_plane_can_remap(const struct intel_plane_state *plane_state);
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
+void intel_plane_destroy(struct drm_plane *plane);
+void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
+void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
+enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
+int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
+int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+ const char *name, u32 reg, int ref_freq);
+int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+ const char *name, u32 reg);
+void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
+void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
+void intel_init_display_hooks(struct drm_i915_private *dev_priv);
+unsigned int intel_fb_xy_to_linear(int x, int y,
+ const struct intel_plane_state *state,
+ int plane);
+unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
+ int color_plane, unsigned int height);
+void intel_add_fb_offsets(int *x, int *y,
+ const struct intel_plane_state *state, int plane);
+unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
+unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
+bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
+int intel_display_suspend(struct drm_device *dev);
+void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
+void intel_encoder_destroy(struct drm_encoder *encoder);
+struct drm_display_mode *
+intel_encoder_current_mode(struct intel_encoder *encoder);
+bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy);
+bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy);
+enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
+ enum port port);
+int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
+
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dport,
+ unsigned int expected_mask);
+int intel_get_load_detect_pipe(struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx);
+void intel_release_load_detect_pipe(struct drm_connector *connector,
+ struct intel_load_detect_pipe *old,
+ struct drm_modeset_acquire_ctx *ctx);
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ const struct i915_ggtt_view *view,
+ bool uses_fence,
+ unsigned long *out_flags);
+void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
+struct drm_framebuffer *
+intel_framebuffer_create(struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+int intel_prepare_plane_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state);
+void intel_cleanup_plane_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+
+void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+
+int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
+ const struct dpll *dpll);
+void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
+int lpt_get_iclkip(struct drm_i915_private *dev_priv);
+bool intel_fuzzy_clock_check(int clock1, int clock2);
+
+void intel_prepare_reset(struct drm_i915_private *dev_priv);
+void intel_finish_reset(struct drm_i915_private *dev_priv);
+void intel_dp_get_m_n(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
+ enum link_m_n_set m_n);
+void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
+ struct dpll *best_clock);
+int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
+
+bool intel_crtc_active(struct intel_crtc *crtc);
+bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
+void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
+void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
+enum intel_display_power_domain intel_port_to_power_domain(enum port port);
+enum intel_display_power_domain
+intel_aux_power_domain(struct intel_digital_port *dig_port);
+void intel_mode_from_pipe_config(struct drm_display_mode *mode,
+ struct intel_crtc_state *pipe_config);
+void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
+
+u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
+int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
+int skl_max_scale(const struct intel_crtc_state *crtc_state,
+ u32 pixel_format);
+u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
+u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
+u32 skl_plane_stride(const struct intel_plane_state *plane_state,
+ int plane);
+int skl_check_plane_surface(struct intel_plane_state *plane_state);
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
+unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
+ u32 pixel_format, u64 modifier,
+ unsigned int rotation);
+int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
+
+struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_i915_private *dev_priv);
+void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
+ struct intel_display_error_state *error);
+
+/* modesetting */
+void intel_modeset_init_hw(struct drm_device *dev);
+int intel_modeset_init(struct drm_device *dev);
+void intel_modeset_driver_remove(struct drm_device *dev);
+int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state);
+void intel_display_resume(struct drm_device *dev);
+void i915_redisable_vga(struct drm_i915_private *dev_priv);
+void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
+void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
+
+/* modesetting asserts */
+void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
+void assert_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state);
+#define assert_pll_enabled(d, p) assert_pll(d, p, true)
+#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state);
+#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
+#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
+void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state);
+#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
+#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
+void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
+#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
+#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+
+/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
+ * WARN_ON()) for hw state sanity checks, to catch unexpected conditions
+ * which may not necessarily be a user visible problem. This will either
+ * WARN() or DRM_ERROR() depending on the verbose_state_checks moduleparam,
+ * so that distros and users can tailor their preferred amount of i915 abrt
+ * spam.
+ */
+#define I915_STATE_WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ if (!WARN(i915_modparams.verbose_state_checks, format)) \
+ DRM_ERROR(format); \
+ unlikely(__ret_warn_on); \
+})
+
+#define I915_STATE_WARN_ON(x) \
+ I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
+
#endif
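
Editor's note on the I915_STATE_WARN() machinery consolidated above: callers compare a hardware readback against the expected software state and report a mismatch without killing the machine. A minimal sketch in the style of the existing assert_pipe() helpers (the assert_pipe_conf() function below is invented for illustration and is not part of this patch):

	static void assert_pipe_conf(struct drm_i915_private *dev_priv,
				     enum pipe pipe, bool state)
	{
		/* readback of the current hw enable bit for this pipe */
		bool cur_state = I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;

		I915_STATE_WARN(cur_state != state,
				"pipe %c assertion failure (expected %s, current %s)\n",
				pipe_name(pipe), onoff(state), onoff(cur_state));
	}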
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index dd2a50b8ba0a..12099760d99e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -13,8 +13,9 @@
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_csr.h"
+#include "intel_display_power.h"
+#include "intel_display_types.h"
#include "intel_dpio_phy.h"
-#include "intel_drv.h"
#include "intel_hotplug.h"
#include "intel_sideband.h"
#include "intel_tc.h"
@@ -318,11 +319,8 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
int pw_idx = power_well->desc->hsw.idx;
/* Timeout for PW1: 10 us, AUX: not specified, other PWs: 20 us. */
- if (intel_wait_for_register(&dev_priv->uncore,
- regs->driver,
- HSW_PWR_WELL_CTL_STATE(pw_idx),
- HSW_PWR_WELL_CTL_STATE(pw_idx),
- 1)) {
+ if (intel_de_wait_for_set(dev_priv, regs->driver,
+ HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
DRM_DEBUG_KMS("%s power well enable timeout\n",
power_well->desc->name);
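
Throughout this file (and several files further down) the open-coded intel_wait_for_register(&dev_priv->uncore, ...) calls are converted to display-engine wrappers. A minimal sketch of what those wrappers are assumed to look like; their actual definitions are introduced elsewhere in the series, not in this hunk:

	static inline int
	intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
				   u32 mask, u32 value, unsigned int timeout)
	{
		/* wait until (read(reg) & mask) == value, timeout in ms */
		return intel_wait_for_register(&i915->uncore, reg, mask, value,
					       timeout);
	}

	static inline int
	intel_de_wait_for_set(struct drm_i915_private *i915, i915_reg_t reg,
			      u32 mask, unsigned int timeout)
	{
		return intel_de_wait_for_register(i915, reg, mask, mask, timeout);
	}

	static inline int
	intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg,
			       u32 mask, unsigned int timeout)
	{
		return intel_de_wait_for_register(i915, reg, mask, 0, timeout);
	}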
@@ -379,9 +377,8 @@ static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
enum skl_power_gate pg)
{
/* Timeout 5us for PG#0, for other PGs 1us */
- WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
- SKL_FUSE_PG_DIST_STATUS(pg),
- SKL_FUSE_PG_DIST_STATUS(pg), 1));
+ WARN_ON(intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
+ SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
@@ -727,7 +724,7 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
return mask;
}
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
+static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -787,7 +784,7 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
dev_priv->csr.dc_state = val & mask;
}
-void bxt_enable_dc9(struct drm_i915_private *dev_priv)
+static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc9(dev_priv);
@@ -802,7 +799,7 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
-void bxt_disable_dc9(struct drm_i915_private *dev_priv)
+static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
assert_can_disable_dc9(dev_priv);
@@ -856,7 +853,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
assert_csr_loaded(dev_priv);
}
-void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc5(dev_priv);
@@ -880,7 +877,7 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
assert_csr_loaded(dev_priv);
}
-void skl_enable_dc6(struct drm_i915_private *dev_priv)
+static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc6(dev_priv);
@@ -966,8 +963,7 @@ static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
"Unexpected DBuf power power state (0x%08x)\n", tmp);
}
-static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = {};
@@ -991,6 +987,12 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
intel_combo_phy_init(dev_priv);
}
+static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ gen9_disable_dc_states(dev_priv);
+}
+
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@@ -1379,11 +1381,8 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
* The PHY may be busy with some initial calibration and whatnot,
* so the power state can take a while to actually change.
*/
- if (intel_wait_for_register(&dev_priv->uncore,
- DISPLAY_PHY_STATUS,
- phy_status_mask,
- phy_status,
- 10))
+ if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
+ phy_status_mask, phy_status, 10))
DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
phy_status, dev_priv->chv_phy_control);
@@ -1414,11 +1413,8 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_set_power_well(dev_priv, power_well, true);
/* Poll for phypwrgood signal */
- if (intel_wait_for_register(&dev_priv->uncore,
- DISPLAY_PHY_STATUS,
- PHY_POWERGOOD(phy),
- PHY_POWERGOOD(phy),
- 1))
+ if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
+ PHY_POWERGOOD(phy), 1))
DRM_ERROR("Display PHY %d is not power up\n", phy);
vlv_dpio_get(dev_priv);
@@ -2482,15 +2478,10 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_AUX_D) | \
@@ -2558,12 +2549,14 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
#define TGL_PW_5_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PIPE_D) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \
BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define TGL_PW_4_POWER_DOMAINS ( \
TGL_PW_5_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_INIT))
@@ -2571,21 +2564,13 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
TGL_PW_4_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_IO) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_IO) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_IO) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_IO) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_IO) | \
BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_IO) | \
BIT_ULL(POWER_DOMAIN_AUX_TC1) | \
BIT_ULL(POWER_DOMAIN_AUX_TC2) | \
BIT_ULL(POWER_DOMAIN_AUX_TC3) | \
@@ -4342,8 +4327,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
I915_WRITE(LCPLL_CTL, val);
POSTING_READ(LCPLL_CTL);
- if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
- LCPLL_PLL_LOCK, 0, 1))
+ if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
DRM_ERROR("LCPLL still locked\n");
val = hsw_read_dcomp(dev_priv);
@@ -4398,8 +4382,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
val &= ~LCPLL_PLL_DISABLE;
I915_WRITE(LCPLL_CTL, val);
- if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
- LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5))
+ if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
DRM_ERROR("LCPLL not locked yet\n");
if (val & LCPLL_CD_SOURCE_FCLK) {
@@ -4441,7 +4424,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
* For more, read "Display Sequences for Package C8" in the hardware
* documentation.
*/
-void hsw_enable_pc8(struct drm_i915_private *dev_priv)
+static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -4457,7 +4440,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
hsw_disable_lcpll(dev_priv, true, true);
}
-void hsw_disable_pc8(struct drm_i915_private *dev_priv)
+static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -4532,7 +4515,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ gen9_disable_dc_states(dev_priv);
gen9_dbuf_disable(dev_priv);
@@ -4557,8 +4540,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
usleep_range(10, 30); /* 10 us delay per Bspec */
}
-void bxt_display_core_init(struct drm_i915_private *dev_priv,
- bool resume)
+static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
@@ -4589,12 +4571,12 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
intel_csr_load_program(dev_priv);
}
-void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
+static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ gen9_disable_dc_states(dev_priv);
gen9_dbuf_disable(dev_priv);
@@ -4654,7 +4636,7 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ gen9_disable_dc_states(dev_priv);
/* 1. Disable all display engine functions -> already done */
@@ -4680,8 +4662,8 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
intel_combo_phy_uninit(dev_priv);
}
-void icl_display_core_init(struct drm_i915_private *dev_priv,
- bool resume)
+static void icl_display_core_init(struct drm_i915_private *dev_priv,
+ bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
@@ -4716,12 +4698,12 @@ void icl_display_core_init(struct drm_i915_private *dev_priv,
intel_csr_load_program(dev_priv);
}
-void icl_display_core_uninit(struct drm_i915_private *dev_priv)
+static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ gen9_disable_dc_states(dev_priv);
/* 1. Disable all display engine functions -> already done */
@@ -5193,3 +5175,58 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
}
#endif
+
+void intel_display_power_suspend_late(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915))
+ bxt_enable_dc9(i915);
+ else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ hsw_enable_pc8(i915);
+}
+
+void intel_display_power_resume_early(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
+ gen9_sanitize_dc_state(i915);
+ bxt_disable_dc9(i915);
+ } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ hsw_disable_pc8(i915);
+ }
+}
+
+void intel_display_power_suspend(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11) {
+ icl_display_core_uninit(i915);
+ bxt_enable_dc9(i915);
+ } else if (IS_GEN9_LP(i915)) {
+ bxt_display_core_uninit(i915);
+ bxt_enable_dc9(i915);
+ } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ hsw_enable_pc8(i915);
+ }
+}
+
+void intel_display_power_resume(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11) {
+ bxt_disable_dc9(i915);
+ icl_display_core_init(i915, true);
+ if (i915->csr.dmc_payload) {
+ if (i915->csr.allowed_dc_mask &
+ DC_STATE_EN_UPTO_DC6)
+ skl_enable_dc6(i915);
+ else if (i915->csr.allowed_dc_mask &
+ DC_STATE_EN_UPTO_DC5)
+ gen9_enable_dc5(i915);
+ }
+ } else if (IS_GEN9_LP(i915)) {
+ bxt_disable_dc9(i915);
+ bxt_display_core_init(i915, true);
+ if (i915->csr.dmc_payload &&
+ (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
+ gen9_enable_dc5(i915);
+ } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ hsw_disable_pc8(i915);
+ }
+}
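
The four entry points added above pair up across the suspend/resume sequence: _suspend_late()/_resume_early() only toggle DC9/PC8, while _suspend()/_resume() additionally tear down and re-initialize the display core. A hedged sketch of the intended usage; the example_* wrappers are illustrative, and the real call sites live in i915_drv.c outside this patch:

	static void example_suspend_late(struct drm_i915_private *i915)
	{
		/* enters DC9 on gen9 LP and gen11+, PC8 on HSW/BDW */
		intel_display_power_suspend_late(i915);
	}

	static void example_resume_early(struct drm_i915_private *i915)
	{
		/* reverse order: sanitize the DC state, then leave DC9/PC8 */
		intel_display_power_resume_early(i915);
	}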
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index e4d2c1ba24b0..a50605b8b1ad 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -92,6 +92,27 @@ enum intel_display_power_domain {
POWER_DOMAIN_NUM,
};
+/*
+ * i915_power_well_id:
+ *
+ * IDs used to look up power wells. Power wells accessed directly, bypassing
+ * the power domains framework, must be assigned a unique ID. The rest of the
+ * power wells must be assigned DISP_PW_ID_NONE.
+ */
+enum i915_power_well_id {
+ DISP_PW_ID_NONE,
+
+ VLV_DISP_PW_DISP2D,
+ BXT_DISP_PW_DPIO_CMN_A,
+ VLV_DISP_PW_DPIO_CMN_BC,
+ GLK_DISP_PW_DPIO_CMN_C,
+ CHV_DISP_PW_DPIO_CMN_D,
+ HSW_DISP_PW_GLOBAL,
+ SKL_DISP_PW_MISC_IO,
+ SKL_DISP_PW_1,
+ SKL_DISP_PW_2,
+};
+
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
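
To illustrate how the relocated i915_power_well_id values above are consumed: each entry in a platform power-well table either carries one of these IDs (when lookup code needs to reach the well directly) or DISP_PW_ID_NONE. An illustrative pair of entries following the existing i915_power_well_desc layout; the names and domain masks here are examples, not part of this hunk:

	static const struct i915_power_well_desc example_power_wells[] = {
		{
			.name = "power well 1",
			.domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
			.ops = &hsw_power_well_ops,
			.id = SKL_DISP_PW_1,	/* looked up directly */
		},
		{
			.name = "DDI A/E IO",
			.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
			.ops = &hsw_power_well_ops,
			.id = DISP_PW_ID_NONE,	/* only via the domains framework */
		},
	};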
@@ -232,27 +253,20 @@ struct i915_power_domains {
for_each_power_well_reverse(__dev_priv, __power_well) \
for_each_if((__power_well)->desc->domains & (__domain_mask))
-void skl_enable_dc6(struct drm_i915_private *dev_priv);
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
-void bxt_enable_dc9(struct drm_i915_private *dev_priv);
-void bxt_disable_dc9(struct drm_i915_private *dev_priv);
-void gen9_enable_dc5(struct drm_i915_private *dev_priv);
-
int intel_power_domains_init(struct drm_i915_private *dev_priv);
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
void intel_power_domains_driver_remove(struct drm_i915_private *dev_priv);
-void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
-void icl_display_core_uninit(struct drm_i915_private *dev_priv);
void intel_power_domains_enable(struct drm_i915_private *dev_priv);
void intel_power_domains_disable(struct drm_i915_private *dev_priv);
void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
enum i915_drm_suspend_mode);
void intel_power_domains_resume(struct drm_i915_private *dev_priv);
-void hsw_enable_pc8(struct drm_i915_private *dev_priv);
-void hsw_disable_pc8(struct drm_i915_private *dev_priv);
-void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
-void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
+
+void intel_display_power_suspend_late(struct drm_i915_private *i915);
+void intel_display_power_resume_early(struct drm_i915_private *i915);
+void intel_display_power_suspend(struct drm_i915_private *i915);
+void intel_display_power_resume(struct drm_i915_private *i915);
const char *
intel_display_power_domain_str(struct drm_i915_private *i915,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index c4016164c34e..449abaea619f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -22,8 +22,9 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
-#ifndef __INTEL_DRV_H__
-#define __INTEL_DRV_H__
+
+#ifndef __INTEL_DISPLAY_TYPES_H__
+#define __INTEL_DISPLAY_TYPES_H__
#include <linux/async.h>
#include <linux/i2c.h>
@@ -67,8 +68,23 @@ enum intel_output_type {
INTEL_OUTPUT_DP_MST = 11,
};
+enum hdmi_force_audio {
+ HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
+ HDMI_AUDIO_OFF, /* force turn off HDMI audio */
+ HDMI_AUDIO_AUTO, /* trust EDID */
+ HDMI_AUDIO_ON, /* force turn on HDMI audio */
+};
+
+/* "Broadcast RGB" property */
+enum intel_broadcast_rgb {
+ INTEL_BROADCAST_RGB_AUTO,
+ INTEL_BROADCAST_RGB_FULL,
+ INTEL_BROADCAST_RGB_LIMITED,
+};
+
struct intel_framebuffer {
struct drm_framebuffer base;
+ struct intel_frontbuffer *frontbuffer;
struct intel_rotation_info rot_info;
/* for each plane in the normal GTT view */
@@ -851,7 +867,7 @@ struct intel_crtc_state {
/*
* Frequency the dpll for the port should run at. Differs from the
- * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
+ * adjusted dotclock e.g. for DP or 10/12bpc hdmi mode. This is also
* already multiplied by pixel_multiplier.
*/
int port_clock;
@@ -1473,41 +1489,6 @@ intel_atomic_get_new_crtc_state(struct intel_atomic_state *state,
}
/* intel_display.c */
-void intel_plane_destroy(struct drm_plane *plane);
-void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
-void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
-enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
-int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
-int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
- const char *name, u32 reg, int ref_freq);
-int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
- const char *name, u32 reg);
-void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
-void intel_init_display_hooks(struct drm_i915_private *dev_priv);
-unsigned int intel_fb_xy_to_linear(int x, int y,
- const struct intel_plane_state *state,
- int plane);
-unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
- int color_plane, unsigned int height);
-void intel_add_fb_offsets(int *x, int *y,
- const struct intel_plane_state *state, int plane);
-unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
-unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
-bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
-int intel_display_suspend(struct drm_device *dev);
-void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
-void intel_encoder_destroy(struct drm_encoder *encoder);
-struct drm_display_mode *
-intel_encoder_current_mode(struct intel_encoder *encoder);
-bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy);
-bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy);
-enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
- enum port port);
-int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe);
static inline bool
intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
enum intel_output_type type)
@@ -1536,108 +1517,9 @@ intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe)
intel_wait_for_vblank(dev_priv, pipe);
}
-u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
-
-int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
-void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dport,
- unsigned int expected_mask);
-int intel_get_load_detect_pipe(struct drm_connector *connector,
- const struct drm_display_mode *mode,
- struct intel_load_detect_pipe *old,
- struct drm_modeset_acquire_ctx *ctx);
-void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old,
- struct drm_modeset_acquire_ctx *ctx);
-struct i915_vma *
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
- const struct i915_ggtt_view *view,
- bool uses_fence,
- unsigned long *out_flags);
-void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
-struct drm_framebuffer *
-intel_framebuffer_create(struct drm_i915_gem_object *obj,
- struct drm_mode_fb_cmd2 *mode_cmd);
-int intel_prepare_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state);
-void intel_cleanup_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state);
-
-void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe);
-
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
- const struct dpll *dpll);
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
-int lpt_get_iclkip(struct drm_i915_private *dev_priv);
-bool intel_fuzzy_clock_check(int clock1, int clock2);
-
-/* modesetting asserts */
-void assert_panel_unlocked(struct drm_i915_private *dev_priv,
- enum pipe pipe);
-void assert_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state);
-#define assert_pll_enabled(d, p) assert_pll(d, p, true)
-#define assert_pll_disabled(d, p) assert_pll(d, p, false)
-void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state);
-#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
-#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
-void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state);
-#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
-#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
-void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
-#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
-#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-void intel_prepare_reset(struct drm_i915_private *dev_priv);
-void intel_finish_reset(struct drm_i915_private *dev_priv);
-void intel_dp_get_m_n(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
-void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
- enum link_m_n_set m_n);
-void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
-bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
- struct dpll *best_clock);
-int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
-
-bool intel_crtc_active(struct intel_crtc *crtc);
-bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
-void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
-void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
-enum intel_display_power_domain intel_port_to_power_domain(enum port port);
-enum intel_display_power_domain
-intel_aux_power_domain(struct intel_digital_port *dig_port);
-void intel_mode_from_pipe_config(struct drm_display_mode *mode,
- struct intel_crtc_state *pipe_config);
-void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state);
-
-u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
-int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
-int skl_max_scale(const struct intel_crtc_state *crtc_state,
- u32 pixel_format);
-
static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
{
return i915_ggtt_offset(state->vma);
}
-u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
-u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
-u32 skl_plane_stride(const struct intel_plane_state *plane_state,
- int plane);
-int skl_check_plane_surface(struct intel_plane_state *plane_state);
-int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
-int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
-unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation);
-int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
-
-#endif /* __INTEL_DRV_H__ */
+#endif /* __INTEL_DISPLAY_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 0eb5d66f87a7..921ad0a2f7ba 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -44,15 +44,16 @@
#include "i915_debugfs.h"
#include "i915_drv.h"
+#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
-#include "intel_drv.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
@@ -2370,9 +2371,8 @@ static void wait_panel_status(struct intel_dp *intel_dp,
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
- if (intel_wait_for_register(&dev_priv->uncore,
- pp_stat_reg, mask, value,
- 5000))
+ if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
+ mask, value, 5000))
DRM_ERROR("Panel status timeout: status %08x control %08x\n",
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
@@ -3959,10 +3959,8 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
if (port == PORT_A)
return;
- if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
- DP_TP_STATUS_IDLE_DONE,
- DP_TP_STATUS_IDLE_DONE,
- 1))
+ if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
+ DP_TP_STATUS_IDLE_DONE, 1))
DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
@@ -4146,10 +4144,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
drm_dp_is_branch(intel_dp->dpcd));
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
- dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
- DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
-
/*
* Read the eDP display control registers.
*
@@ -5818,47 +5812,49 @@ struct hdcp2_dp_errata_stream_type {
u8 stream_type;
} __packed;
-static struct hdcp2_dp_msg_data {
+struct hdcp2_dp_msg_data {
u8 msg_id;
u32 offset;
bool msg_detectable;
u32 timeout;
u32 timeout2; /* Added for non_paired situation */
- } hdcp2_msg_data[] = {
- {HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0},
- {HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
- false, HDCP_2_2_CERT_TIMEOUT_MS, 0},
- {HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
- false, 0, 0},
- {HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
- false, 0, 0},
- {HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
- true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
- HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
- {HDCP_2_2_AKE_SEND_PAIRING_INFO,
- DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
- HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
- {HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0},
- {HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
- false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0},
- {HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
- 0, 0},
- {HDCP_2_2_REP_SEND_RECVID_LIST,
- DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
- HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
- {HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
- 0, 0},
- {HDCP_2_2_REP_STREAM_MANAGE,
- DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
- 0, 0},
- {HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
- false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
+};
+
+static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
+ { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 },
+ { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
+ false, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
+ { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
+ false, 0, 0 },
+ { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
+ false, 0, 0 },
+ { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
+ true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
+ HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
+ { HDCP_2_2_AKE_SEND_PAIRING_INFO,
+ DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
+ HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
+ { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 },
+ { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
+ false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 },
+ { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
+ 0, 0 },
+ { HDCP_2_2_REP_SEND_RECVID_LIST,
+ DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
+ HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
+ { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
+ 0, 0 },
+ { HDCP_2_2_REP_STREAM_MANAGE,
+ DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
+ 0, 0 },
+ { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
+ false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
/* local define to shovel this through the write_2_2 interface */
#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
- {HDCP_2_2_ERRATA_DP_STREAM_TYPE,
- DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
- 0, 0},
- };
+ { HDCP_2_2_ERRATA_DP_STREAM_TYPE,
+ DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
+ 0, 0 },
+};
static inline
int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
@@ -5912,7 +5908,7 @@ int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
static ssize_t
intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
- struct hdcp2_dp_msg_data *hdcp2_msg_data)
+ const struct hdcp2_dp_msg_data *hdcp2_msg_data)
{
struct intel_dp *dp = &intel_dig_port->dp;
struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
@@ -5951,13 +5947,13 @@ intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
return ret;
}
-static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
+static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
{
int i;
- for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
- if (hdcp2_msg_data[i].msg_id == msg_id)
- return &hdcp2_msg_data[i];
+ for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++)
+ if (hdcp2_dp_msg_data[i].msg_id == msg_id)
+ return &hdcp2_dp_msg_data[i];
return NULL;
}
@@ -5971,7 +5967,7 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_write, len;
- struct hdcp2_dp_msg_data *hdcp2_msg_data;
+ const struct hdcp2_dp_msg_data *hdcp2_msg_data;
hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
if (!hdcp2_msg_data)
@@ -6035,7 +6031,7 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_recv, len;
- struct hdcp2_dp_msg_data *hdcp2_msg_data;
+ const struct hdcp2_dp_msg_data *hdcp2_msg_data;
hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
if (!hdcp2_msg_data)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 6b0b73479fb8..020422da2ae2 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -22,8 +22,8 @@
*
*/
+#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
-#include "intel_drv.h"
static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 9b1fccea966b..2a1130dd1ad0 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -21,9 +21,9 @@
* IN THE SOFTWARE.
*/
+#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
-#include "intel_drv.h"
static void
intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 60652ebbdf61..2c5ac3dd647f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -32,10 +32,10 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
-#include "intel_drv.h"
static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
@@ -338,11 +338,8 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
- if (intel_wait_for_register(&dev_priv->uncore,
- DP_TP_STATUS(port),
- DP_TP_STATUS_ACT_SENT,
- DP_TP_STATUS_ACT_SENT,
- 1))
+ if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
+ DP_TP_STATUS_ACT_SENT, 1))
DRM_ERROR("Timed out waiting for ACT sent\n");
drm_dp_check_act_status(&intel_dp->mst_mgr);
@@ -539,7 +536,15 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
- drm_connector_attach_max_bpc_property(connector, 6, 12);
+
+ /*
+ * Reuse the prop from the SST connector because we're
+ * not allowed to create new props after device registration.
+ */
+ connector->max_bpc_property =
+ intel_dp->attached_connector->base.max_bpc_property;
+ if (connector->max_bpc_property)
+ drm_connector_attach_max_bpc_property(connector, 6, 12);
return connector;
@@ -602,7 +607,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->power_domain = intel_dig_port->base.power_domain;
intel_encoder->port = intel_dig_port->base.port;
- intel_encoder->crtc_mask = 0x7;
+ intel_encoder->crtc_mask = BIT(pipe);
intel_encoder->cloneable = 0;
intel_encoder->compute_config = intel_dp_mst_compute_config;
@@ -632,6 +637,12 @@ intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
}
int
+intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port)
+{
+ return intel_dig_port->dp.active_mst_links;
+}
+
+int
intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index 6754c211205a..f660ad80db04 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -6,15 +6,10 @@
#ifndef __INTEL_DP_MST_H__
#define __INTEL_DP_MST_H__
-#include "intel_drv.h"
+struct intel_digital_port;
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
-static inline int
-intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port)
-{
- return intel_dig_port->dp.active_mst_links;
-}
-
+int intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port);
#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 7ccf7f3974db..556d1b30f06a 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -23,8 +23,8 @@
#include "display/intel_dp.h"
+#include "intel_display_types.h"
#include "intel_dpio_phy.h"
-#include "intel_drv.h"
#include "intel_sideband.h"
/**
@@ -345,10 +345,8 @@ static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
- if (intel_wait_for_register(&dev_priv->uncore,
- BXT_PORT_REF_DW3(phy),
- GRC_DONE, GRC_DONE,
- 10))
+ if (intel_de_wait_for_set(dev_priv, BXT_PORT_REF_DW3(phy),
+ GRC_DONE, 10))
DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index f9bdf8514a53..b8148f838354 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -21,9 +21,9 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_dpll_mgr.h"
-#include "intel_drv.h"
/**
* DOC: Display PLLs
@@ -1000,11 +1000,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(regs[id].ctl,
I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
- if (intel_wait_for_register(&dev_priv->uncore,
- DPLL_STATUS,
- DPLL_LOCK(id),
- DPLL_LOCK(id),
- 5))
+ if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
DRM_ERROR("DPLL %d not locked\n", id);
}
@@ -2016,11 +2012,8 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
- if (intel_wait_for_register(&dev_priv->uncore,
- CNL_DPLL_ENABLE(id),
- PLL_POWER_STATE,
- PLL_POWER_STATE,
- 5))
+ if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
+ PLL_POWER_STATE, 5))
DRM_ERROR("PLL %d Power not enabled\n", id);
/*
@@ -2057,11 +2050,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 7. Wait for PLL lock status in DPLL_ENABLE. */
- if (intel_wait_for_register(&dev_priv->uncore,
- CNL_DPLL_ENABLE(id),
- PLL_LOCK,
- PLL_LOCK,
- 5))
+ if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
DRM_ERROR("PLL %d not locked\n", id);
/*
@@ -2105,11 +2094,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
- if (intel_wait_for_register(&dev_priv->uncore,
- CNL_DPLL_ENABLE(id),
- PLL_LOCK,
- 0,
- 5))
+ if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
DRM_ERROR("PLL %d locked\n", id);
/*
@@ -2127,11 +2112,8 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
- if (intel_wait_for_register(&dev_priv->uncore,
- CNL_DPLL_ENABLE(id),
- PLL_POWER_STATE,
- 0,
- 5))
+ if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
+ PLL_POWER_STATE, 5))
DRM_ERROR("PLL %d Power not disabled\n", id);
}
@@ -3252,8 +3234,7 @@ static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
- if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
- PLL_POWER_STATE, PLL_POWER_STATE, 1))
+ if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
}
@@ -3268,8 +3249,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(enable_reg, val);
/* Timeout is actually 600us. */
- if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
- PLL_LOCK, PLL_LOCK, 1))
+ if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
DRM_ERROR("PLL %d not locked\n", pll->info->id);
}
@@ -3364,8 +3344,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(enable_reg, val);
/* Timeout is actually 1us. */
- if (intel_wait_for_register(&dev_priv->uncore,
- enable_reg, PLL_LOCK, 0, 1))
+ if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
DRM_ERROR("PLL %d locked\n", pll->info->id);
/* DVFS post sequence would be here. See the comment above. */
@@ -3378,8 +3357,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
- if (intel_wait_for_register(&dev_priv->uncore,
- enable_reg, PLL_POWER_STATE, 0, 1))
+ if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index 1cd24bd46518..b15be5814599 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -26,7 +26,8 @@
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
-#include "intel_drv.h"
+
+#include "intel_display_types.h"
#define INTEL_DSI_VIDEO_MODE 0
#define INTEL_DSI_COMMAND_MODE 1
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 8c33262cb0b2..bb3fd8b786a2 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -27,7 +27,7 @@
#include <video/mipi_display.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_dcs_backlight.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index e5b178660408..f90946c912ee 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -38,7 +38,7 @@
#include <video/mipi_display.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_sideband.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 22666d28f4aa..93baf366692e 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -34,7 +34,7 @@
#include "i915_drv.h"
#include "intel_connector.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dvo.h"
#include "intel_dvo_dev.h"
#include "intel_gmbus.h"
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index d36cada2cc7d..16ed44bfd734 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -41,7 +41,7 @@
#include <drm/drm_fourcc.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
@@ -110,9 +110,8 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- if (intel_wait_for_register(&dev_priv->uncore,
- FBC_STATUS, FBC_STAT_COMPRESSING, 0,
- 10)) {
+ if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
+ FBC_STAT_COMPRESSING, 10)) {
DRM_DEBUG_KMS("FBC idle timed out\n");
return;
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 1edd44ee32b2..d59eee5c5d9c 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -43,17 +43,18 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_fbdev.h"
#include "intel_frontbuffer.h"
-static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
+static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
{
- struct drm_i915_gem_object *obj = intel_fb_obj(&ifbdev->fb->base);
- unsigned int origin =
- ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU;
+ return ifbdev->fb->frontbuffer;
+}
- intel_fb_obj_invalidate(obj, origin);
+static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
+{
+ intel_frontbuffer_invalidate(to_frontbuffer(ifbdev), ORIGIN_CPU);
}
static int intel_fbdev_set_par(struct fb_info *info)
@@ -120,7 +121,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
- int size, ret;
+ int size;
/* we don't do packed 24bpp */
if (sizes->surface_bpp == 24)
@@ -147,24 +148,16 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
obj = i915_gem_object_create_shmem(dev_priv, size);
if (IS_ERR(obj)) {
DRM_ERROR("failed to allocate framebuffer\n");
- ret = PTR_ERR(obj);
- goto err;
+ return PTR_ERR(obj);
}
fb = intel_framebuffer_create(obj, &mode_cmd);
- if (IS_ERR(fb)) {
- ret = PTR_ERR(fb);
- goto err_obj;
- }
+ i915_gem_object_put(obj);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
ifbdev->fb = to_intel_framebuffer(fb);
-
return 0;
-
-err_obj:
- i915_gem_object_put(obj);
-err:
- return ret;
}
static int intelfb_create(struct drm_fb_helper *helper,
@@ -180,7 +173,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
const struct i915_ggtt_view view = {
.type = I915_GGTT_VIEW_NORMAL,
};
- struct drm_framebuffer *fb;
intel_wakeref_t wakeref;
struct fb_info *info;
struct i915_vma *vma;
@@ -226,8 +218,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unlock;
}
- fb = &ifbdev->fb->base;
- intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_DIRTYFB);
+ intel_frontbuffer_flush(to_frontbuffer(ifbdev), ORIGIN_DIRTYFB);
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
@@ -236,17 +227,14 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unpin;
}
- ifbdev->helper.fb = fb;
+ ifbdev->helper.fb = &ifbdev->fb->base;
info->fbops = &intelfb_ops;
/* setup aperture base/size for vesafb takeover */
- info->apertures->ranges[0].base = dev->mode_config.fb_base;
+ info->apertures->ranges[0].base = ggtt->gmadr.start;
info->apertures->ranges[0].size = ggtt->mappable_end;
- info->fix.smem_start = dev->mode_config.fb_base + i915_ggtt_offset(vma);
- info->fix.smem_len = vma->node.size;
-
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
@@ -256,19 +244,24 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->screen_base = vaddr;
info->screen_size = vma->node.size;
+ /* Our framebuffer is the entirety of fbdev's system memory */
+ info->fix.smem_start = (unsigned long)info->screen_base;
+ info->fix.smem_len = info->screen_size;
+
drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
/* If the object is shmemfs backed, it will have given us zeroed pages.
* If the object is stolen, however, it will be full of whatever
* garbage was left in there.
*/
- if (intel_fb_obj(fb)->stolen && !prealloc)
+ if (vma->obj->stolen && !prealloc)
memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
- fb->width, fb->height, i915_ggtt_offset(vma));
+ ifbdev->fb->base.width, ifbdev->fb->base.height,
+ i915_ggtt_offset(vma));
ifbdev->vma = vma;
ifbdev->vma_flags = flags;
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 8545ad32bb50..ab61f88d1d33 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -26,7 +26,8 @@
*/
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "i915_trace.h"
+#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_fifo_underrun.h"
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 44273c10cea5..719379774fa5 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -30,11 +30,11 @@
* Many features require us to track changes to the currently active
* frontbuffer, especially rendering targeted at the frontbuffer.
*
- * To be able to do so GEM tracks frontbuffers using a bitmask for all possible
- * frontbuffer slots through i915_gem_track_fb(). The function in this file are
- * then called when the contents of the frontbuffer are invalidated, when
- * frontbuffer rendering has stopped again to flush out all the changes and when
- * the frontbuffer is exchanged with a flip. Subsystems interested in
+ * To be able to do so we track frontbuffers using a bitmask for all possible
+ * frontbuffer slots through intel_frontbuffer_track(). The functions in this
+ * file are then called when the contents of the frontbuffer are invalidated,
+ * when frontbuffer rendering has stopped again to flush out all the changes
+ * and when the frontbuffer is exchanged with a flip. Subsystems interested in
* frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
* into the relevant places and filter for the frontbuffer slots that they are
* interested in.
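
For a concrete picture of the per-framebuffer entry points this rework moves to (the same pattern appears in the intel_fbdev.c hunks above), a minimal sketch; the example_dirty() wrapper is illustrative:

	static void example_dirty(struct intel_framebuffer *fb)
	{
		/* CPU is about to write: let PSR/FBC/DRRS back off */
		intel_frontbuffer_invalidate(fb->frontbuffer, ORIGIN_CPU);

		/* ... CPU rendering to the frontbuffer ... */

		/* writes have landed: flush so the consumers re-engage */
		intel_frontbuffer_flush(fb->frontbuffer, ORIGIN_CPU);
	}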
@@ -58,33 +58,14 @@
#include "display/intel_dp.h"
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
#include "intel_psr.h"
-void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin,
- unsigned int frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-
- if (origin == ORIGIN_CS) {
- spin_lock(&dev_priv->fb_tracking.lock);
- dev_priv->fb_tracking.busy_bits |= frontbuffer_bits;
- dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
- spin_unlock(&dev_priv->fb_tracking.lock);
- }
-
- might_sleep();
- intel_psr_invalidate(dev_priv, frontbuffer_bits, origin);
- intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
- intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
-}
-
/**
- * intel_frontbuffer_flush - flush frontbuffer
- * @dev_priv: i915 device
+ * frontbuffer_flush - flush frontbuffer
+ * @i915: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
* @origin: which operation caused the flush
*
@@ -94,45 +75,27 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
*
* Can be called without any locks held.
*/
-static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits,
- enum fb_op_origin origin)
+static void frontbuffer_flush(struct drm_i915_private *i915,
+ unsigned int frontbuffer_bits,
+ enum fb_op_origin origin)
{
/* Delay flushing when rings are still busy. */
- spin_lock(&dev_priv->fb_tracking.lock);
- frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
- spin_unlock(&dev_priv->fb_tracking.lock);
+ spin_lock(&i915->fb_tracking.lock);
+ frontbuffer_bits &= ~i915->fb_tracking.busy_bits;
+ spin_unlock(&i915->fb_tracking.lock);
if (!frontbuffer_bits)
return;
might_sleep();
- intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
- intel_psr_flush(dev_priv, frontbuffer_bits, origin);
- intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
-}
-
-void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin,
- unsigned int frontbuffer_bits)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-
- if (origin == ORIGIN_CS) {
- spin_lock(&dev_priv->fb_tracking.lock);
- /* Filter out new bits since rendering started. */
- frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
- dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&dev_priv->fb_tracking.lock);
- }
-
- if (frontbuffer_bits)
- intel_frontbuffer_flush(dev_priv, frontbuffer_bits, origin);
+ intel_edp_drrs_flush(i915, frontbuffer_bits);
+ intel_psr_flush(i915, frontbuffer_bits, origin);
+ intel_fbc_flush(i915, frontbuffer_bits, origin);
}
/**
* intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on @obj. The actual
@@ -142,19 +105,19 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
- spin_lock(&dev_priv->fb_tracking.lock);
- dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
+ spin_lock(&i915->fb_tracking.lock);
+ i915->fb_tracking.flip_bits |= frontbuffer_bits;
/* Remove stale busy bits due to the old buffer. */
- dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&dev_priv->fb_tracking.lock);
+ i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->fb_tracking.lock);
}
/**
* intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after the flip has been latched and will complete
@@ -162,23 +125,22 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
- spin_lock(&dev_priv->fb_tracking.lock);
+ spin_lock(&i915->fb_tracking.lock);
/* Mask any cancelled flips. */
- frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
- dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
- spin_unlock(&dev_priv->fb_tracking.lock);
+ frontbuffer_bits &= i915->fb_tracking.flip_bits;
+ i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->fb_tracking.lock);
if (frontbuffer_bits)
- intel_frontbuffer_flush(dev_priv,
- frontbuffer_bits, ORIGIN_FLIP);
+ frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
}
/**
* intel_frontbuffer_flip - synchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
 * This function gets called after scheduling a flip on the frontbuffer. This is for
@@ -187,13 +149,160 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
*
* Can be called without any locks held.
*/
-void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
- spin_lock(&dev_priv->fb_tracking.lock);
+ spin_lock(&i915->fb_tracking.lock);
/* Remove stale busy bits due to the old buffer. */
- dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&dev_priv->fb_tracking.lock);
+ i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->fb_tracking.lock);
- intel_frontbuffer_flush(dev_priv, frontbuffer_bits, ORIGIN_FLIP);
+ frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
+}
+
+void __intel_fb_invalidate(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits)
+{
+ struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
+
+ if (origin == ORIGIN_CS) {
+ spin_lock(&i915->fb_tracking.lock);
+ i915->fb_tracking.busy_bits |= frontbuffer_bits;
+ i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->fb_tracking.lock);
+ }
+
+ might_sleep();
+ intel_psr_invalidate(i915, frontbuffer_bits, origin);
+ intel_edp_drrs_invalidate(i915, frontbuffer_bits);
+ intel_fbc_invalidate(i915, frontbuffer_bits, origin);
+}
+
+void __intel_fb_flush(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits)
+{
+ struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
+
+ if (origin == ORIGIN_CS) {
+ spin_lock(&i915->fb_tracking.lock);
+ /* Filter out new bits since rendering started. */
+ frontbuffer_bits &= i915->fb_tracking.busy_bits;
+ i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->fb_tracking.lock);
+ }
+
+ if (frontbuffer_bits)
+ frontbuffer_flush(i915, frontbuffer_bits, origin);
+}
+
+static int frontbuffer_active(struct i915_active *ref)
+{
+ struct intel_frontbuffer *front =
+ container_of(ref, typeof(*front), write);
+
+ kref_get(&front->ref);
+ return 0;
+}
+
+static void frontbuffer_retire(struct i915_active *ref)
+{
+ struct intel_frontbuffer *front =
+ container_of(ref, typeof(*front), write);
+
+ intel_frontbuffer_flush(front, ORIGIN_CS);
+ intel_frontbuffer_put(front);
+}
+
+static void frontbuffer_release(struct kref *ref)
+ __releases(&to_i915(front->obj->base.dev)->fb_tracking.lock)
+{
+ struct intel_frontbuffer *front =
+ container_of(ref, typeof(*front), ref);
+
+ front->obj->frontbuffer = NULL;
+ spin_unlock(&to_i915(front->obj->base.dev)->fb_tracking.lock);
+
+ i915_gem_object_put(front->obj);
+ kfree(front);
+}
+
+struct intel_frontbuffer *
+intel_frontbuffer_get(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_frontbuffer *front;
+
+ spin_lock(&i915->fb_tracking.lock);
+ front = obj->frontbuffer;
+ if (front)
+ kref_get(&front->ref);
+ spin_unlock(&i915->fb_tracking.lock);
+ if (front)
+ return front;
+
+ front = kmalloc(sizeof(*front), GFP_KERNEL);
+ if (!front)
+ return NULL;
+
+ front->obj = obj;
+ kref_init(&front->ref);
+ atomic_set(&front->bits, 0);
+ i915_active_init(i915, &front->write,
+ frontbuffer_active, frontbuffer_retire);
+
+ spin_lock(&i915->fb_tracking.lock);
+ if (obj->frontbuffer) {
+ kfree(front);
+ front = obj->frontbuffer;
+ kref_get(&front->ref);
+ } else {
+ i915_gem_object_get(obj);
+ obj->frontbuffer = front;
+ }
+ spin_unlock(&i915->fb_tracking.lock);
+
+ return front;
+}
+
+void intel_frontbuffer_put(struct intel_frontbuffer *front)
+{
+ kref_put_lock(&front->ref,
+ frontbuffer_release,
+ &to_i915(front->obj->base.dev)->fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_track - update frontbuffer tracking
+ * @old: current buffer for the frontbuffer slots
+ * @new: new buffer for the frontbuffer slots
+ * @frontbuffer_bits: bitmask of frontbuffer slots
+ *
+ * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
+ * from @old and setting them in @new. Both @old and @new can be NULL.
+ */
+void intel_frontbuffer_track(struct intel_frontbuffer *old,
+ struct intel_frontbuffer *new,
+ unsigned int frontbuffer_bits)
+{
+ /*
+	 * Control of individual bits within the mask is guarded by
+ * the owning plane->mutex, i.e. we can never see concurrent
+ * manipulation of individual bits. But since the bitfield as a whole
+ * is updated using RMW, we need to use atomics in order to update
+ * the bits.
+ */
+ BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
+ BITS_PER_TYPE(atomic_t));
+
+ if (old) {
+ WARN_ON(!(atomic_read(&old->bits) & frontbuffer_bits));
+ atomic_andnot(frontbuffer_bits, &old->bits);
+ }
+
+ if (new) {
+ WARN_ON(atomic_read(&new->bits) & frontbuffer_bits);
+ atomic_or(frontbuffer_bits, &new->bits);
+ }
}
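
The new intel_frontbuffer_get() above follows a classic lookup-or-create shape: take the spinlock to probe for an existing tracker, drop it for the sleeping allocation, then retake it and re-check, discarding the allocation if another thread won the race. A minimal userspace model of that pattern (a pthread mutex standing in for the fb_tracking spinlock; all names hypothetical, not i915 code):

#include <pthread.h>
#include <stdlib.h>

struct front {
	int refcount;			/* protected by owner->lock */
};

struct owner {
	pthread_mutex_t lock;		/* models i915->fb_tracking.lock */
	struct front *front;		/* lazily created, shared */
};

static struct front *front_get(struct owner *o)
{
	struct front *f, *newf;

	pthread_mutex_lock(&o->lock);
	f = o->front;
	if (f)
		f->refcount++;
	pthread_mutex_unlock(&o->lock);
	if (f)
		return f;

	newf = malloc(sizeof(*newf));	/* may sleep, so done unlocked */
	if (!newf)
		return NULL;
	newf->refcount = 1;

	pthread_mutex_lock(&o->lock);
	if (o->front) {			/* lost the race: keep the winner */
		free(newf);
		f = o->front;
		f->refcount++;
	} else {
		o->front = f = newf;
	}
	pthread_mutex_unlock(&o->lock);

	return f;
}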
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index 5727320c8084..adc64d61a4a5 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -24,7 +24,10 @@
#ifndef __INTEL_FRONTBUFFER_H__
#define __INTEL_FRONTBUFFER_H__
-#include "gem/i915_gem_object.h"
+#include <linux/atomic.h>
+#include <linux/kref.h>
+
+#include "i915_active.h"
struct drm_i915_private;
struct drm_i915_gem_object;
@@ -37,23 +40,30 @@ enum fb_op_origin {
ORIGIN_DIRTYFB,
};
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
+struct intel_frontbuffer {
+ struct kref ref;
+ atomic_t bits;
+ struct i915_active write;
+ struct drm_i915_gem_object *obj;
+};
+
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
unsigned frontbuffer_bits);
-void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
unsigned frontbuffer_bits);
-void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip(struct drm_i915_private *i915,
unsigned frontbuffer_bits);
-void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin,
- unsigned int frontbuffer_bits);
-void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin,
- unsigned int frontbuffer_bits);
+struct intel_frontbuffer *
+intel_frontbuffer_get(struct drm_i915_gem_object *obj);
+
+void __intel_fb_invalidate(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits);
/**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
+ * intel_frontbuffer_invalidate - invalidate frontbuffer object
+ * @front: frontbuffer to invalidate
* @origin: which operation caused the invalidation
*
* This function gets called every time rendering on the given object starts and
@@ -62,37 +72,53 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
* until the rendering completes or a flip on this frontbuffer plane is
* scheduled.
*/
-static inline bool intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin)
+static inline bool intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
+ enum fb_op_origin origin)
{
unsigned int frontbuffer_bits;
- frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+ if (!front)
+ return false;
+
+ frontbuffer_bits = atomic_read(&front->bits);
if (!frontbuffer_bits)
return false;
- __intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
+ __intel_fb_invalidate(front, origin, frontbuffer_bits);
return true;
}
+void __intel_fb_flush(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits);
+
/**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
+ * intel_frontbuffer_flush - flush frontbuffer object
+ * @front: frontbuffer to flush
* @origin: which operation caused the flush
*
* This function gets called every time rendering on the given object has
* completed and frontbuffer caching can be started again.
*/
-static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin)
+static inline void intel_frontbuffer_flush(struct intel_frontbuffer *front,
+ enum fb_op_origin origin)
{
unsigned int frontbuffer_bits;
- frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+ if (!front)
+ return;
+
+ frontbuffer_bits = atomic_read(&front->bits);
if (!frontbuffer_bits)
return;
- __intel_fb_obj_flush(obj, origin, frontbuffer_bits);
+ __intel_fb_flush(front, origin, frontbuffer_bits);
}
+void intel_frontbuffer_track(struct intel_frontbuffer *old,
+ struct intel_frontbuffer *new,
+ unsigned int frontbuffer_bits);
+
+void intel_frontbuffer_put(struct intel_frontbuffer *front);
+
#endif /* __INTEL_FRONTBUFFER_H__ */
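
The header's atomic "bits" field is what intel_frontbuffer_track() manipulates: slots move from the old buffer's mask to the new one via plain RMW atomics, since each individual bit is serialized by its plane lock but the word as a whole is shared. A compact sketch of the same scheme using C11 atomics (illustrative types, not the i915 API):

#include <assert.h>
#include <stdatomic.h>

struct fb {
	atomic_uint bits;	/* which frontbuffer slots this buffer backs */
};

static void track(struct fb *old, struct fb *new, unsigned int bits)
{
	if (old) {
		/* the old buffer must currently own these slots */
		assert(atomic_load(&old->bits) & bits);
		atomic_fetch_and(&old->bits, ~bits);
	}
	if (new) {
		/* ... and the new buffer must not own them yet */
		assert(!(atomic_load(&new->bits) & bits));
		atomic_fetch_or(&new->bits, bits);
	}
}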
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index b42c79aea61a..d6775a005726 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -35,7 +35,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_gmbus.h"
struct gmbus_pin {
@@ -82,21 +82,6 @@ static const struct gmbus_pin gmbus_pins_cnp[] = {
static const struct gmbus_pin gmbus_pins_icp[] = {
[GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
[GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
- [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
- [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK },
- [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL },
- [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM },
-};
-
-static const struct gmbus_pin gmbus_pins_mcc[] = {
- [GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
- [GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
- [GMBUS_PIN_9_TC1_ICP] = { "dpc", GPIOJ },
-};
-
-static const struct gmbus_pin gmbus_pins_tgp[] = {
- [GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
- [GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
[GMBUS_PIN_3_BXT] = { "dpc", GPIOD },
[GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
[GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK },
@@ -110,11 +95,7 @@ static const struct gmbus_pin gmbus_pins_tgp[] = {
static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
unsigned int pin)
{
- if (HAS_PCH_TGP(dev_priv))
- return &gmbus_pins_tgp[pin];
- else if (HAS_PCH_MCC(dev_priv))
- return &gmbus_pins_mcc[pin];
- else if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
return &gmbus_pins_icp[pin];
else if (HAS_PCH_CNP(dev_priv))
return &gmbus_pins_cnp[pin];
@@ -133,11 +114,7 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
{
unsigned int size;
- if (HAS_PCH_TGP(dev_priv))
- size = ARRAY_SIZE(gmbus_pins_tgp);
- else if (HAS_PCH_MCC(dev_priv))
- size = ARRAY_SIZE(gmbus_pins_mcc);
- else if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
size = ARRAY_SIZE(gmbus_pins_icp);
else if (HAS_PCH_CNP(dev_priv))
size = ARRAY_SIZE(gmbus_pins_cnp);
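
Both gmbus.c hunks lean on the same trick: one sparse, designated-initializer pin table shared by all ICP+ PCHs, where a pin is valid only if it is in range and its slot was actually populated. A self-contained sketch of that validity test (table contents hypothetical):

#include <stdbool.h>
#include <stddef.h>

struct pin { const char *name; int gpio; };

/* sparse table: unnamed slots (3..8 here) are implicitly zeroed */
static const struct pin pins[] = {
	[1] = { "dpa", 0xb },
	[2] = { "dpb", 0xc },
	[9] = { "tc1", 0x1a },
};

static bool pin_is_valid(size_t idx)
{
	return idx < sizeof(pins) / sizeof(pins[0]) && pins[idx].name;
}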
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.h b/drivers/gpu/drm/i915/display/intel_gmbus.h
index d989085b8d22..b96212b85425 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.h
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.h
@@ -11,6 +11,28 @@
struct drm_i915_private;
struct i2c_adapter;
+#define GMBUS_PIN_DISABLED 0
+#define GMBUS_PIN_SSC 1
+#define GMBUS_PIN_VGADDC 2
+#define GMBUS_PIN_PANEL 3
+#define GMBUS_PIN_DPD_CHV 3 /* HDMID_CHV */
+#define GMBUS_PIN_DPC 4 /* HDMIC */
+#define GMBUS_PIN_DPB 5 /* SDVO, HDMIB */
+#define GMBUS_PIN_DPD 6 /* HDMID */
+#define GMBUS_PIN_RESERVED 7 /* 7 reserved */
+#define GMBUS_PIN_1_BXT 1 /* BXT+ (atom) and CNP+ (big core) */
+#define GMBUS_PIN_2_BXT 2
+#define GMBUS_PIN_3_BXT 3
+#define GMBUS_PIN_4_CNP 4
+#define GMBUS_PIN_9_TC1_ICP 9
+#define GMBUS_PIN_10_TC2_ICP 10
+#define GMBUS_PIN_11_TC3_ICP 11
+#define GMBUS_PIN_12_TC4_ICP 12
+#define GMBUS_PIN_13_TC5_TGP 13
+#define GMBUS_PIN_14_TC6_TGP 14
+
+#define GMBUS_NUM_PINS 15 /* including 0 */
+
int intel_gmbus_setup(struct drm_i915_private *dev_priv);
void intel_gmbus_teardown(struct drm_i915_private *dev_priv);
bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
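
GMBUS_NUM_PINS is documented as counting from 0, so it must stay one past the highest pin define. A compile-time guard one might add to keep the two in sync (not part of the patch; the two values are restated so the sketch stands alone):

#define GMBUS_PIN_14_TC6_TGP	14
#define GMBUS_NUM_PINS		15	/* including 0 */

_Static_assert(GMBUS_PIN_14_TC6_TGP + 1 == GMBUS_NUM_PINS,
	       "GMBUS_NUM_PINS must cover the highest pin define");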
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 845eb8f29b58..6ec5ceeab601 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -14,7 +14,8 @@
#include <drm/i915_component.h>
#include "i915_reg.h"
-#include "intel_drv.h"
+#include "intel_display_power.h"
+#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_sideband.h"
@@ -244,8 +245,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
{
I915_WRITE(HDCP_SHA_TEXT, sha_text);
- if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
- HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
+ if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
DRM_ERROR("Timed out waiting for SHA1 ready\n");
return -ETIMEDOUT;
}
@@ -475,9 +475,8 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
/* Tell the HW we're done with the hash and wait for it to ACK */
I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
- if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
- HDCP_SHA1_COMPLETE,
- HDCP_SHA1_COMPLETE, 1)) {
+ if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
+ HDCP_SHA1_COMPLETE, 1)) {
DRM_ERROR("Timed out waiting for SHA1 complete\n");
return -ETIMEDOUT;
}
@@ -540,7 +539,8 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) {
DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
- return -EPERM;
+ ret = -EPERM;
+ goto err;
}
/*
@@ -619,9 +619,8 @@ static int intel_hdcp_auth(struct intel_connector *connector)
I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);
/* Wait for An to be acquired */
- if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
- HDCP_STATUS_AN_READY,
- HDCP_STATUS_AN_READY, 1)) {
+ if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port),
+ HDCP_STATUS_AN_READY, 1)) {
DRM_ERROR("Timed out waiting for An\n");
return -ETIMEDOUT;
}
@@ -705,9 +704,9 @@ static int intel_hdcp_auth(struct intel_connector *connector)
}
/* Wait for encryption confirmation */
- if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
- HDCP_STATUS_ENC, HDCP_STATUS_ENC,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port),
+ HDCP_STATUS_ENC,
+ ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Timed out waiting for encryption\n");
return -ETIMEDOUT;
}
@@ -737,8 +736,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
hdcp->hdcp_encrypted = false;
I915_WRITE(PORT_HDCP_CONF(port), 0);
- if (intel_wait_for_register(&dev_priv->uncore,
- PORT_HDCP_STATUS(port), ~0, 0,
+ if (intel_de_wait_for_clear(dev_priv, PORT_HDCP_STATUS(port), ~0,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
@@ -1515,10 +1513,9 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
CTL_LINK_ENCRYPTION_REQ);
}
- ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
- LINK_ENCRYPTION_STATUS,
- LINK_ENCRYPTION_STATUS,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ ret = intel_de_wait_for_set(dev_priv, HDCP2_STATUS_DDI(port),
+ LINK_ENCRYPTION_STATUS,
+ ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
return ret;
}
@@ -1536,8 +1533,8 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
I915_WRITE(HDCP2_CTL_DDI(port),
I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);
- ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
- LINK_ENCRYPTION_STATUS, 0x0,
+ ret = intel_de_wait_for_clear(dev_priv, HDCP2_STATUS_DDI(port),
+ LINK_ENCRYPTION_STATUS,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
if (ret == -ETIMEDOUT)
DRM_DEBUG_KMS("Disable Encryption Timedout");
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 9bf28de10401..e02f0faecf02 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -45,17 +45,17 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
-#include "intel_drv.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
-#include "intel_sdvo.h"
#include "intel_panel.h"
+#include "intel_sdvo.h"
#include "intel_sideband.h"
static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
@@ -1514,29 +1514,28 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
return true;
}
-static struct hdcp2_hdmi_msg_data {
+struct hdcp2_hdmi_msg_data {
u8 msg_id;
u32 timeout;
u32 timeout2;
- } hdcp2_msg_data[] = {
- {HDCP_2_2_AKE_INIT, 0, 0},
- {HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, 0},
- {HDCP_2_2_AKE_NO_STORED_KM, 0, 0},
- {HDCP_2_2_AKE_STORED_KM, 0, 0},
- {HDCP_2_2_AKE_SEND_HPRIME, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
- HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
- {HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS,
- 0},
- {HDCP_2_2_LC_INIT, 0, 0},
- {HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, 0},
- {HDCP_2_2_SKE_SEND_EKS, 0, 0},
- {HDCP_2_2_REP_SEND_RECVID_LIST,
- HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
- {HDCP_2_2_REP_SEND_ACK, 0, 0},
- {HDCP_2_2_REP_STREAM_MANAGE, 0, 0},
- {HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS,
- 0},
- };
+};
+
+static const struct hdcp2_hdmi_msg_data hdcp2_msg_data[] = {
+ { HDCP_2_2_AKE_INIT, 0, 0 },
+ { HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
+ { HDCP_2_2_AKE_NO_STORED_KM, 0, 0 },
+ { HDCP_2_2_AKE_STORED_KM, 0, 0 },
+ { HDCP_2_2_AKE_SEND_HPRIME, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
+ HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
+ { HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
+ { HDCP_2_2_LC_INIT, 0, 0 },
+ { HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, 0 },
+ { HDCP_2_2_SKE_SEND_EKS, 0, 0 },
+ { HDCP_2_2_REP_SEND_RECVID_LIST, HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
+ { HDCP_2_2_REP_SEND_ACK, 0, 0 },
+ { HDCP_2_2_REP_STREAM_MANAGE, 0, 0 },
+ { HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
+};
static
int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 342587d91d57..56be20f6f47e 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -26,7 +26,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_hotplug.h"
/**
@@ -104,6 +104,12 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
if (IS_CNL_WITH_PORT_F(dev_priv))
return HPD_PORT_E;
return HPD_PORT_F;
+ case PORT_G:
+ return HPD_PORT_G;
+ case PORT_H:
+ return HPD_PORT_H;
+ case PORT_I:
+ return HPD_PORT_I;
default:
MISSING_CASE(port);
return HPD_NONE;
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 7028d0cf3bb1..f8f1308643a9 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -27,8 +27,8 @@
#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drm_edid.h>
+#include "intel_display_types.h"
#include "intel_dp.h"
-#include "intel_drv.h"
#include "intel_lspcon.h"
/* LSPCON OUI Vendor ID(signatures) */
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index efefed62a7f8..b7c459a8931c 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -42,7 +42,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_connector.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_gmbus.h"
#include "intel_lvds.h"
#include "intel_panel.h"
@@ -318,8 +318,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
POSTING_READ(lvds_encoder->reg);
- if (intel_wait_for_register(&dev_priv->uncore,
- PP_STATUS(0), PP_ON, PP_ON, 5000))
+ if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000))
DRM_ERROR("timed out waiting for panel to power on\n");
intel_panel_enable_backlight(pipe_config, conn_state);
@@ -333,8 +332,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON);
- if (intel_wait_for_register(&dev_priv->uncore,
- PP_STATUS(0), PP_ON, 0, 1000))
+ if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 824881271351..969ade623691 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -35,7 +35,7 @@
#include "display/intel_panel.h"
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_opregion.h"
#define OPREGION_HEADER_OFFSET 0
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 07929726b780..29edfc343716 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -33,7 +33,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_frontbuffer.h"
#include "intel_overlay.h"
@@ -191,7 +191,8 @@ struct intel_overlay {
struct overlay_registers __iomem *regs;
u32 flip_addr;
/* flip handling */
- struct i915_active_request last_flip;
+ struct i915_active last_flip;
+ void (*flip_complete)(struct intel_overlay *ovl);
};
static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
@@ -217,30 +218,25 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
}
-static void intel_overlay_submit_request(struct intel_overlay *overlay,
- struct i915_request *rq,
- i915_active_retire_fn retire)
+static struct i915_request *
+alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
{
- GEM_BUG_ON(i915_active_request_peek(&overlay->last_flip,
- &overlay->i915->drm.struct_mutex));
- i915_active_request_set_retire_fn(&overlay->last_flip, retire,
- &overlay->i915->drm.struct_mutex);
- __i915_active_request_set(&overlay->last_flip, rq);
- i915_request_add(rq);
-}
+ struct i915_request *rq;
+ int err;
-static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
- struct i915_request *rq,
- i915_active_retire_fn retire)
-{
- intel_overlay_submit_request(overlay, rq, retire);
- return i915_active_request_retire(&overlay->last_flip,
- &overlay->i915->drm.struct_mutex);
-}
+ overlay->flip_complete = fn;
-static struct i915_request *alloc_request(struct intel_overlay *overlay)
-{
- return i915_request_create(overlay->context);
+ rq = i915_request_create(overlay->context);
+ if (IS_ERR(rq))
+ return rq;
+
+ err = i915_active_ref(&overlay->last_flip, rq->timeline, rq);
+ if (err) {
+ i915_request_add(rq);
+ return ERR_PTR(err);
+ }
+
+ return rq;
}
/* overlay needs to be disabled in OCMD reg */
@@ -252,7 +248,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
WARN_ON(overlay->active);
- rq = alloc_request(overlay);
+ rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -273,7 +269,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
- return intel_overlay_do_wait_request(overlay, rq, NULL);
+ i915_request_add(rq);
+
+ return i915_active_wait(&overlay->last_flip);
}
static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
@@ -283,9 +281,9 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
WARN_ON(overlay->old_vma);
- i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
- vma ? vma->obj : NULL,
- INTEL_FRONTBUFFER_OVERLAY(pipe));
+ intel_frontbuffer_track(overlay->vma ? overlay->vma->obj->frontbuffer : NULL,
+ vma ? vma->obj->frontbuffer : NULL,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
intel_frontbuffer_flip_prepare(overlay->i915,
INTEL_FRONTBUFFER_OVERLAY(pipe));
@@ -317,7 +315,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- rq = alloc_request(overlay);
+ rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -332,8 +330,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
intel_ring_advance(rq, cs);
intel_overlay_flip_prepare(overlay, vma);
-
- intel_overlay_submit_request(overlay, rq, NULL);
+ i915_request_add(rq);
return 0;
}
@@ -354,20 +351,13 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
}
static void
-intel_overlay_release_old_vid_tail(struct i915_active_request *active,
- struct i915_request *rq)
+intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
{
- struct intel_overlay *overlay =
- container_of(active, typeof(*overlay), last_flip);
-
intel_overlay_release_old_vma(overlay);
}
-static void intel_overlay_off_tail(struct i915_active_request *active,
- struct i915_request *rq)
+static void intel_overlay_off_tail(struct intel_overlay *overlay)
{
- struct intel_overlay *overlay =
- container_of(active, typeof(*overlay), last_flip);
struct drm_i915_private *dev_priv = overlay->i915;
intel_overlay_release_old_vma(overlay);
@@ -380,6 +370,16 @@ static void intel_overlay_off_tail(struct i915_active_request *active,
i830_overlay_clock_gating(dev_priv, true);
}
+static void
+intel_overlay_last_flip_retire(struct i915_active *active)
+{
+ struct intel_overlay *overlay =
+ container_of(active, typeof(*overlay), last_flip);
+
+ if (overlay->flip_complete)
+ overlay->flip_complete(overlay);
+}
+
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_off(struct intel_overlay *overlay)
{
@@ -394,7 +394,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- rq = alloc_request(overlay);
+ rq = alloc_request(overlay, intel_overlay_off_tail);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -417,17 +417,16 @@ static int intel_overlay_off(struct intel_overlay *overlay)
intel_ring_advance(rq, cs);
intel_overlay_flip_prepare(overlay, NULL);
+ i915_request_add(rq);
- return intel_overlay_do_wait_request(overlay, rq,
- intel_overlay_off_tail);
+ return i915_active_wait(&overlay->last_flip);
}
/* recover from an interruption due to a signal
 * We have to be careful not to repeat work forever and make forward progress. */
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
- return i915_active_request_retire(&overlay->last_flip,
- &overlay->i915->drm.struct_mutex);
+ return i915_active_wait(&overlay->last_flip);
}
/* Wait for pending overlay flip and release old frame.
@@ -437,43 +436,40 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
+ struct i915_request *rq;
u32 *cs;
- int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- /* Only wait if there is actually an old frame to release to
+ /*
+ * Only wait if there is actually an old frame to release to
* guarantee forward progress.
*/
if (!overlay->old_vma)
return 0;
- if (I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
- /* synchronous slowpath */
- struct i915_request *rq;
+ if (!(I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
+ intel_overlay_release_old_vid_tail(overlay);
+ return 0;
+ }
- rq = alloc_request(overlay);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ rq = alloc_request(overlay, intel_overlay_release_old_vid_tail);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- return PTR_ERR(cs);
- }
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
- *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
- ret = intel_overlay_do_wait_request(overlay, rq,
- intel_overlay_release_old_vid_tail);
- if (ret)
- return ret;
- } else
- intel_overlay_release_old_vid_tail(&overlay->last_flip, NULL);
+ i915_request_add(rq);
- return 0;
+ return i915_active_wait(&overlay->last_flip);
}
void intel_overlay_reset(struct drm_i915_private *dev_priv)
@@ -772,11 +768,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
ret = PTR_ERR(vma);
goto out_pin_section;
}
- intel_fb_obj_flush(new_bo, ORIGIN_DIRTYFB);
-
- ret = i915_vma_put_fence(vma);
- if (ret)
- goto out_unpin;
+ intel_frontbuffer_flush(new_bo->frontbuffer, ORIGIN_DIRTYFB);
if (!overlay->active) {
u32 oconfig;
@@ -1375,7 +1367,9 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
overlay->contrast = 75;
overlay->saturation = 146;
- INIT_ACTIVE_REQUEST(&overlay->last_flip);
+ i915_active_init(dev_priv,
+ &overlay->last_flip,
+ NULL, intel_overlay_last_flip_retire);
ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
if (ret)
@@ -1409,6 +1403,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
WARN_ON(overlay->active);
i915_gem_object_put(overlay->reg_bo);
+ i915_active_fini(&overlay->last_flip);
kfree(overlay);
}
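
The overlay conversion above replaces a per-request retire callback with a single i915_active tracker: each submission records which completion handler should run, and one retire hook dispatches to it. A userspace model of that indirection (container_of spelled out by hand; names illustrative):

#include <stddef.h>

struct overlay;

struct active {
	void (*retire)(struct active *act);	/* runs when the last request completes */
};

struct overlay {
	struct active last_flip;
	void (*flip_complete)(struct overlay *ovl);	/* set per submission */
};

static void last_flip_retire(struct active *act)
{
	/* open-coded container_of(act, struct overlay, last_flip) */
	struct overlay *ovl = (struct overlay *)
		((char *)act - offsetof(struct overlay, last_flip));

	if (ovl->flip_complete)
		ovl->flip_complete(ovl);
}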
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 39d742094065..bc14e9c0285a 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -35,8 +35,8 @@
#include <linux/pwm.h>
#include "intel_connector.h"
+#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
-#include "intel_drv.h"
#include "intel_dsi_dcs_backlight.h"
#include "intel_panel.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 9a48f7a01e7e..6260a2082719 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -30,7 +30,7 @@
#include <linux/seq_file.h>
#include "intel_atomic.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_pipe_crc.h"
static const char * const pipe_crc_sources[] = {
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 69d908e6a050..3bfb720560c2 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -26,7 +26,7 @@
#include "display/intel_dp.h"
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_psr.h"
#include "intel_sprite.h"
@@ -825,8 +825,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
}
/* Wait till PSR is idle */
- if (intel_wait_for_register(&dev_priv->uncore,
- psr_status, psr_status_mask, 0, 2000))
+ if (intel_de_wait_for_clear(dev_priv, psr_status,
+ psr_status_mask, 2000))
DRM_ERROR("Timed out waiting PSR idle state\n");
/* Disable PSR on Sink */
@@ -988,7 +988,7 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->psr.lock);
- err = intel_wait_for_register(&dev_priv->uncore, reg, mask, 0, 50);
+ err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
if (err)
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 0b749c28541f..399b1542509f 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -5,7 +5,7 @@
#include <linux/dmi.h>
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_quirks.h"
/*
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index c5e2dfd7ef80..adeb1c840976 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -39,7 +39,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_connector.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hdmi.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 53c6594c4588..dea63be1964f 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -40,8 +40,9 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_trace.h"
#include "intel_atomic_plane.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_frontbuffer.h"
#include "intel_pm.h"
#include "intel_psr.h"
@@ -330,6 +331,12 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
return 0;
}
+bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id)
+{
+ return INTEL_GEN(dev_priv) >= 11 &&
+ icl_hdr_plane_mask() & BIT(plane_id);
+}
+
static unsigned int
skl_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index 500f6bffb139..093a2d156f1e 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -8,7 +8,6 @@
#include <linux/types.h>
-#include "i915_drv.h"
#include "intel_display.h"
struct drm_device;
@@ -49,11 +48,6 @@ static inline u8 icl_hdr_plane_mask(void)
BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1);
}
-static inline bool icl_is_hdr_plane(struct drm_i915_private *dev_priv,
- enum plane_id plane_id)
-{
- return INTEL_GEN(dev_priv) >= 11 &&
- icl_hdr_plane_mask() & BIT(plane_id);
-}
+bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id);
#endif /* __INTEL_SPRITE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index c96a81c2416c..85743a43bee2 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "intel_display.h"
+#include "intel_display_types.h"
#include "intel_dp_mst.h"
#include "intel_tc.h"
@@ -503,6 +504,12 @@ void intel_tc_port_unlock(struct intel_digital_port *dig_port)
wakeref);
}
+bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
+{
+ return mutex_is_locked(&dig_port->tc_lock) ||
+ dig_port->tc_link_refcount;
+}
+
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
int required_lanes)
{
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index 22fe922ac9cf..783d75531435 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -9,7 +9,7 @@
#include <linux/mutex.h>
#include <linux/types.h>
-#include "intel_drv.h"
+struct intel_digital_port;
bool intel_tc_port_connected(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
@@ -23,12 +23,7 @@ void intel_tc_port_unlock(struct intel_digital_port *dig_port);
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
int required_lanes);
void intel_tc_port_put_link(struct intel_digital_port *dig_port);
-
-static inline int intel_tc_port_ref_held(struct intel_digital_port *dig_port)
-{
- return mutex_is_locked(&dig_port->tc_lock) ||
- dig_port->tc_link_refcount;
-}
+bool intel_tc_port_ref_held(struct intel_digital_port *dig_port);
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy);
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 0a95df6c6a57..b70221f5112a 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -37,7 +37,7 @@
#include "i915_drv.h"
#include "intel_connector.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_hotplug.h"
#include "intel_tv.h"
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 09cd37fb0b1c..dfcd156b5094 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -317,9 +317,6 @@ enum vbt_gmbus_ddi {
ICL_DDC_BUS_PORT_4,
TGL_DDC_BUS_PORT_5,
TGL_DDC_BUS_PORT_6,
- MCC_DDC_BUS_DDI_A = 0x1,
- MCC_DDC_BUS_DDI_B,
- MCC_DDC_BUS_DDI_C = 0x4,
};
#define DP_AUX_A 0x40
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 4ab19c432ef5..598ddb60f9fb 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -9,7 +9,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_vdsc.h"
enum ROW_INDEX_BPP {
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index c8002ffd29e7..a71b22bdd95b 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -34,7 +34,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_connector.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_panel.h"
@@ -84,9 +84,8 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_GEN_FIFO_STAT(port), mask, mask,
- 100))
+ if (intel_de_wait_for_set(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ mask, 100))
DRM_ERROR("DPI FIFOs are not empty\n");
}
@@ -154,10 +153,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* note: this is never true for reads */
if (packet.payload_length) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_GEN_FIFO_STAT(port),
- data_mask, 0,
- 50))
+ if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ data_mask, 50))
DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
write_data(dev_priv, data_reg, packet.payload,
@@ -168,10 +165,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
}
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_GEN_FIFO_STAT(port),
- ctrl_mask, 0,
- 50)) {
+ if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
+ ctrl_mask, 50)) {
DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
}
@@ -180,10 +175,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* ->rx_len is set only for reads */
if (msg->rx_len) {
data_mask = GEN_READ_DATA_AVAIL;
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_INTR_STAT(port),
- data_mask, data_mask,
- 50))
+ if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port),
+ data_mask, 50))
DRM_ERROR("Timeout waiting for read data.\n");
read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
@@ -240,9 +233,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_INTR_STAT(port), mask, mask,
- 100))
+ if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port), mask, 100))
DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
return 0;
@@ -359,11 +350,8 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
/* Wait for Pwr ACK */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_MIPIIO_PORT_POWERED,
- GLK_MIPIIO_PORT_POWERED,
- 20))
+ if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ GLK_MIPIIO_PORT_POWERED, 20))
DRM_ERROR("MIPIO port is powergated\n");
}
@@ -385,11 +373,8 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to set */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_PHY_STATUS_PORT_READY,
- GLK_PHY_STATUS_PORT_READY,
- 20))
+ if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ GLK_PHY_STATUS_PORT_READY, 20))
DRM_ERROR("PHY is not ON\n");
}
@@ -413,11 +398,8 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
I915_WRITE(MIPI_DEVICE_READY(port), val);
/* Wait for ULPS active */
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_ULPS_NOT_ACTIVE,
- 0,
- 20))
+ if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ GLK_ULPS_NOT_ACTIVE, 20))
DRM_ERROR("ULPS not active\n");
/* Exit ULPS */
@@ -440,21 +422,15 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for Stop state */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_DATA_LANE_STOP_STATE,
- GLK_DATA_LANE_STOP_STATE,
- 20))
+ if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
+ GLK_DATA_LANE_STOP_STATE, 20))
DRM_ERROR("Date lane not in STOP state\n");
}
/* Wait for AFE LATCH */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- BXT_MIPI_PORT_CTRL(port),
- AFE_LATCHOUT,
- AFE_LATCHOUT,
- 20))
+ if (intel_de_wait_for_set(dev_priv, BXT_MIPI_PORT_CTRL(port),
+ AFE_LATCHOUT, 20))
DRM_ERROR("D-PHY not entering LP-11 state\n");
}
}
@@ -554,17 +530,15 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_PHY_STATUS_PORT_READY, 0, 20))
+ if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ GLK_PHY_STATUS_PORT_READY, 20))
DRM_ERROR("PHY is not turning OFF\n");
}
/* Wait for Pwr ACK bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_MIPIIO_PORT_POWERED, 0, 20))
+ if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ GLK_MIPIIO_PORT_POWERED, 20))
DRM_ERROR("MIPI IO Port is not powergated\n");
}
}
@@ -583,9 +557,8 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(&dev_priv->uncore,
- MIPI_CTRL(port),
- GLK_PHY_STATUS_PORT_READY, 0, 20))
+ if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
+ GLK_PHY_STATUS_PORT_READY, 20))
DRM_ERROR("PHY is not turning OFF\n");
}
@@ -633,9 +606,8 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* Port A only. MIPI Port C has no similar bit for checking.
*/
if ((IS_GEN9_LP(dev_priv) || port == PORT_A) &&
- intel_wait_for_register(&dev_priv->uncore,
- port_ctrl, AFE_LATCHOUT, 0,
- 30))
+ intel_de_wait_for_clear(dev_priv, port_ctrl,
+ AFE_LATCHOUT, 30))
DRM_ERROR("DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 99cc3e2e9c2c..95f39cd0ce02 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -28,7 +28,7 @@
#include <linux/kernel.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_sideband.h"
@@ -246,11 +246,8 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
* PLL lock should deassert within 200us.
* Wait up to 1ms before timing out.
*/
- if (intel_wait_for_register(&dev_priv->uncore,
- BXT_DSI_PLL_ENABLE,
- BXT_DSI_PLL_LOCKED,
- 0,
- 1))
+ if (intel_de_wait_for_clear(dev_priv, BXT_DSI_PLL_ENABLE,
+ BXT_DSI_PLL_LOCKED, 1))
DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
}
@@ -396,8 +393,8 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
else
txesc2_div = 10;
- I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK);
- I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK);
+ I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
+ I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
}
/* Program BXT Mipi clocks and dividers */
@@ -530,11 +527,8 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
I915_WRITE(BXT_DSI_PLL_ENABLE, val);
/* Timeout and fail if PLL not locked */
- if (intel_wait_for_register(&dev_priv->uncore,
- BXT_DSI_PLL_ENABLE,
- BXT_DSI_PLL_LOCKED,
- BXT_DSI_PLL_LOCKED,
- 1)) {
+ if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE,
+ BXT_DSI_PLL_LOCKED, 1)) {
DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
return;
}
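
The two MIPIO_TXESC_CLK_DIV writes are the functional fix in this file: the register fields evidently take a one-hot encoding of the escape-clock divider, so the code must program 1 << (div - 1) rather than the raw ratio. A worked table under that assumption; note only div 1 and 2 encode the same both ways, which is how the bug could go unnoticed:

#include <stdio.h>

int main(void)
{
	for (unsigned int div = 1; div <= 10; div++)
		printf("divide-by-%-2u -> old 0x%03x, new 0x%03x\n",
		       div, div, 1u << (div - 1));
	return 0;
}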
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 6ad93a09968c..3d4f5775a4ba 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -82,7 +82,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_busy *args = data;
struct drm_i915_gem_object *obj;
- struct reservation_object_list *list;
+ struct dma_resv_list *list;
unsigned int seq;
int err;
@@ -105,7 +105,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* Alternatively, we can trade that extra information on read/write
* activity with
* args->busy =
- * !reservation_object_test_signaled_rcu(obj->resv, true);
+ * !dma_resv_test_signaled_rcu(obj->resv, true);
* to report the overall busyness. This is what the wait-ioctl does.
*
*/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 5295285d5843..b9f504ba3b32 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -8,87 +8,67 @@
#include "i915_drv.h"
#include "i915_gem_clflush.h"
-
-static DEFINE_SPINLOCK(clflush_lock);
+#include "i915_sw_fence_work.h"
+#include "i915_trace.h"
struct clflush {
- struct dma_fence dma; /* Must be first for dma_fence_free() */
- struct i915_sw_fence wait;
- struct work_struct work;
+ struct dma_fence_work base;
struct drm_i915_gem_object *obj;
};
-static const char *i915_clflush_get_driver_name(struct dma_fence *fence)
-{
- return DRIVER_NAME;
-}
-
-static const char *i915_clflush_get_timeline_name(struct dma_fence *fence)
-{
- return "clflush";
-}
-
-static void i915_clflush_release(struct dma_fence *fence)
-{
- struct clflush *clflush = container_of(fence, typeof(*clflush), dma);
-
- i915_sw_fence_fini(&clflush->wait);
-
- BUILD_BUG_ON(offsetof(typeof(*clflush), dma));
- dma_fence_free(&clflush->dma);
-}
-
-static const struct dma_fence_ops i915_clflush_ops = {
- .get_driver_name = i915_clflush_get_driver_name,
- .get_timeline_name = i915_clflush_get_timeline_name,
- .release = i915_clflush_release,
-};
-
-static void __i915_do_clflush(struct drm_i915_gem_object *obj)
+static void __do_clflush(struct drm_i915_gem_object *obj)
{
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
drm_clflush_sg(obj->mm.pages);
- intel_fb_obj_flush(obj, ORIGIN_CPU);
+ intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
}
-static void i915_clflush_work(struct work_struct *work)
+static int clflush_work(struct dma_fence_work *base)
{
- struct clflush *clflush = container_of(work, typeof(*clflush), work);
- struct drm_i915_gem_object *obj = clflush->obj;
-
- if (i915_gem_object_pin_pages(obj)) {
- DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
- goto out;
- }
+ struct clflush *clflush = container_of(base, typeof(*clflush), base);
+ struct drm_i915_gem_object *obj = fetch_and_zero(&clflush->obj);
+ int err;
- __i915_do_clflush(obj);
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto put;
+ __do_clflush(obj);
i915_gem_object_unpin_pages(obj);
-out:
+put:
i915_gem_object_put(obj);
+ return err;
+}
+
+static void clflush_release(struct dma_fence_work *base)
+{
+ struct clflush *clflush = container_of(base, typeof(*clflush), base);
- dma_fence_signal(&clflush->dma);
- dma_fence_put(&clflush->dma);
+ if (clflush->obj)
+ i915_gem_object_put(clflush->obj);
}
-static int __i915_sw_fence_call
-i915_clflush_notify(struct i915_sw_fence *fence,
- enum i915_sw_fence_notify state)
+static const struct dma_fence_work_ops clflush_ops = {
+ .name = "clflush",
+ .work = clflush_work,
+ .release = clflush_release,
+};
+
+static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
- struct clflush *clflush = container_of(fence, typeof(*clflush), wait);
+ struct clflush *clflush;
- switch (state) {
- case FENCE_COMPLETE:
- schedule_work(&clflush->work);
- break;
+ GEM_BUG_ON(!obj->cache_dirty);
- case FENCE_FREE:
- dma_fence_put(&clflush->dma);
- break;
- }
+ clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
+ if (!clflush)
+ return NULL;
- return NOTIFY_DONE;
+ dma_fence_work_init(&clflush->base, &clflush_ops);
+ clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */
+
+ return clflush;
}
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
@@ -126,33 +106,16 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
clflush = NULL;
if (!(flags & I915_CLFLUSH_SYNC))
- clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
+ clflush = clflush_work_create(obj);
if (clflush) {
- GEM_BUG_ON(!obj->cache_dirty);
-
- dma_fence_init(&clflush->dma,
- &i915_clflush_ops,
- &clflush_lock,
- to_i915(obj->base.dev)->mm.unordered_timeline,
- 0);
- i915_sw_fence_init(&clflush->wait, i915_clflush_notify);
-
- clflush->obj = i915_gem_object_get(obj);
- INIT_WORK(&clflush->work, i915_clflush_work);
-
- dma_fence_get(&clflush->dma);
-
- i915_sw_fence_await_reservation(&clflush->wait,
- obj->base.resv, NULL,
- true, I915_FENCE_TIMEOUT,
+ i915_sw_fence_await_reservation(&clflush->base.chain,
+ obj->base.resv, NULL, true,
+ I915_FENCE_TIMEOUT,
I915_FENCE_GFP);
-
- reservation_object_add_excl_fence(obj->base.resv,
- &clflush->dma);
-
- i915_sw_fence_commit(&clflush->wait);
+ dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
+ dma_fence_work_commit(&clflush->base);
} else if (obj->mm.pages) {
- __i915_do_clflush(obj);
+ __do_clflush(obj);
} else {
GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
}
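
The clflush rewrite folds the hand-rolled fence + sw_fence + work_struct trio into the new dma_fence_work helper: callers supply only a work function and a release hook, and the helper handles dependency waiting and signalling. A minimal userspace rendition of the ops-driven shape (synchronous here, whereas the kernel defers the work until dependencies signal):

struct fence_work;

struct fence_work_ops {
	const char *name;
	int (*work)(struct fence_work *fw);	/* main body */
	void (*release)(struct fence_work *fw);	/* final cleanup */
};

struct fence_work {
	const struct fence_work_ops *ops;
	int error;			/* result propagated to waiters */
};

static void fence_work_commit(struct fence_work *fw)
{
	/* the kernel version runs this only once all dependencies signal */
	fw->error = fw->ops->work(fw);
	if (fw->ops->release)
		fw->ops->release(fw);
}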
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 2312a0c6af89..f99920652751 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -2,10 +2,13 @@
/*
* Copyright © 2019 Intel Corporation
*/
-#include "i915_gem_client_blt.h"
+#include "i915_drv.h"
+#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_pool.h"
+#include "i915_gem_client_blt.h"
#include "i915_gem_object_blt.h"
-#include "intel_drv.h"
struct i915_sleeve {
struct i915_vma *vma;
@@ -152,10 +155,11 @@ static void clear_pages_dma_fence_cb(struct dma_fence *fence,
static void clear_pages_worker(struct work_struct *work)
{
struct clear_pages_work *w = container_of(work, typeof(*w), work);
- struct drm_i915_private *i915 = w->ce->gem_context->i915;
+ struct drm_i915_private *i915 = w->ce->engine->i915;
struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
struct i915_vma *vma = w->sleeve->vma;
struct i915_request *rq;
+ struct i915_vma *batch;
int err = w->dma.error;
if (unlikely(err))
@@ -175,10 +179,16 @@ static void clear_pages_worker(struct work_struct *work)
if (unlikely(err))
goto out_unlock;
- rq = i915_request_create(w->ce);
+ batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_unpin;
+ }
+
+ rq = intel_context_create_request(w->ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_unpin;
+ goto out_batch;
}
/* There's no way the fence has signalled */
@@ -186,6 +196,10 @@ static void clear_pages_worker(struct work_struct *work)
clear_pages_dma_fence_cb))
GEM_BUG_ON(1);
+ err = intel_emit_vma_mark_active(batch, rq);
+ if (unlikely(err))
+ goto out_request;
+
if (w->ce->engine->emit_init_breadcrumb) {
err = w->ce->engine->emit_init_breadcrumb(rq);
if (unlikely(err))
@@ -197,11 +211,13 @@ static void clear_pages_worker(struct work_struct *work)
* keep track of the GPU activity within this vma/request, and
* propagate the signal from the request to w->dma.
*/
- err = i915_active_ref(&vma->active, rq->fence.context, rq);
+ err = i915_active_ref(&vma->active, rq->timeline, rq);
if (err)
goto out_request;
- err = intel_emit_vma_fill_blt(rq, vma, w->value);
+ err = w->ce->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ 0);
out_request:
if (unlikely(err)) {
i915_request_skip(rq, err);
@@ -209,6 +225,8 @@ out_request:
}
i915_request_add(rq);
+out_batch:
+ intel_emit_vma_release(w->ce, batch);
out_unpin:
i915_vma_unpin(vma);
out_unlock:
@@ -249,7 +267,6 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
struct i915_page_sizes *page_sizes,
u32 value)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct clear_pages_work *work;
struct i915_sleeve *sleeve;
int err;
@@ -272,11 +289,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
init_irq_work(&work->irq_work, clear_pages_signal_irq_worker);
- dma_fence_init(&work->dma,
- &clear_pages_work_ops,
- &fence_lock,
- i915->mm.unordered_timeline,
- 0);
+ dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
i915_sw_fence_init(&work->wait, clear_pages_work_notify);
i915_gem_object_lock(obj);
@@ -287,7 +300,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
if (err < 0) {
dma_fence_set_error(&work->dma, err);
} else {
- reservation_object_add_excl_fence(obj->base.resv, &work->dma);
+ dma_resv_add_excl_fence(obj->base.resv, &work->dma);
err = 0;
}
i915_gem_object_unlock(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index b28c7ca681a8..1cdfe05514c3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -70,6 +70,7 @@
#include <drm/i915_drm.h>
#include "gt/intel_lrc_reg.h"
+#include "gt/intel_engine_user.h"
#include "i915_gem_context.h"
#include "i915_globals.h"
@@ -158,7 +159,7 @@ lookup_user_engine(struct i915_gem_context *ctx,
if (!engine)
return ERR_PTR(-EINVAL);
- idx = engine->id;
+ idx = engine->legacy_idx;
} else {
idx = ci->engine_instance;
}
@@ -172,7 +173,9 @@ static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
lockdep_assert_held(&i915->contexts.mutex);
- if (INTEL_GEN(i915) >= 11)
+ if (INTEL_GEN(i915) >= 12)
+ max = GEN12_MAX_CONTEXT_HW_ID;
+ else if (INTEL_GEN(i915) >= 11)
max = GEN11_MAX_CONTEXT_HW_ID;
else if (USES_GUC_SUBMISSION(i915))
/*
@@ -278,6 +281,7 @@ static void free_engines_rcu(struct rcu_head *rcu)
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
{
+ const struct intel_gt *gt = &ctx->i915->gt;
struct intel_engine_cs *engine;
struct i915_gem_engines *e;
enum intel_engine_id id;
@@ -287,7 +291,7 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
return ERR_PTR(-ENOMEM);
init_rcu_head(&e->rcu);
- for_each_engine(engine, ctx->i915, id) {
+ for_each_engine(engine, gt, id) {
struct intel_context *ce;
ce = intel_context_create(ctx, engine);
@@ -297,8 +301,8 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
}
e->engines[id] = ce;
+ e->num_engines = id + 1;
}
- e->num_engines = id;
return e;
}
@@ -397,30 +401,6 @@ static void context_close(struct i915_gem_context *ctx)
i915_gem_context_put(ctx);
}
-static u32 default_desc_template(const struct drm_i915_private *i915,
- const struct i915_address_space *vm)
-{
- u32 address_mode;
- u32 desc;
-
- desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
-
- address_mode = INTEL_LEGACY_32B_CONTEXT;
- if (vm && i915_vm_is_4lvl(vm))
- address_mode = INTEL_LEGACY_64B_CONTEXT;
- desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
-
- if (IS_GEN(i915, 8))
- desc |= GEN8_CTX_L3LLC_COHERENT;
-
- /* TODO: WaDisableLiteRestore when we start using semaphore
- * signalling between Command Streamers
- * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
- */
-
- return desc;
-}
-
static struct i915_gem_context *
__create_context(struct drm_i915_private *i915)
{
@@ -458,9 +438,6 @@ __create_context(struct drm_i915_private *i915)
i915_gem_context_set_bannable(ctx);
i915_gem_context_set_recoverable(ctx);
- ctx->ring_size = 4 * PAGE_SIZE;
- ctx->desc_template = default_desc_template(i915, NULL);
-
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
@@ -471,21 +448,34 @@ err_free:
return ERR_PTR(err);
}
+static void
+context_apply_all(struct i915_gem_context *ctx,
+ void (*fn)(struct intel_context *ce, void *data),
+ void *data)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
+ fn(ce, data);
+ i915_gem_context_unlock_engines(ctx);
+}
+
+static void __apply_ppgtt(struct intel_context *ce, void *vm)
+{
+ i915_vm_put(ce->vm);
+ ce->vm = i915_vm_get(vm);
+}
+
static struct i915_address_space *
__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
struct i915_address_space *old = ctx->vm;
- struct i915_gem_engines_iter it;
- struct intel_context *ce;
- ctx->vm = i915_vm_get(vm);
- ctx->desc_template = default_desc_template(ctx->i915, vm);
+ GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
- i915_vm_put(ce->vm);
- ce->vm = i915_vm_get(vm);
- }
- i915_gem_context_unlock_engines(ctx);
+ ctx->vm = i915_vm_get(vm);
+ context_apply_all(ctx, __apply_ppgtt, vm);
return old;
}
@@ -501,6 +491,29 @@ static void __assign_ppgtt(struct i915_gem_context *ctx,
i915_vm_put(vm);
}
+static void __set_timeline(struct intel_timeline **dst,
+ struct intel_timeline *src)
+{
+ struct intel_timeline *old = *dst;
+
+ *dst = src ? intel_timeline_get(src) : NULL;
+
+ if (old)
+ intel_timeline_put(old);
+}
+
+static void __apply_timeline(struct intel_context *ce, void *timeline)
+{
+ __set_timeline(&ce->timeline, timeline);
+}
+
+static void __assign_timeline(struct i915_gem_context *ctx,
+ struct intel_timeline *timeline)
+{
+ __set_timeline(&ctx->timeline, timeline);
+ context_apply_all(ctx, __apply_timeline, timeline);
+}
+
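Both __apply_ppgtt() and __apply_timeline() are now just callbacks for context_apply_all(), which hides the lock-iterate-unlock walk over the context's engines behind a single helper. Any per-engine-context update can reuse it; a hypothetical further user, purely for illustration:

/* Count the engine contexts bound to a GEM context (illustrative only). */
static void __count_engine(struct intel_context *ce, void *data)
{
	(*(unsigned int *)data)++;
}

static unsigned int count_engines(struct i915_gem_context *ctx)
{
	unsigned int count = 0;

	context_apply_all(ctx, __count_engine, &count);
	return count;
}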
static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
{
@@ -543,7 +556,8 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
return ERR_CAST(timeline);
}
- ctx->timeline = timeline;
+ __assign_timeline(ctx, timeline);
+ intel_timeline_put(timeline);
}
trace_i915_context_create(ctx);
@@ -551,53 +565,6 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
return ctx;
}
-/**
- * i915_gem_context_create_gvt - create a GVT GEM context
- * @dev: drm device *
- *
- * This function is used to create a GVT specific GEM context.
- *
- * Returns:
- * pointer to i915_gem_context on success, error pointer if failed
- *
- */
-struct i915_gem_context *
-i915_gem_context_create_gvt(struct drm_device *dev)
-{
- struct i915_gem_context *ctx;
- int ret;
-
- if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
- return ERR_PTR(-ENODEV);
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ERR_PTR(ret);
-
- ctx = i915_gem_create_context(to_i915(dev), 0);
- if (IS_ERR(ctx))
- goto out;
-
- ret = i915_gem_context_pin_hw_id(ctx);
- if (ret) {
- context_close(ctx);
- ctx = ERR_PTR(ret);
- goto out;
- }
-
- ctx->file_priv = ERR_PTR(-EBADF);
- i915_gem_context_set_closed(ctx); /* not user accessible */
- i915_gem_context_clear_bannable(ctx);
- i915_gem_context_set_force_single_submission(ctx);
- if (!USES_GUC_SUBMISSION(to_i915(dev)))
- ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
-
- GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
-out:
- mutex_unlock(&dev->struct_mutex);
- return ctx;
-}
-
static void
destroy_kernel_context(struct i915_gem_context **ctxp)
{
@@ -629,7 +596,6 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
i915_gem_context_clear_bannable(ctx);
ctx->sched.priority = I915_USER_PRIORITY(prio);
- ctx->ring_size = PAGE_SIZE;
GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -944,7 +910,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (emit)
err = emit(rq, data);
if (err == 0)
- err = i915_active_ref(&cb->base, rq->fence.context, rq);
+ err = i915_active_ref(&cb->base, rq->timeline, rq);
i915_request_add(rq);
if (err)
@@ -1194,7 +1160,7 @@ __intel_context_reconfigure_sseu(struct intel_context *ce,
{
int ret;
- GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8);
+ GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
ret = intel_context_lock_pinned(ce);
if (ret)
@@ -1216,7 +1182,7 @@ unlock:
static int
intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
{
- struct drm_i915_private *i915 = ce->gem_context->i915;
+ struct drm_i915_private *i915 = ce->engine->i915;
int ret;
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
@@ -1613,6 +1579,7 @@ set_engines(struct i915_gem_context *ctx,
for (n = 0; n < num_engines; n++) {
struct i915_engine_class_instance ci;
struct intel_engine_cs *engine;
+ struct intel_context *ce;
if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
__free_engines(set.engines, n);
@@ -1635,11 +1602,13 @@ set_engines(struct i915_gem_context *ctx,
return -ENOENT;
}
- set.engines->engines[n] = intel_context_create(ctx, engine);
- if (!set.engines->engines[n]) {
+ ce = intel_context_create(ctx, engine);
+ if (IS_ERR(ce)) {
__free_engines(set.engines, n);
- return -ENOMEM;
+ return PTR_ERR(ce);
}
+
+ set.engines->engines[n] = ce;
}
set.engines->num_engines = num_engines;
@@ -1753,7 +1722,7 @@ get_engines(struct i915_gem_context *ctx,
if (e->engines[n]) {
ci.engine_class = e->engines[n]->engine->uabi_class;
- ci.engine_instance = e->engines[n]->engine->instance;
+ ci.engine_instance = e->engines[n]->engine->uabi_instance;
}
if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
@@ -1988,13 +1957,8 @@ unlock:
static int clone_timeline(struct i915_gem_context *dst,
struct i915_gem_context *src)
{
- if (src->timeline) {
- GEM_BUG_ON(src->timeline == dst->timeline);
-
- if (dst->timeline)
- intel_timeline_put(dst->timeline);
- dst->timeline = intel_timeline_get(src->timeline);
- }
+ if (src->timeline)
+ __assign_timeline(dst, src->timeline);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 106e2ccf7a4c..176978608b6f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -141,8 +141,6 @@ int i915_gem_context_open(struct drm_i915_private *i915,
void i915_gem_context_close(struct drm_file *file);
void i915_gem_context_release(struct kref *ctx_ref);
-struct i915_gem_context *
-i915_gem_context_create_gvt(struct drm_device *dev);
int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 0ee61482ef94..260d59cc3de8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -169,11 +169,6 @@ struct i915_gem_context {
struct i915_sched_attr sched;
- /** ring_size: size for allocating the per-engine ring buffer */
- u32 ring_size;
- /** desc_template: invariant fields for the HW context descriptor */
- u32 desc_template;
-
/** guilty_count: How many times this context has caused a GPU hang. */
atomic_t guilty_count;
/**
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 570b20ad9e58..96ce95c8ac5a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -6,7 +6,7 @@
#include <linux/dma-buf.h>
#include <linux/highmem.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include "i915_drv.h"
#include "i915_gem_object.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 2e3ce2a69653..9c58e8fac1d9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -221,6 +221,8 @@ restart:
* state and so involves less work.
*/
if (atomic_read(&obj->bind_count)) {
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
/* Before we change the PTE, the GPU must not be accessing it.
* If we wait upon the object, we know that all the bound
* VMA are no longer active.
@@ -232,18 +234,30 @@ restart:
if (ret)
return ret;
- if (!HAS_LLC(to_i915(obj->base.dev)) &&
- cache_level != I915_CACHE_NONE) {
- /* Access to snoopable pages through the GTT is
+ if (!HAS_LLC(i915) && cache_level != I915_CACHE_NONE) {
+ intel_wakeref_t wakeref =
+ intel_runtime_pm_get(&i915->runtime_pm);
+
+ /*
+ * Access to snoopable pages through the GTT is
* incoherent and on some machines causes a hard
* lockup. Relinquish the CPU mmapping to force
* userspace to refault in the pages and we can
* then double check if the GTT mapping is still
* valid for that pointer access.
*/
- i915_gem_object_release_mmap(obj);
+ ret = mutex_lock_interruptible(&i915->ggtt.vm.mutex);
+ if (ret) {
+ intel_runtime_pm_put(&i915->runtime_pm,
+ wakeref);
+ return ret;
+ }
+
+ if (obj->userfault_count)
+ __i915_gem_object_release_mmap(obj);
- /* As we no longer need a fence for GTT access,
+ /*
+ * As we no longer need a fence for GTT access,
* we can relinquish it now (and so prevent having
* to steal a fence from someone else on the next
* fence request). Note GPU activity would have
@@ -251,12 +265,17 @@ restart:
* supposed to be linear.
*/
for_each_ggtt_vma(vma, obj) {
- ret = i915_vma_put_fence(vma);
+ ret = i915_vma_revoke_fence(vma);
if (ret)
- return ret;
+ break;
}
+ mutex_unlock(&i915->ggtt.vm.mutex);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ if (ret)
+ return ret;
} else {
- /* We either have incoherent backing store and
+ /*
+ * We either have incoherent backing store and
* so no GTT access or the architecture is fully
* coherent. In such cases, existing GTT mmaps
* ignore the cache bit in the PTE and we can
@@ -551,13 +570,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
return 0;
}
-static inline enum fb_op_origin
-fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
-{
- return (domain == I915_GEM_DOMAIN_GTT ?
- obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
-}
-
/**
* Called when user space prepares to use an object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
@@ -661,9 +673,8 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
i915_gem_object_unlock(obj);
- if (write_domain != 0)
- intel_fb_obj_invalidate(obj,
- fb_write_origin(obj, write_domain));
+ if (write_domain)
+ intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
out_unpin:
i915_gem_object_unpin_pages(obj);
@@ -783,7 +794,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
}
out:
- intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+ intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
obj->mm.dirty = true;
/* return with the pages pinned */
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index cbd7c6e3a1f8..f813fcb8ceb6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -5,7 +5,7 @@
*/
#include <linux/intel-iommu.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
@@ -16,14 +16,15 @@
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
+#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
-#include "i915_gem_ioctls.h"
+#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
+#include "i915_gem_ioctls.h"
#include "i915_trace.h"
-#include "intel_drv.h"
enum {
FORCE_CPU_RELOC = 1,
@@ -734,63 +735,6 @@ static int eb_select_context(struct i915_execbuffer *eb)
return 0;
}
-static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
-{
- struct i915_request *rq;
-
- /*
- * Completely unscientific finger-in-the-air estimates for suitable
- * maximum user request size (to avoid blocking) and then backoff.
- */
- if (intel_ring_update_space(ring) >= PAGE_SIZE)
- return NULL;
-
- /*
- * Find a request that after waiting upon, there will be at least half
- * the ring available. The hysteresis allows us to compete for the
- * shared ring and should mean that we sleep less often prior to
- * claiming our resources, but not so long that the ring completely
- * drains before we can submit our next request.
- */
- list_for_each_entry(rq, &ring->request_list, ring_link) {
- if (__intel_ring_space(rq->postfix,
- ring->emit, ring->size) > ring->size / 2)
- break;
- }
- if (&rq->ring_link == &ring->request_list)
- return NULL; /* weird, we will check again later for real */
-
- return i915_request_get(rq);
-}
-
-static int eb_wait_for_ring(const struct i915_execbuffer *eb)
-{
- struct i915_request *rq;
- int ret = 0;
-
- /*
- * Apply a light amount of backpressure to prevent excessive hogs
- * from blocking waiting for space whilst holding struct_mutex and
- * keeping all of their resources pinned.
- */
-
- rq = __eb_wait_for_ring(eb->context->ring);
- if (rq) {
- mutex_unlock(&eb->i915->drm.struct_mutex);
-
- if (i915_request_wait(rq,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT) < 0)
- ret = -EINTR;
-
- i915_request_put(rq);
-
- mutex_lock(&eb->i915->drm.struct_mutex);
- }
-
- return ret;
-}
-
static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
@@ -1014,11 +958,12 @@ static void reloc_cache_reset(struct reloc_cache *cache)
kunmap_atomic(vaddr);
i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
} else {
- wmb();
+ struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __iomem *)vaddr);
- if (cache->node.allocated) {
- struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+ if (cache->node.allocated) {
ggtt->vm.clear_range(&ggtt->vm,
cache->node.start,
cache->node.size);
@@ -1073,11 +1018,15 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
void *vaddr;
if (cache->vaddr) {
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
} else {
struct i915_vma *vma;
int err;
+ if (i915_gem_object_is_tiled(obj))
+ return ERR_PTR(-EINVAL);
+
if (use_cpu_reloc(cache, obj))
return NULL;
@@ -1089,8 +1038,8 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
- PIN_NONBLOCK |
- PIN_NONFAULT);
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
err = drm_mm_insert_node_in_range
@@ -1101,12 +1050,6 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
if (err) /* no inactive aperture space, use cpu reloc */
return NULL;
} else {
- err = i915_vma_put_fence(vma);
- if (err) {
- i915_vma_unpin(vma);
- return ERR_PTR(err);
- }
-
cache->node.start = vma->node.start;
cache->node.mm = (void *)vma;
}
@@ -1114,7 +1057,6 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset = cache->node.start;
if (cache->node.allocated) {
- wmb();
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, page),
offset, I915_CACHE_NONE, 0);
@@ -1197,25 +1139,26 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
unsigned int len)
{
struct reloc_cache *cache = &eb->reloc_cache;
- struct drm_i915_gem_object *obj;
+ struct intel_engine_pool_node *pool;
struct i915_request *rq;
struct i915_vma *batch;
u32 *cmd;
int err;
- obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
- cmd = i915_gem_object_pin_map(obj,
+ cmd = i915_gem_object_pin_map(pool->obj,
cache->has_llc ?
I915_MAP_FORCE_WB :
I915_MAP_FORCE_WC);
- i915_gem_object_unpin_pages(obj);
- if (IS_ERR(cmd))
- return PTR_ERR(cmd);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto out_pool;
+ }
- batch = i915_vma_instance(obj, vma->vm, NULL);
+ batch = i915_vma_instance(pool->obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto err_unmap;
@@ -1231,6 +1174,10 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto err_unpin;
}
+ err = intel_engine_pool_mark_active(pool, rq);
+ if (err)
+ goto err_request;
+
err = reloc_move_to_gpu(rq, vma);
if (err)
goto err_request;
@@ -1242,8 +1189,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto skip_request;
i915_vma_lock(batch);
- GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
- err = i915_vma_move_to_active(batch, rq, 0);
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
i915_vma_unlock(batch);
if (err)
goto skip_request;
@@ -1256,7 +1204,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
cache->rq_size = 0;
/* Return with batch mapping (cmd) still pinned */
- return 0;
+ goto out_pool;
skip_request:
i915_request_skip(rq, err);
@@ -1265,7 +1213,9 @@ err_request:
err_unpin:
i915_vma_unpin(batch);
err_unmap:
- i915_gem_object_unpin_map(obj);
+ i915_gem_object_unpin_map(pool->obj);
+out_pool:
+ intel_engine_pool_put(pool);
return err;
}
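The reworked __reloc_gpu_alloc() above is one of several conversions in this patch from the old per-engine batch_pool to intel_engine_pool, and they all follow the same buffer lifecycle. The essential shape, error handling elided (a sketch, not a quote of any one call site):

static int use_pool_buffer(struct intel_engine_cs *engine,
			   struct i915_request *rq, size_t size)
{
	struct intel_engine_pool_node *pool;
	int err;

	/* Borrow a buffer of at least 'size' from the engine's pool. */
	pool = intel_engine_pool_get(&engine->pool, size);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	/*
	 * Tie the buffer to the request: the node is only recycled once
	 * the request's fence has signalled, so the GPU may keep using
	 * pool->obj after we drop our reference below.
	 */
	err = intel_engine_pool_mark_active(pool, rq);

	/* ... emit commands referencing pool->obj ... */

	intel_engine_pool_put(pool);
	return err;
}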
@@ -1313,7 +1263,7 @@ relocate_entry(struct i915_vma *vma,
if (!eb->reloc_cache.vaddr &&
(DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
- !reservation_object_test_signaled_rcu(vma->resv, true))) {
+ !dma_resv_test_signaled_rcu(vma->resv, true))) {
const unsigned int gen = eb->reloc_cache.gen;
unsigned int len;
u32 *batch;
@@ -2009,18 +1959,17 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
{
- struct drm_i915_gem_object *shadow_batch_obj;
+ struct intel_engine_pool_node *pool;
struct i915_vma *vma;
int err;
- shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
- PAGE_ALIGN(eb->batch_len));
- if (IS_ERR(shadow_batch_obj))
- return ERR_CAST(shadow_batch_obj);
+ pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
+ if (IS_ERR(pool))
+ return ERR_CAST(pool);
err = intel_engine_cmd_parser(eb->engine,
eb->batch->obj,
- shadow_batch_obj,
+ pool->obj,
eb->batch_start_offset,
eb->batch_len,
is_master);
@@ -2029,12 +1978,12 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
vma = NULL;
else
vma = ERR_PTR(err);
- goto out;
+ goto err;
}
- vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
+ vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
if (IS_ERR(vma))
- goto out;
+ goto err;
eb->vma[eb->buffer_count] = i915_vma_get(vma);
eb->flags[eb->buffer_count] =
@@ -2042,16 +1991,24 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
vma->exec_flags = &eb->flags[eb->buffer_count];
eb->buffer_count++;
-out:
- i915_gem_object_unpin_pages(shadow_batch_obj);
+ vma->private = pool;
+ return vma;
+
+err:
+ intel_engine_pool_put(pool);
return vma;
}
static void
add_to_client(struct i915_request *rq, struct drm_file *file)
{
- rq->file_priv = file->driver_priv;
- list_add_tail(&rq->client_link, &rq->file_priv->mm.request_list);
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ rq->file_priv = file_priv;
+
+ spin_lock(&file_priv->mm.lock);
+ list_add_tail(&rq->client_link, &file_priv->mm.request_list);
+ spin_unlock(&file_priv->mm.lock);
}
static int eb_submit(struct i915_execbuffer *eb)
@@ -2091,6 +2048,12 @@ static int eb_submit(struct i915_execbuffer *eb)
return 0;
}
+static int num_vcs_engines(const struct drm_i915_private *i915)
+{
+ return hweight64(INTEL_INFO(i915)->engine_mask &
+ GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0));
+}
+
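num_vcs_engines() is just a popcount over the VCS slice of the device's engine mask. The same arithmetic as standalone C, with VCS0 = 2 and I915_MAX_VCS = 4 assumed purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	((~0ULL >> (63 - (h))) & (~0ULL << (l)))

static int hweight64(uint64_t v)
{
	int n = 0;

	for (; v; v &= v - 1) /* clear the lowest set bit each round */
		n++;
	return n;
}

int main(void)
{
	const int VCS0 = 2, I915_MAX_VCS = 4;	/* assumed bit layout */
	uint64_t engine_mask = 0x0d;		/* e.g. RCS0 + VCS0 + VCS1 */

	printf("%d VCS engines\n",		/* prints: 2 VCS engines */
	       hweight64(engine_mask & GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0)));
	return 0;
}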
/*
* Find one BSD ring to dispatch the corresponding BSD command.
* The engine index is returned.
@@ -2103,8 +2066,8 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_engine < 0)
- file_priv->bsd_engine = atomic_fetch_xor(1,
- &dev_priv->mm.bsd_engine_dispatch_index);
+ file_priv->bsd_engine =
+ get_random_int() % num_vcs_engines(dev_priv);
return file_priv->bsd_engine;
}
@@ -2117,8 +2080,73 @@ static const enum intel_engine_id user_ring_map[] = {
[I915_EXEC_VEBOX] = VECS0
};
-static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
+static struct i915_request *eb_throttle(struct intel_context *ce)
{
+ struct intel_ring *ring = ce->ring;
+ struct intel_timeline *tl = ce->timeline;
+ struct i915_request *rq;
+
+ /*
+ * Completely unscientific finger-in-the-air estimates for suitable
+ * maximum user request size (to avoid blocking) and then backoff.
+ */
+ if (intel_ring_update_space(ring) >= PAGE_SIZE)
+ return NULL;
+
+ /*
+ * Find a request such that, after waiting upon it, at least half the
+ * ring will be available. The hysteresis allows us to compete for the
+ * shared ring and should mean that we sleep less often prior to
+ * claiming our resources, but not so long that the ring completely
+ * drains before we can submit our next request.
+ */
+ list_for_each_entry(rq, &tl->requests, link) {
+ if (rq->ring != ring)
+ continue;
+
+ if (__intel_ring_space(rq->postfix,
+ ring->emit, ring->size) > ring->size / 2)
+ break;
+ }
+ if (&rq->link == &tl->requests)
+ return NULL; /* weird, we will check again later for real */
+
+ return i915_request_get(rq);
+}
+
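eb_throttle() treats the ring as the circular buffer it is: the space freed by a request's completion is the distance from the emit offset back round to that request's postfix, modulo the ring size. A self-contained model of the half-ring test (the real __intel_ring_space() additionally reserves a small gap so the tail can never catch the head; a power-of-two size is assumed):

#include <stdint.h>
#include <stdio.h>

/* Bytes free once the request whose breadcrumb ends at 'postfix'
 * has been consumed, in a power-of-two circular ring. */
static uint32_t space_after(uint32_t postfix, uint32_t emit, uint32_t size)
{
	return (postfix - emit) & (size - 1);
}

int main(void)
{
	uint32_t size = 64 * 1024;	/* 64 KiB ring */
	uint32_t emit = 60000;		/* where we would emit next */
	uint32_t postfix = 30000;	/* end of some in-flight request */
	uint32_t space = space_after(postfix, emit, size);

	/* Throttle on the first request that frees more than half the ring. */
	printf("space %u vs %u -> %s\n", space, size / 2,
	       space > size / 2 ? "wait on this request" : "keep scanning");
	return 0;
}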
+static int
+__eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
+{
+ int err;
+
+ if (likely(atomic_inc_not_zero(&ce->pin_count)))
+ return 0;
+
+ err = mutex_lock_interruptible(&eb->i915->drm.struct_mutex);
+ if (err)
+ return err;
+
+ err = __intel_context_do_pin(ce);
+ mutex_unlock(&eb->i915->drm.struct_mutex);
+
+ return err;
+}
+
+static void
+__eb_unpin_context(struct i915_execbuffer *eb, struct intel_context *ce)
+{
+ if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
+ return;
+
+ mutex_lock(&eb->i915->drm.struct_mutex);
+ intel_context_unpin(ce);
+ mutex_unlock(&eb->i915->drm.struct_mutex);
+}
+
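__eb_pin_context()/__eb_unpin_context() are the classic lock-free fast path around a pin count: increment only if already non-zero, decrement only if that would not reach zero, and take the mutex solely for the 0 <-> 1 transitions. Stripped of i915 specifics (a sketch; pin_hw()/unpin_hw() stand in for the real work done under the lock):

#include <linux/atomic.h>
#include <linux/mutex.h>

static int pin(atomic_t *count, struct mutex *lock)
{
	if (atomic_inc_not_zero(count))		/* fast path: already pinned */
		return 0;

	mutex_lock(lock);
	if (atomic_read(count) == 0) {
		/* pin_hw() would go here; publish only on success: */
		atomic_set(count, 1);
	} else {
		atomic_inc(count);		/* raced with another pinner */
	}
	mutex_unlock(lock);
	return 0;
}

static void unpin(atomic_t *count, struct mutex *lock)
{
	if (atomic_add_unless(count, -1, 1))	/* fast path: not the last pin */
		return;

	mutex_lock(lock);
	if (atomic_dec_and_test(count))
		; /* unpin_hw(); */
	mutex_unlock(lock);
}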
+static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
+{
+ struct intel_timeline *tl;
+ struct i915_request *rq;
int err;
/*
@@ -2134,18 +2162,64 @@ static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- err = intel_context_pin(ce);
+ err = __eb_pin_context(eb, ce);
if (err)
return err;
+ /*
+ * Take a local wakeref for preparing to dispatch the execbuf as
+ * we expect to access the hardware fairly frequently in the
+ * process, and require the engine to be kept awake between accesses.
+ * Upon dispatch, we acquire another prolonged wakeref that we hold
+ * until the timeline is idle, which in turn releases the wakeref
+ * taken on the engine, and the parent device.
+ */
+ tl = intel_context_timeline_lock(ce);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto err_unpin;
+ }
+
+ intel_context_enter(ce);
+ rq = eb_throttle(ce);
+
+ intel_context_timeline_unlock(tl);
+
+ if (rq) {
+ if (i915_request_wait(rq,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT) < 0) {
+ i915_request_put(rq);
+ err = -EINTR;
+ goto err_exit;
+ }
+
+ i915_request_put(rq);
+ }
+
eb->engine = ce->engine;
eb->context = ce;
return 0;
+
+err_exit:
+ mutex_lock(&tl->mutex);
+ intel_context_exit(ce);
+ intel_context_timeline_unlock(tl);
+err_unpin:
+ __eb_unpin_context(eb, ce);
+ return err;
}
-static void eb_unpin_context(struct i915_execbuffer *eb)
+static void eb_unpin_engine(struct i915_execbuffer *eb)
{
- intel_context_unpin(eb->context);
+ struct intel_context *ce = eb->context;
+ struct intel_timeline *tl = ce->timeline;
+
+ mutex_lock(&tl->mutex);
+ intel_context_exit(ce);
+ mutex_unlock(&tl->mutex);
+
+ __eb_unpin_context(eb, ce);
}
static unsigned int
@@ -2163,7 +2237,7 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
return -1;
}
- if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(i915, VCS1)) {
+ if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) {
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
@@ -2190,9 +2264,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
}
static int
-eb_select_engine(struct i915_execbuffer *eb,
- struct drm_file *file,
- struct drm_i915_gem_execbuffer2 *args)
+eb_pin_engine(struct i915_execbuffer *eb,
+ struct drm_file *file,
+ struct drm_i915_gem_execbuffer2 *args)
{
struct intel_context *ce;
unsigned int idx;
@@ -2207,7 +2281,7 @@ eb_select_engine(struct i915_execbuffer *eb,
if (IS_ERR(ce))
return PTR_ERR(ce);
- err = eb_pin_context(eb, ce);
+ err = __eb_pin_engine(eb, ce);
intel_context_put(ce);
return err;
@@ -2425,25 +2499,12 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (unlikely(err))
goto err_destroy;
- /*
- * Take a local wakeref for preparing to dispatch the execbuf as
- * we expect to access the hardware fairly frequently in the
- * process. Upon first dispatch, we acquire another prolonged
- * wakeref that we hold until the GPU has been idle for at least
- * 100ms.
- */
- intel_gt_pm_get(&eb.i915->gt);
+ err = eb_pin_engine(&eb, file, args);
+ if (unlikely(err))
+ goto err_context;
err = i915_mutex_lock_interruptible(dev);
if (err)
- goto err_rpm;
-
- err = eb_select_engine(&eb, file, args);
- if (unlikely(err))
- goto err_unlock;
-
- err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
- if (unlikely(err))
goto err_engine;
err = eb_relocate(&eb);
@@ -2570,6 +2631,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* to explicitly hold another reference here.
*/
eb.request->batch = eb.batch;
+ if (eb.batch->private)
+ intel_engine_pool_mark_active(eb.batch->private, eb.request);
trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb);
@@ -2594,15 +2657,15 @@ err_request:
err_batch_unpin:
if (eb.batch_flags & I915_DISPATCH_SECURE)
i915_vma_unpin(eb.batch);
+ if (eb.batch->private)
+ intel_engine_pool_put(eb.batch->private);
err_vma:
if (eb.exec)
eb_release_vmas(&eb);
-err_engine:
- eb_unpin_context(&eb);
-err_unlock:
mutex_unlock(&dev->struct_mutex);
-err_rpm:
- intel_gt_pm_put(&eb.i915->gt);
+err_engine:
+ eb_unpin_engine(&eb);
+err_context:
i915_gem_context_put(eb.gem_context);
err_destroy:
eb_destroy(&eb);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_fence.c b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
index cf0439e6be83..2f6100ec2608 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
@@ -69,8 +69,7 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
i915_sw_fence_init(&stub->chain, stub_notify);
dma_fence_init(&stub->dma, &stub_fence_ops, &stub->chain.wait.lock,
- to_i915(obj->base.dev)->mm.unordered_timeline,
- 0);
+ 0, 0);
if (i915_sw_fence_await_reservation(&stub->chain,
obj->base.resv, NULL,
@@ -78,7 +77,7 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
I915_FENCE_GFP) < 0)
goto err;
- reservation_object_add_excl_fence(obj->base.resv, &stub->dma);
+ dma_resv_add_excl_fence(obj->base.resv, &stub->dma);
return &stub->dma;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index dfa525e37eb8..595539a09e38 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -13,8 +13,8 @@
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
+#include "i915_trace.h"
#include "i915_vma.h"
-#include "intel_drv.h"
static inline bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
@@ -101,9 +101,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
up_write(&mm->mmap_sem);
if (IS_ERR_VALUE(addr))
goto err;
-
- /* This may race, but that's ok, it only gets set */
- WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
}
i915_gem_object_put(obj);
@@ -267,15 +264,15 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
/* Now pin it into the GTT as needed */
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
- PIN_NONBLOCK |
- PIN_NONFAULT);
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOSEARCH);
if (IS_ERR(vma)) {
/* Use a partial view if it is bigger than available space */
struct i915_ggtt_view view =
compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
unsigned int flags;
- flags = PIN_MAPPABLE;
+ flags = PIN_MAPPABLE | PIN_NOSEARCH;
if (view.type == I915_GGTT_VIEW_NORMAL)
flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
@@ -283,10 +280,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
* Userspace is now writing through an untracked VMA, abandon
* all hope that the hardware is able to track future writes.
*/
- obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
- if (IS_ERR(vma) && !view.type) {
+ if (IS_ERR(vma)) {
flags = PIN_MAPPABLE;
view.type = I915_GGTT_VIEW_PARTIAL;
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
@@ -310,14 +306,17 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
if (ret)
goto err_fence;
- /* Mark as being mmapped into userspace for later revocation */
assert_rpm_wakelock_held(rpm);
+
+ /* Mark as being mmapped into userspace for later revocation */
+ mutex_lock(&i915->ggtt.vm.mutex);
if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
+ mutex_unlock(&i915->ggtt.vm.mutex);
+
if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
- GEM_BUG_ON(!obj->userfault_count);
i915_vma_set_ggtt_write(vma);
@@ -412,8 +411,8 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
* requirement that operations to the GGTT be made holding the RPM
* wakeref.
*/
- lockdep_assert_held(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ mutex_lock(&i915->ggtt.vm.mutex);
if (!obj->userfault_count)
goto out;
@@ -430,6 +429,7 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
wmb();
out:
+ mutex_unlock(&i915->ggtt.vm.mutex);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index d5197a2a106f..d7855dc5a5c5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -29,6 +29,7 @@
#include "i915_gem_context.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
+#include "i915_trace.h"
static struct i915_global_object {
struct i915_global base;
@@ -45,16 +46,6 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
return kmem_cache_free(global.slab_objects, obj);
}
-static void
-frontbuffer_retire(struct i915_active_request *active,
- struct i915_request *request)
-{
- struct drm_i915_gem_object *obj =
- container_of(active, typeof(*obj), frontbuffer_write);
-
- intel_fb_obj_flush(obj, ORIGIN_CS);
-}
-
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops)
{
@@ -63,17 +54,14 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
spin_lock_init(&obj->vma.lock);
INIT_LIST_HEAD(&obj->vma.list);
+ INIT_LIST_HEAD(&obj->mm.link);
+
INIT_LIST_HEAD(&obj->lut_list);
- INIT_LIST_HEAD(&obj->batch_pool_link);
init_rcu_head(&obj->rcu);
obj->ops = ops;
- obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
- i915_active_request_init(&obj->frontbuffer_write,
- NULL, frontbuffer_retire);
-
obj->mm.madv = I915_MADV_WILLNEED;
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->mm.get_page.lock);
@@ -152,7 +140,7 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
container_of(head, typeof(*obj), rcu);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- reservation_object_fini(&obj->base._resv);
+ dma_resv_fini(&obj->base._resv);
i915_gem_object_free(obj);
GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
@@ -185,7 +173,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
- GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
GEM_BUG_ON(!list_empty(&obj->lut_list));
atomic_set(&obj->mm.pages_pin_count, 0);
@@ -209,48 +196,18 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
- struct llist_node *freed;
-
- /* Free the oldest, most stale object to keep the free_list short */
- freed = NULL;
- if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
- /* Only one consumer of llist_del_first() allowed */
- spin_lock(&i915->mm.free_lock);
- freed = llist_del_first(&i915->mm.free_list);
- spin_unlock(&i915->mm.free_lock);
- }
- if (unlikely(freed)) {
- freed->next = NULL;
+ struct llist_node *freed = llist_del_all(&i915->mm.free_list);
+
+ if (unlikely(freed))
__i915_gem_free_objects(i915, freed);
- }
}
static void __i915_gem_free_work(struct work_struct *work)
{
struct drm_i915_private *i915 =
container_of(work, struct drm_i915_private, mm.free_work);
- struct llist_node *freed;
- /*
- * All file-owned VMA should have been released by this point through
- * i915_gem_close_object(), or earlier by i915_gem_context_close().
- * However, the object may also be bound into the global GTT (e.g.
- * older GPUs without per-process support, or for direct access through
- * the GTT either for the user or for scanout). Those VMA still need to
- * unbound now.
- */
-
- spin_lock(&i915->mm.free_lock);
- while ((freed = llist_del_all(&i915->mm.free_list))) {
- spin_unlock(&i915->mm.free_lock);
-
- __i915_gem_free_objects(i915, freed);
- if (need_resched())
- return;
-
- spin_lock(&i915->mm.free_lock);
- }
- spin_unlock(&i915->mm.free_lock);
+ i915_gem_flush_free_objects(i915);
}
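The rewritten flush leans on the lock-free llist API: producers push with llist_add() from any context, and a single llist_del_all() atomically claims everything queued so far, which is why the old free_lock dance disappears. The producer/consumer pair in miniature (kernel-style sketch; my_obj is a placeholder type):

#include <linux/llist.h>
#include <linux/slab.h>

struct my_obj {
	struct llist_node freed;
	/* ... payload ... */
};

static LLIST_HEAD(free_list);

/* Producer: queue an object for deferred freeing, no lock required. */
static void defer_free(struct my_obj *obj)
{
	llist_add(&obj->freed, &free_list);
}

/* Consumer (worker): atomically take the whole batch and release it. */
static void free_worker(void)
{
	struct my_obj *obj, *next;

	llist_for_each_entry_safe(obj, next, llist_del_all(&free_list), freed)
		kfree(obj);
}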
void i915_gem_free_object(struct drm_gem_object *gem_obj)
@@ -258,6 +215,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));
+
/*
* Before we free the object, make sure any pure RCU-only
* read-side critical sections are complete, e.g.
@@ -273,14 +232,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
* or else we may oom whilst there are plenty of deferred
* freed objects.
*/
- if (i915_gem_object_has_pages(obj) &&
- i915_gem_object_is_shrinkable(obj)) {
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
- list_del_init(&obj->mm.link);
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- }
+ i915_gem_object_make_unshrinkable(obj);
/*
* Since we require blocking on struct_mutex to unbind the freed
@@ -296,13 +248,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
queue_work(i915->wq, &i915->mm.free_work);
}
-static inline enum fb_op_origin
-fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
-{
- return (domain == I915_GEM_DOMAIN_GTT ?
- obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
-}
-
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
return !(obj->cache_level == I915_CACHE_NONE ||
@@ -325,8 +270,7 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
for_each_ggtt_vma(vma, obj)
intel_gt_flush_ggtt_writes(vma->vm->gt);
- intel_fb_obj_flush(obj,
- fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
+ intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
for_each_ggtt_vma(vma, obj) {
if (vma->iomap)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 67aea07ea019..5efb9936e05b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -99,22 +99,22 @@ i915_gem_object_put(struct drm_i915_gem_object *obj)
__drm_gem_object_put(&obj->base);
}
-#define assert_object_held(obj) reservation_object_assert_held((obj)->base.resv)
+#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
- reservation_object_lock(obj->base.resv, NULL);
+ dma_resv_lock(obj->base.resv, NULL);
}
static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
- return reservation_object_lock_interruptible(obj->base.resv, NULL);
+ return dma_resv_lock_interruptible(obj->base.resv, NULL);
}
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
- reservation_object_unlock(obj->base.resv);
+ dma_resv_unlock(obj->base.resv);
}
struct dma_fence *
@@ -161,7 +161,7 @@ i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
- return READ_ONCE(obj->framebuffer_references);
+ return READ_ONCE(obj->frontbuffer);
}
static inline unsigned int
@@ -367,7 +367,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
struct dma_fence *fence;
rcu_read_lock();
- fence = reservation_object_get_excl_rcu(obj->base.resv);
+ fence = dma_resv_get_excl_rcu(obj->base.resv);
rcu_read_unlock();
if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
@@ -394,6 +394,10 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
+void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
+void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
+void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
+
static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
if (obj->cache_dirty)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index 685064af32d1..6415f9a17e2d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -3,44 +3,124 @@
* Copyright © 2019 Intel Corporation
*/
-#include "i915_gem_object_blt.h"
-
+#include "i915_drv.h"
+#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_pool.h"
+#include "gt/intel_gt.h"
#include "i915_gem_clflush.h"
-#include "intel_drv.h"
+#include "i915_gem_object_blt.h"
-int intel_emit_vma_fill_blt(struct i915_request *rq,
- struct i915_vma *vma,
- u32 value)
+struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
+ struct i915_vma *vma,
+ u32 value)
{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 8);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- if (INTEL_GEN(rq->i915) >= 8) {
- *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
- *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
- *cs++ = 0;
- *cs++ = vma->size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
- *cs++ = lower_32_bits(vma->node.start);
- *cs++ = upper_32_bits(vma->node.start);
- *cs++ = value;
- *cs++ = MI_NOOP;
- } else {
- *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
- *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
- *cs++ = 0;
- *cs++ = vma->size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
- *cs++ = vma->node.start;
- *cs++ = value;
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
+ struct drm_i915_private *i915 = ce->vm->i915;
+ const u32 block_size = S16_MAX * PAGE_SIZE;
+ struct intel_engine_pool_node *pool;
+ struct i915_vma *batch;
+ u64 offset;
+ u64 count;
+ u64 rem;
+ u32 size;
+ u32 *cmd;
+ int err;
+
+ GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
+ intel_engine_pm_get(ce->engine);
+
+ count = div_u64(vma->size, block_size);
+ size = (1 + 8 * count) * sizeof(u32);
+ size = round_up(size, PAGE_SIZE);
+ pool = intel_engine_pool_get(&ce->engine->pool, size);
+ if (IS_ERR(pool)) {
+ err = PTR_ERR(pool);
+ goto out_pm;
+ }
+
+ cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto out_put;
+ }
+
+ rem = vma->size;
+ offset = vma->node.start;
+
+ do {
+ u32 size = min_t(u64, rem, block_size);
+
+ GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
+
+ if (INTEL_GEN(i915) >= 8) {
+ *cmd++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
+ *cmd++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
+ *cmd++ = 0;
+ *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cmd++ = lower_32_bits(offset);
+ *cmd++ = upper_32_bits(offset);
+ *cmd++ = value;
+ } else {
+ *cmd++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
+ *cmd++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
+ *cmd++ = 0;
+ *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cmd++ = offset;
+ *cmd++ = value;
+ }
+
+ /* Allow ourselves to be preempted in between blocks. */
+ *cmd++ = MI_ARB_CHECK;
+
+ offset += size;
+ rem -= size;
+ } while (rem);
+
+ *cmd = MI_BATCH_BUFFER_END;
+ intel_gt_chipset_flush(ce->vm->gt);
+
+ i915_gem_object_unpin_map(pool->obj);
+
+ batch = i915_vma_instance(pool->obj, ce->vm, NULL);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_put;
}
- intel_ring_advance(rq, cs);
+ err = i915_vma_pin(batch, 0, 0, PIN_USER);
+ if (unlikely(err))
+ goto out_put;
+
+ batch->private = pool;
+ return batch;
- return 0;
+out_put:
+ intel_engine_pool_put(pool);
+out_pm:
+ intel_engine_pm_put(ce->engine);
+ return ERR_PTR(err);
+}
+
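The batch sizing in intel_emit_vma_fill_blt() deserves a note: each gen8+ block costs 8 dwords (7 for XY_COLOR_BLT plus one MI_ARB_CHECK), and one trailing dword is needed for MI_BATCH_BUFFER_END, hence (1 + 8 * count) * sizeof(u32) rounded up to a page. Since div_u64() rounds down, an unaligned VMA emits one more block than 'count'; in cases like the one worked through below, the page rounding covers that slack:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t PAGE = 4096, S16_MAX = 32767;
	const uint64_t block = S16_MAX * PAGE;	/* ~128 MiB per blit block */
	const uint64_t vma_size = 1ull << 30;	/* 1 GiB object */

	uint64_t count = vma_size / block;		  /* 8 (rounded down) */
	uint64_t cmd_bytes = (1 + 8 * count) * 4;	  /* 260 bytes */
	uint64_t batch = (cmd_bytes + PAGE - 1) & ~(PAGE - 1); /* 4096 */

	/* The loop actually emits 9 blocks (8 full + 1 remainder), i.e.
	 * 9 * 8 + 1 = 73 dwords = 292 bytes, still well inside one page. */
	printf("count=%llu cmd=%lluB batch=%lluB\n",
	       (unsigned long long)count,
	       (unsigned long long)cmd_bytes,
	       (unsigned long long)batch);
	return 0;
}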
+int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
+{
+ int err;
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
+ if (unlikely(err))
+ return err;
+
+ return intel_engine_pool_mark_active(vma->private, rq);
+}
+
+void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma)
+{
+ i915_vma_unpin(vma);
+ intel_engine_pool_put(vma->private);
+ intel_engine_pm_put(ce->engine);
}
int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
@@ -48,6 +128,7 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
u32 value)
{
struct i915_request *rq;
+ struct i915_vma *batch;
struct i915_vma *vma;
int err;
@@ -65,12 +146,22 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
i915_gem_object_unlock(obj);
}
- rq = i915_request_create(ce);
+ batch = intel_emit_vma_fill_blt(ce, vma, value);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_unpin;
+ }
+
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_unpin;
+ goto out_batch;
}
+ err = intel_emit_vma_mark_active(batch, rq);
+ if (unlikely(err))
+ goto out_request;
+
err = i915_request_await_object(rq, obj, true);
if (unlikely(err))
goto out_request;
@@ -82,22 +173,229 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
}
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (unlikely(err))
goto out_request;
- err = intel_emit_vma_fill_blt(rq, vma, value);
+ err = ce->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ 0);
out_request:
if (unlikely(err))
i915_request_skip(rq, err);
i915_request_add(rq);
+out_batch:
+ intel_emit_vma_release(ce, batch);
out_unpin:
i915_vma_unpin(vma);
return err;
}
+struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
+ struct i915_vma *src,
+ struct i915_vma *dst)
+{
+ struct drm_i915_private *i915 = ce->vm->i915;
+ const u32 block_size = S16_MAX * PAGE_SIZE;
+ struct intel_engine_pool_node *pool;
+ struct i915_vma *batch;
+ u64 src_offset, dst_offset;
+ u64 count, rem;
+ u32 size, *cmd;
+ int err;
+
+ GEM_BUG_ON(src->size != dst->size);
+
+ GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
+ intel_engine_pm_get(ce->engine);
+
+ count = div_u64(dst->size, block_size);
+ size = (1 + 11 * count) * sizeof(u32);
+ size = round_up(size, PAGE_SIZE);
+ pool = intel_engine_pool_get(&ce->engine->pool, size);
+ if (IS_ERR(pool)) {
+ err = PTR_ERR(pool);
+ goto out_pm;
+ }
+
+ cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto out_put;
+ }
+
+ rem = src->size;
+ src_offset = src->node.start;
+ dst_offset = dst->node.start;
+
+ do {
+ size = min_t(u64, rem, block_size);
+ GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
+
+ if (INTEL_GEN(i915) >= 9) {
+ *cmd++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
+ *cmd++ = BLT_DEPTH_32 | PAGE_SIZE;
+ *cmd++ = 0;
+ *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cmd++ = lower_32_bits(dst_offset);
+ *cmd++ = upper_32_bits(dst_offset);
+ *cmd++ = 0;
+ *cmd++ = PAGE_SIZE;
+ *cmd++ = lower_32_bits(src_offset);
+ *cmd++ = upper_32_bits(src_offset);
+ } else if (INTEL_GEN(i915) >= 8) {
+ *cmd++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2);
+ *cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
+ *cmd++ = 0;
+ *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cmd++ = lower_32_bits(dst_offset);
+ *cmd++ = upper_32_bits(dst_offset);
+ *cmd++ = 0;
+ *cmd++ = PAGE_SIZE;
+ *cmd++ = lower_32_bits(src_offset);
+ *cmd++ = upper_32_bits(src_offset);
+ } else {
+ *cmd++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
+ *cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
+ *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE;
+ *cmd++ = dst_offset;
+ *cmd++ = PAGE_SIZE;
+ *cmd++ = src_offset;
+ }
+
+ /* Allow ourselves to be preempted in between blocks. */
+ *cmd++ = MI_ARB_CHECK;
+
+ src_offset += size;
+ dst_offset += size;
+ rem -= size;
+ } while (rem);
+
+ *cmd = MI_BATCH_BUFFER_END;
+ intel_gt_chipset_flush(ce->vm->gt);
+
+ i915_gem_object_unpin_map(pool->obj);
+
+ batch = i915_vma_instance(pool->obj, ce->vm, NULL);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(batch, 0, 0, PIN_USER);
+ if (unlikely(err))
+ goto out_put;
+
+ batch->private = pool;
+ return batch;
+
+out_put:
+ intel_engine_pool_put(pool);
+out_pm:
+ intel_engine_pm_put(ce->engine);
+ return ERR_PTR(err);
+}
+
+static int move_to_gpu(struct i915_vma *vma, struct i915_request *rq, bool write)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ if (obj->cache_dirty & ~obj->cache_coherent)
+ i915_gem_clflush_object(obj, 0);
+
+ return i915_request_await_object(rq, obj, write);
+}
+
+int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
+ struct drm_i915_gem_object *dst,
+ struct intel_context *ce)
+{
+ struct drm_gem_object *objs[] = { &src->base, &dst->base };
+ struct i915_address_space *vm = ce->vm;
+ struct i915_vma *vma[2], *batch;
+ struct ww_acquire_ctx acquire;
+ struct i915_request *rq;
+ int err, i;
+
+ vma[0] = i915_vma_instance(src, vm, NULL);
+ if (IS_ERR(vma[0]))
+ return PTR_ERR(vma[0]);
+
+ err = i915_vma_pin(vma[0], 0, 0, PIN_USER);
+ if (unlikely(err))
+ return err;
+
+ vma[1] = i915_vma_instance(dst, vm, NULL);
+ if (IS_ERR(vma[1]))
+ goto out_unpin_src;
+
+ err = i915_vma_pin(vma[1], 0, 0, PIN_USER);
+ if (unlikely(err))
+ goto out_unpin_src;
+
+ batch = intel_emit_vma_copy_blt(ce, vma[0], vma[1]);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_unpin_dst;
+ }
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_batch;
+ }
+
+ err = intel_emit_vma_mark_active(batch, rq);
+ if (unlikely(err))
+ goto out_request;
+
+ err = drm_gem_lock_reservations(objs, ARRAY_SIZE(objs), &acquire);
+ if (unlikely(err))
+ goto out_request;
+
+ for (i = 0; i < ARRAY_SIZE(vma); i++) {
+ err = move_to_gpu(vma[i], rq, i);
+ if (unlikely(err))
+ goto out_unlock;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vma); i++) {
+ unsigned int flags = i ? EXEC_OBJECT_WRITE : 0;
+
+ err = i915_vma_move_to_active(vma[i], rq, flags);
+ if (unlikely(err))
+ goto out_unlock;
+ }
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (unlikely(err))
+ goto out_unlock;
+ }
+
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ 0);
+out_unlock:
+ drm_gem_unlock_reservations(objs, ARRAY_SIZE(objs), &acquire);
+out_request:
+ if (unlikely(err))
+ i915_request_skip(rq, err);
+
+ i915_request_add(rq);
+out_batch:
+ intel_emit_vma_release(ce, batch);
+out_unpin_dst:
+ i915_vma_unpin(vma[1]);
+out_unpin_src:
+ i915_vma_unpin(vma[0]);
+ return err;
+}
+
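i915_gem_object_copy_blt() takes both objects' reservation locks through a single ww_acquire_ctx, which is what makes grabbing two dma_resv locks safe regardless of the order another thread picks: the wound/wait protocol backs one contender off and retries. The core of that pattern using the same DRM helper (a sketch):

#include <drm/drm_gem.h>

static int with_both_locked(struct drm_gem_object *a, struct drm_gem_object *b)
{
	struct drm_gem_object *objs[] = { a, b };
	struct ww_acquire_ctx acquire;
	int err;

	/* Locks every objs[i]->resv, handling -EDEADLK back-off/retry. */
	err = drm_gem_lock_reservations(objs, ARRAY_SIZE(objs), &acquire);
	if (err)
		return err;

	/* ... both reservations held: await/install fences here ... */

	drm_gem_unlock_reservations(objs, ARRAY_SIZE(objs), &acquire);
	return 0;
}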
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_object_blt.c"
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
index 7ec7de6ac0c0..243a43a87824 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
@@ -8,17 +8,30 @@
#include <linux/types.h>
+#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_pool.h"
+#include "i915_vma.h"
+
struct drm_i915_gem_object;
-struct intel_context;
-struct i915_request;
-struct i915_vma;
-int intel_emit_vma_fill_blt(struct i915_request *rq,
- struct i915_vma *vma,
- u32 value);
+struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
+ struct i915_vma *vma,
+ u32 value);
+
+struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
+ struct i915_vma *src,
+ struct i915_vma *dst);
+
+int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq);
+void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma);
int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
struct intel_context *ce,
u32 value);
+int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
+ struct drm_i915_gem_object *dst,
+ struct intel_context *ce);
+
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 34b51fad02de..ede0eb4218a8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -13,6 +13,7 @@
#include "i915_selftest.h"
struct drm_i915_gem_object;
+struct intel_frontbuffer;
/*
* struct i915_lut_handle tracks the fast lookups from handle to vma used
@@ -114,7 +115,6 @@ struct drm_i915_gem_object {
unsigned int userfault_count;
struct list_head userfault_link;
- struct list_head batch_pool_link;
I915_SELFTEST_DECLARE(struct list_head st_link);
/*
@@ -142,9 +142,7 @@ struct drm_i915_gem_object {
*/
u16 write_domain;
- atomic_t frontbuffer_bits;
- unsigned int frontbuffer_ggtt_origin; /* write once */
- struct i915_active_request frontbuffer_write;
+ struct intel_frontbuffer *frontbuffer;
/** Current tiling stride for the object, if it's tiled. */
unsigned int tiling_and_stride;
@@ -225,9 +223,6 @@ struct drm_i915_gem_object {
bool quirked:1;
} mm;
- /** References from framebuffers, locks out tiling changes. */
- unsigned int framebuffer_references;
-
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 65eb430cedba..18f0ce0135c1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -153,24 +153,13 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *pages;
pages = fetch_and_zero(&obj->mm.pages);
if (IS_ERR_OR_NULL(pages))
return pages;
- if (i915_gem_object_is_shrinkable(obj)) {
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
-
- list_del(&obj->mm.link);
- i915->mm.shrink_count--;
- i915->mm.shrink_memory -= obj->base.size;
-
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- }
+ i915_gem_object_make_unshrinkable(obj);
if (obj->mm.mapping) {
void *ptr;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 102fd7a23d3d..768356908160 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -133,9 +133,16 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
drm_pci_free(obj->base.dev, obj->phys_handle);
}
+static void phys_release(struct drm_i915_gem_object *obj)
+{
+ fput(obj->base.filp);
+}
+
static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
.get_pages = i915_gem_object_get_pages_phys,
.put_pages = i915_gem_object_put_pages_phys,
+
+ .release = phys_release,
};
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index b5561cbdc5ea..92e53c25424c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -34,12 +34,9 @@ static void i915_gem_park(struct drm_i915_private *i915)
lockdep_assert_held(&i915->drm.struct_mutex);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, i915, id)
call_idle_barriers(engine); /* cleanup after wedging */
- i915_gem_batch_pool_fini(&engine->batch_pool);
- }
- intel_timelines_park(i915);
i915_vma_parked(i915);
i915_globals_park();
@@ -132,7 +129,9 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt)
}
} while (i915_retire_requests(gt->i915) && result);
- GEM_BUG_ON(gt->awake);
+ if (intel_gt_pm_wait_for_idle(gt))
+ result = false;
+
return result;
}
@@ -163,13 +162,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
mutex_unlock(&i915->drm.struct_mutex);
- /*
- * Assert that we successfully flushed all the work and
- * reset the GPU back to its idle, low power state.
- */
- GEM_BUG_ON(i915->gt.awake);
- flush_work(&i915->gem.idle_work);
-
cancel_delayed_work_sync(&i915->gt.hangcheck.work);
i915_gem_drain_freed_objects(i915);
@@ -246,8 +238,6 @@ void i915_gem_resume(struct drm_i915_private *i915)
{
GEM_TRACE("\n");
- WARN_ON(i915->gt.awake);
-
mutex_lock(&i915->drm.struct_mutex);
intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index d2a1158868e7..4c4954e8ce0a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -10,6 +10,7 @@
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
+#include "i915_trace.h"
/*
* Move pages to appropriate lru and release the pagevec, decrementing the
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 3f4c6bdcc3c3..edd21d14e64f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -459,13 +459,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
return NOTIFY_DONE;
}
-/**
- * i915_gem_shrinker_register - Register the i915 shrinker
- * @i915: i915 device
- *
- * This function registers and sets up the i915 shrinker and OOM handler.
- */
-void i915_gem_shrinker_register(struct drm_i915_private *i915)
+void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
{
i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
@@ -480,13 +474,7 @@ void i915_gem_shrinker_register(struct drm_i915_private *i915)
WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}
-/**
- * i915_gem_shrinker_unregister - Unregisters the i915 shrinker
- * @i915: i915 device
- *
- * This function unregisters the i915 shrinker and OOM handler.
- */
-void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
+void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
@@ -530,3 +518,61 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
if (unlock)
mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
}
+
+#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
+
+void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
+{
+ /*
+ * We can only be called while the pages are pinned or when
+ * the pages are released. If pinned, we should only be called
+ * from a single caller under controlled conditions; and on release
+ * only one caller may release us. The two paths must never overlap.
+ */
+ if (!list_empty(&obj->mm.link)) { /* pinned by caller */
+ struct drm_i915_private *i915 = obj_to_i915(obj);
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ GEM_BUG_ON(list_empty(&obj->mm.link));
+
+ list_del_init(&obj->mm.link);
+ i915->mm.shrink_count--;
+ i915->mm.shrink_memory -= obj->base.size;
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ }
+}
+
+static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
+ struct list_head *head)
+{
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+ GEM_BUG_ON(!list_empty(&obj->mm.link));
+
+ if (i915_gem_object_is_shrinkable(obj)) {
+ struct drm_i915_private *i915 = obj_to_i915(obj);
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ GEM_BUG_ON(!kref_read(&obj->base.refcount));
+
+ list_add_tail(&obj->mm.link, head);
+ i915->mm.shrink_count++;
+ i915->mm.shrink_memory += obj->base.size;
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ }
+}
+
+void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
+{
+ __i915_gem_object_make_shrinkable(obj,
+ &obj_to_i915(obj)->mm.shrink_list);
+}
+
+void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
+{
+ __i915_gem_object_make_shrinkable(obj,
+ &obj_to_i915(obj)->mm.purge_list);
+}
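The three helpers above give object owners explicit control over shrinker visibility. A hedged usage sketch of the intended pairing (the pin/unpin calls are assumed from the existing API; error handling trimmed):

	/* Sketch: keep an object off mm.shrink_list while its pages are in use. */
	static int use_pages(struct drm_i915_gem_object *obj)
	{
		int err;

		err = i915_gem_object_pin_pages(obj);
		if (err)
			return err;

		i915_gem_object_make_unshrinkable(obj);	/* shrinker skips obj */
		/* ... access the backing store ... */
		i915_gem_object_make_shrinkable(obj);	/* back on shrink_list */

		i915_gem_object_unpin_pages(obj);
		return 0;
	}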
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h
new file mode 100644
index 000000000000..b397d7785789
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_SHRINKER_H__
+#define __I915_GEM_SHRINKER_H__
+
+#include <linux/bits.h>
+
+struct drm_i915_private;
+struct mutex;
+
+/* i915_gem_shrinker.c */
+unsigned long i915_gem_shrink(struct drm_i915_private *i915,
+ unsigned long target,
+ unsigned long *nr_scanned,
+ unsigned flags);
+#define I915_SHRINK_UNBOUND BIT(0)
+#define I915_SHRINK_BOUND BIT(1)
+#define I915_SHRINK_ACTIVE BIT(2)
+#define I915_SHRINK_VMAPS BIT(3)
+#define I915_SHRINK_WRITEBACK BIT(4)
+
+unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
+void i915_gem_driver_register__shrinker(struct drm_i915_private *i915);
+void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915);
+void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
+ struct mutex *mutex);
+
+#endif /* __I915_GEM_SHRINKER_H__ */
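The I915_SHRINK_* bits combine freely. A hedged example built only from what this header declares (the target is taken to be in pages, per shrinker convention; treat that as an assumption):

	/* Sketch: try to reclaim ~64MiB from bound and unbound objects alike. */
	static unsigned long reclaim_64m(struct drm_i915_private *i915)
	{
		unsigned long scanned = 0;

		return i915_gem_shrink(i915, SZ_64M >> PAGE_SHIFT, &scanned,
				       I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
	}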
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 639c852bad12..aa533b4ab5f5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -11,6 +11,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_gem_stolen.h"
/*
* The BIOS typically reserves some of the system's memory for the exclusive
@@ -362,12 +363,16 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->mm.stolen_lock);
if (intel_vgpu_active(dev_priv)) {
- DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
+ dev_notice(dev_priv->drm.dev,
+ "%s, disabling use of stolen memory\n",
+ "iGVT-g active");
return 0;
}
if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
- DRM_INFO("DMAR active, disabling use of stolen memory\n");
+ dev_notice(dev_priv->drm.dev,
+ "%s, disabling use of stolen memory\n",
+ "DMAR active");
return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
new file mode 100644
index 000000000000..2289644d8604
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_STOLEN_H__
+#define __I915_GEM_STOLEN_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct drm_mm_node;
+struct drm_i915_gem_object;
+
+int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *node, u64 size,
+ unsigned alignment);
+int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *node, u64 size,
+ unsigned alignment, u64 start,
+ u64 end);
+void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *node);
+int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
+void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ resource_size_t size);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
+ resource_size_t stolen_offset,
+ resource_size_t gtt_offset,
+ resource_size_t size);
+
+#endif /* __I915_GEM_STOLEN_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index b9d2bb15e4a6..74da35611d7c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -12,11 +12,10 @@
#include <drm/i915_drm.h>
+#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
-#include "i915_trace.h"
-#include "intel_drv.h"
struct i915_mm_struct {
struct mm_struct *mm;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index fa46a54bcbe7..8af55cd3e690 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -31,7 +31,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
}
static long
-i915_gem_object_wait_reservation(struct reservation_object *resv,
+i915_gem_object_wait_reservation(struct dma_resv *resv,
unsigned int flags,
long timeout)
{
@@ -43,7 +43,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
unsigned int count, i;
int ret;
- ret = reservation_object_get_fences_rcu(resv,
+ ret = dma_resv_get_fences_rcu(resv,
&excl, &count, &shared);
if (ret)
return ret;
@@ -72,7 +72,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
*/
prune_fences = count && timeout >= 0;
} else {
- excl = reservation_object_get_excl_rcu(resv);
+ excl = dma_resv_get_excl_rcu(resv);
}
if (excl && timeout >= 0)
@@ -84,10 +84,10 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
* Opportunistically prune the fences iff we know they have *all* been
* signaled.
*/
- if (prune_fences && reservation_object_trylock(resv)) {
- if (reservation_object_test_signaled_rcu(resv, true))
- reservation_object_add_excl_fence(resv, NULL);
- reservation_object_unlock(resv);
+ if (prune_fences && dma_resv_trylock(resv)) {
+ if (dma_resv_test_signaled_rcu(resv, true))
+ dma_resv_add_excl_fence(resv, NULL);
+ dma_resv_unlock(resv);
}
return timeout;
@@ -140,7 +140,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int count, i;
int ret;
- ret = reservation_object_get_fences_rcu(obj->base.resv,
+ ret = dma_resv_get_fences_rcu(obj->base.resv,
&excl, &count, &shared);
if (ret)
return ret;
@@ -152,7 +152,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
kfree(shared);
} else {
- excl = reservation_object_get_excl_rcu(obj->base.resv);
+ excl = dma_resv_get_excl_rcu(obj->base.resv);
}
if (excl) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index 099f3397aada..5e6e8c91ab38 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -20,31 +20,18 @@ int i915_gemfs_init(struct drm_i915_private *i915)
if (!type)
return -ENODEV;
- gemfs = kern_mount(type);
- if (IS_ERR(gemfs))
- return PTR_ERR(gemfs);
-
/*
- * Enable huge-pages for objects that are at least HPAGE_PMD_SIZE, most
- * likely 2M. Note that within_size may overallocate huge-pages, if say
- * we allocate an object of size 2M + 4K, we may get 2M + 2M, but under
- * memory pressure shmem should split any huge-pages which can be
- * shrunk.
+ * By creating our own shmemfs mountpoint, we can pass in
+ * mount flags that better match our usecase.
+ *
+ * One example, although it is probably better with a per-file
+ * control, is selecting huge page allocations ("huge=within_size").
+ * Currently unused due to bandwidth issues (slow reads) on Broadwell+.
*/
- if (has_transparent_hugepage()) {
- struct super_block *sb = gemfs->mnt_sb;
- /* FIXME: Disabled until we get W/A for read BW issue. */
- char options[] = "huge=never";
- int flags = 0;
- int err;
-
- err = sb->s_op->remount_fs(sb, &flags, options);
- if (err) {
- kern_unmount(gemfs);
- return err;
- }
- }
+ gemfs = kern_mount(type);
+ if (IS_ERR(gemfs))
+ return PTR_ERR(gemfs);
i915->mm.gemfs = gemfs;
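A worked example behind the "huge=within_size" remark, using the numbers from the comment this hunk removes: with 2M huge pages, an object of 2M + 4K may be backed by 2M + 2M, i.e. within_size can overallocate by almost a full huge page, relying on shmem to split huge pages back under memory pressure. That overallocation is acceptable; the option stays off only because of the unresolved slow-read issue on Broadwell+.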
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 6cbd4a668c9a..8de83c6d81f5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -879,126 +879,22 @@ out_object_put:
return err;
}
-static struct i915_vma *
-gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
- const int gen = INTEL_GEN(i915);
- unsigned int count = vma->size >> PAGE_SHIFT;
- struct drm_i915_gem_object *obj;
- struct i915_vma *batch;
- unsigned int size;
- u32 *cmd;
- int n;
- int err;
-
- size = (1 + 4 * count) * sizeof(u32);
- size = round_up(size, PAGE_SIZE);
- obj = i915_gem_object_create_internal(i915, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto err;
- }
-
- offset += vma->node.start;
-
- for (n = 0; n < count; n++) {
- if (gen >= 8) {
- *cmd++ = MI_STORE_DWORD_IMM_GEN4;
- *cmd++ = lower_32_bits(offset);
- *cmd++ = upper_32_bits(offset);
- *cmd++ = val;
- } else if (gen >= 4) {
- *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
- (gen < 6 ? MI_USE_GGTT : 0);
- *cmd++ = 0;
- *cmd++ = offset;
- *cmd++ = val;
- } else {
- *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *cmd++ = offset;
- *cmd++ = val;
- }
-
- offset += PAGE_SIZE;
- }
-
- *cmd = MI_BATCH_BUFFER_END;
- intel_gt_chipset_flush(vma->vm->gt);
-
- i915_gem_object_unpin_map(obj);
-
- batch = i915_vma_instance(obj, vma->vm, NULL);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto err;
- }
-
- err = i915_vma_pin(batch, 0, 0, PIN_USER);
- if (err)
- goto err;
-
- return batch;
-
-err:
- i915_gem_object_put(obj);
-
- return ERR_PTR(err);
-}
-
static int gpu_write(struct i915_vma *vma,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
- u32 dword,
- u32 value)
+ u32 dw,
+ u32 val)
{
- struct i915_request *rq;
- struct i915_vma *batch;
int err;
- GEM_BUG_ON(!intel_engine_can_store_dword(engine));
-
- batch = gpu_write_dw(vma, dword * sizeof(u32), value);
- if (IS_ERR(batch))
- return PTR_ERR(batch);
-
- rq = igt_request_alloc(ctx, engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_batch;
- }
-
- i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, rq, 0);
- i915_vma_unlock(batch);
- if (err)
- goto err_request;
-
- i915_vma_lock(vma);
- err = i915_gem_object_set_to_gtt_domain(vma->obj, false);
- if (err == 0)
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
+ i915_gem_object_lock(vma->obj);
+ err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+ i915_gem_object_unlock(vma->obj);
if (err)
- goto err_request;
-
- err = engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- 0);
-err_request:
- if (err)
- i915_request_skip(rq, err);
- i915_request_add(rq);
-err_batch:
- i915_vma_unpin(batch);
- i915_vma_close(batch);
- i915_vma_put(batch);
+ return err;
- return err;
+ return igt_gpu_fill_dw(vma, ctx, engine, dw * sizeof(u32),
+ vma->size >> PAGE_SHIFT, val);
}
static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
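Worked example of the new call: for dw = 3 on a 16-page vma, igt_gpu_fill_dw() stores val at byte offset 3 * sizeof(u32) = 12 within every page, i.e. at vma-relative offsets 12, 4108, 8204, ... — the same per-page store pattern the deleted gpu_write_dw() emitted by hand.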
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 275c28926067..d8804a847945 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -9,6 +9,7 @@
#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
+#include "huge_gem_object.h"
#include "mock_context.h"
static int igt_client_fill(void *arg)
@@ -24,15 +25,19 @@ static int igt_client_fill(void *arg)
prandom_seed_state(&prng, i915_selftest.random_seed);
do {
- u32 sz = prandom_u32_state(&prng) % SZ_32M;
+ const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
+ u32 phys_sz = sz % (max_block_size + 1);
u32 val = prandom_u32_state(&prng);
u32 i;
sz = round_up(sz, PAGE_SIZE);
+ phys_sz = round_up(phys_sz, PAGE_SIZE);
- pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val);
+ pr_debug("%s with phys_sz=%x, sz=%x, val=%x\n", __func__,
+ phys_sz, sz, val);
- obj = i915_gem_object_create_internal(i915, sz);
+ obj = huge_gem_object(i915, phys_sz, sz);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_flush;
@@ -54,7 +59,8 @@ static int igt_client_fill(void *arg)
* values after we do the set_to_cpu_domain and pick it up as a
* test failure.
*/
- memset32(vaddr, val ^ 0xdeadbeaf, obj->base.size / sizeof(u32));
+ memset32(vaddr, val ^ 0xdeadbeaf,
+ huge_gem_object_phys_size(obj) / sizeof(u32));
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
obj->cache_dirty = true;
@@ -71,7 +77,7 @@ static int igt_client_fill(void *arg)
if (err)
goto err_unpin;
- for (i = 0; i < obj->base.size / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
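A quick arithmetic check on the new size selection, assuming the usual 4K PAGE_SIZE: max_block_size = S16_MAX * PAGE_SIZE = 32767 * 4096, just under 128M. phys_sz is therefore capped at the largest block the blitter helpers can emit in one command (the row count being a signed 16-bit field — treat that rationale as an assumption), while the virtual size sz may grow to a sixteenth of the context's address space (ce->vm->total >> 4).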
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index a1a4b53cdc4a..0ff7a89aadca 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -228,7 +228,9 @@ static int gpu_set(struct drm_i915_gem_object *obj,
intel_ring_advance(rq, cs);
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
i915_vma_unpin(vma);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 7f9f6701b32c..3e6f4a65d356 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -156,70 +156,6 @@ out_unlock:
return err;
}
-static struct i915_vma *
-gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
-{
- struct drm_i915_gem_object *obj;
- const int gen = INTEL_GEN(vma->vm->i915);
- unsigned long n, size;
- u32 *cmd;
- int err;
-
- size = (4 * count + 1) * sizeof(u32);
- size = round_up(size, PAGE_SIZE);
- obj = i915_gem_object_create_internal(vma->vm->i915, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto err;
- }
-
- GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
- offset += vma->node.start;
-
- for (n = 0; n < count; n++) {
- if (gen >= 8) {
- *cmd++ = MI_STORE_DWORD_IMM_GEN4;
- *cmd++ = lower_32_bits(offset);
- *cmd++ = upper_32_bits(offset);
- *cmd++ = value;
- } else if (gen >= 4) {
- *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
- (gen < 6 ? MI_USE_GGTT : 0);
- *cmd++ = 0;
- *cmd++ = offset;
- *cmd++ = value;
- } else {
- *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *cmd++ = offset;
- *cmd++ = value;
- }
- offset += PAGE_SIZE;
- }
- *cmd = MI_BATCH_BUFFER_END;
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
-
- vma = i915_vma_instance(obj, vma->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto err;
-
- return vma;
-
-err:
- i915_gem_object_put(obj);
- return ERR_PTR(err);
-}
-
static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
@@ -236,10 +172,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
unsigned int dw)
{
struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
- struct i915_request *rq;
struct i915_vma *vma;
- struct i915_vma *batch;
- unsigned int flags;
int err;
GEM_BUG_ON(obj->base.size > vm->total);
@@ -250,7 +183,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
return PTR_ERR(vma);
i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err)
return err;
@@ -259,70 +192,23 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
if (err)
return err;
- /* Within the GTT the huge objects maps every page onto
+ /*
+ * Within the GTT the huge object maps every page onto
* its 1024 real pages (using phys_pfn = dma_pfn % 1024).
* We set the nth dword within the page using the nth
* mapping via the GTT - this should exercise the GTT mapping
* whilst checking that each context provides a unique view
* into the object.
*/
- batch = gpu_fill_dw(vma,
- (dw * real_page_count(obj)) << PAGE_SHIFT |
- (dw * sizeof(u32)),
- real_page_count(obj),
- dw);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto err_vma;
- }
-
- rq = igt_request_alloc(ctx, engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_batch;
- }
-
- flags = 0;
- if (INTEL_GEN(vm->i915) <= 5)
- flags |= I915_DISPATCH_SECURE;
-
- err = engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- flags);
- if (err)
- goto err_request;
-
- i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, rq, 0);
- i915_vma_unlock(batch);
- if (err)
- goto skip_request;
-
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
- if (err)
- goto skip_request;
-
- i915_request_add(rq);
-
- i915_vma_unpin(batch);
- i915_vma_close(batch);
- i915_vma_put(batch);
-
+ err = igt_gpu_fill_dw(vma,
+ ctx,
+ engine,
+ (dw * real_page_count(obj)) << PAGE_SHIFT |
+ (dw * sizeof(u32)),
+ real_page_count(obj),
+ dw);
i915_vma_unpin(vma);
- return 0;
-
-skip_request:
- i915_request_skip(rq, err);
-err_request:
- i915_request_add(rq);
-err_batch:
- i915_vma_unpin(batch);
- i915_vma_put(batch);
-err_vma:
- i915_vma_unpin(vma);
return err;
}
@@ -780,13 +666,17 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
goto err_request;
i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, rq, 0);
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
i915_vma_unlock(batch);
if (err)
goto skip_request;
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (err)
goto skip_request;
@@ -821,8 +711,7 @@ err_vma:
#define TEST_RESET BIT(2)
static int
-__sseu_prepare(struct drm_i915_private *i915,
- const char *name,
+__sseu_prepare(const char *name,
unsigned int flags,
struct intel_context *ce,
struct igt_spinner **spin)
@@ -838,14 +727,11 @@ __sseu_prepare(struct drm_i915_private *i915,
if (!*spin)
return -ENOMEM;
- ret = igt_spinner_init(*spin, i915);
+ ret = igt_spinner_init(*spin, ce->engine->gt);
if (ret)
goto err_free;
- rq = igt_spinner_create_request(*spin,
- ce->gem_context,
- ce->engine,
- MI_NOOP);
+ rq = igt_spinner_create_request(*spin, ce, MI_NOOP);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
goto err_fini;
@@ -871,8 +757,7 @@ err_free:
}
static int
-__read_slice_count(struct drm_i915_private *i915,
- struct intel_context *ce,
+__read_slice_count(struct intel_context *ce,
struct drm_i915_gem_object *obj,
struct igt_spinner *spin,
u32 *rpcs)
@@ -901,7 +786,7 @@ __read_slice_count(struct drm_i915_private *i915,
return ret;
}
- if (INTEL_GEN(i915) >= 11) {
+ if (INTEL_GEN(ce->engine->i915) >= 11) {
s_mask = GEN11_RPCS_S_CNT_MASK;
s_shift = GEN11_RPCS_S_CNT_SHIFT;
} else {
@@ -944,8 +829,7 @@ __check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
}
static int
-__sseu_finish(struct drm_i915_private *i915,
- const char *name,
+__sseu_finish(const char *name,
unsigned int flags,
struct intel_context *ce,
struct drm_i915_gem_object *obj,
@@ -962,14 +846,13 @@ __sseu_finish(struct drm_i915_private *i915,
goto out;
}
- ret = __read_slice_count(i915, ce, obj,
+ ret = __read_slice_count(ce, obj,
flags & TEST_RESET ? NULL : spin, &rpcs);
ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
if (ret)
goto out;
- ret = __read_slice_count(i915, ce->engine->kernel_context, obj,
- NULL, &rpcs);
+ ret = __read_slice_count(ce->engine->kernel_context, obj, NULL, &rpcs);
ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");
out:
@@ -977,11 +860,12 @@ out:
igt_spinner_end(spin);
if ((flags & TEST_IDLE) && ret == 0) {
- ret = i915_gem_wait_for_idle(i915, 0, MAX_SCHEDULE_TIMEOUT);
+ ret = i915_gem_wait_for_idle(ce->engine->i915,
+ 0, MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
- ret = __read_slice_count(i915, ce, obj, NULL, &rpcs);
+ ret = __read_slice_count(ce, obj, NULL, &rpcs);
ret = __check_rpcs(name, rpcs, ret, expected,
"Context", " after idle!");
}
@@ -990,8 +874,7 @@ out:
}
static int
-__sseu_test(struct drm_i915_private *i915,
- const char *name,
+__sseu_test(const char *name,
unsigned int flags,
struct intel_context *ce,
struct drm_i915_gem_object *obj,
@@ -1000,7 +883,7 @@ __sseu_test(struct drm_i915_private *i915,
struct igt_spinner *spin = NULL;
int ret;
- ret = __sseu_prepare(i915, name, flags, ce, &spin);
+ ret = __sseu_prepare(name, flags, ce, &spin);
if (ret)
return ret;
@@ -1008,7 +891,7 @@ __sseu_test(struct drm_i915_private *i915,
if (ret)
goto out_spin;
- ret = __sseu_finish(i915, name, flags, ce, obj,
+ ret = __sseu_finish(name, flags, ce, obj,
hweight32(sseu.slice_mask), spin);
out_spin:
@@ -1088,22 +971,22 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
goto out_context;
/* First set the default mask. */
- ret = __sseu_test(i915, name, flags, ce, obj, engine->sseu);
+ ret = __sseu_test(name, flags, ce, obj, engine->sseu);
if (ret)
goto out_fail;
/* Then set a power-gated configuration. */
- ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
+ ret = __sseu_test(name, flags, ce, obj, pg_sseu);
if (ret)
goto out_fail;
/* Back to defaults. */
- ret = __sseu_test(i915, name, flags, ce, obj, engine->sseu);
+ ret = __sseu_test(name, flags, ce, obj, engine->sseu);
if (ret)
goto out_fail;
/* One last power-gated configuration for the road. */
- ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
+ ret = __sseu_test(name, flags, ce, obj, pg_sseu);
if (ret)
goto out_fail;
@@ -1339,7 +1222,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
goto err_request;
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, 0);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, 0);
i915_vma_unlock(vma);
if (err)
goto skip_request;
@@ -1436,7 +1321,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto err_request;
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (err)
goto skip_request;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 01857c12f12f..1d27babff0ce 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -351,7 +351,10 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
}
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq,
+ EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
i915_request_add(rq);
@@ -382,7 +385,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
static void disable_retire_worker(struct drm_i915_private *i915)
{
- i915_gem_shrinker_unregister(i915);
+ i915_gem_driver_unregister__shrinker(i915);
intel_gt_pm_get(&i915->gt);
@@ -398,7 +401,7 @@ static void restore_retire_worker(struct drm_i915_private *i915)
igt_flush_test(i915, I915_WAIT_LOCKED);
mutex_unlock(&i915->drm.struct_mutex);
- i915_gem_shrinker_register(i915);
+ i915_gem_driver_register__shrinker(i915);
}
static void mmap_offset_lock(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index 19843acc84d3..c21d747e7d05 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -9,6 +9,7 @@
#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
+#include "huge_gem_object.h"
#include "mock_context.h"
static int igt_fill_blt(void *arg)
@@ -23,16 +24,26 @@ static int igt_fill_blt(void *arg)
prandom_seed_state(&prng, i915_selftest.random_seed);
+ /*
+ * XXX: needs some threads to scale all these tests; also maybe throw
+ * in submission from a higher-priority context to see if we are
+ * preempted for very large objects...
+ */
+
do {
- u32 sz = prandom_u32_state(&prng) % SZ_32M;
+ const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
+ u32 phys_sz = sz % (max_block_size + 1);
u32 val = prandom_u32_state(&prng);
u32 i;
sz = round_up(sz, PAGE_SIZE);
+ phys_sz = round_up(phys_sz, PAGE_SIZE);
- pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val);
+ pr_debug("%s with phys_sz=%x, sz=%x, val=%x\n", __func__,
+ phys_sz, sz, val);
- obj = i915_gem_object_create_internal(i915, sz);
+ obj = huge_gem_object(i915, phys_sz, sz);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_flush;
@@ -48,7 +59,8 @@ static int igt_fill_blt(void *arg)
* Make sure the potentially async clflush does its job, if
* required.
*/
- memset32(vaddr, val ^ 0xdeadbeaf, obj->base.size / sizeof(u32));
+ memset32(vaddr, val ^ 0xdeadbeaf,
+ huge_gem_object_phys_size(obj) / sizeof(u32));
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
obj->cache_dirty = true;
@@ -65,7 +77,7 @@ static int igt_fill_blt(void *arg)
if (err)
goto err_unpin;
- for (i = 0; i < obj->base.size / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -91,10 +103,116 @@ err_flush:
return err;
}
+static int igt_copy_blt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_context *ce = i915->engine[BCS0]->kernel_context;
+ struct drm_i915_gem_object *src, *dst;
+ struct rnd_state prng;
+ IGT_TIMEOUT(end);
+ u32 *vaddr;
+ int err = 0;
+
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+
+ do {
+ const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
+ u32 phys_sz = sz % (max_block_size + 1);
+ u32 val = prandom_u32_state(&prng);
+ u32 i;
+
+ sz = round_up(sz, PAGE_SIZE);
+ phys_sz = round_up(phys_sz, PAGE_SIZE);
+
+ pr_debug("%s with phys_sz=%x, sz=%x, val=%x\n", __func__,
+ phys_sz, sz, val);
+
+ src = huge_gem_object(i915, phys_sz, sz);
+ if (IS_ERR(src)) {
+ err = PTR_ERR(src);
+ goto err_flush;
+ }
+
+ vaddr = i915_gem_object_pin_map(src, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_put_src;
+ }
+
+ memset32(vaddr, val,
+ huge_gem_object_phys_size(src) / sizeof(u32));
+
+ i915_gem_object_unpin_map(src);
+
+ if (!(src->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ src->cache_dirty = true;
+
+ dst = huge_gem_object(i915, phys_sz, sz);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ goto err_put_src;
+ }
+
+ vaddr = i915_gem_object_pin_map(dst, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_put_dst;
+ }
+
+ memset32(vaddr, val ^ 0xdeadbeaf,
+ huge_gem_object_phys_size(dst) / sizeof(u32));
+
+ if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
+ dst->cache_dirty = true;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_gem_object_copy_blt(src, dst, ce);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err)
+ goto err_unpin;
+
+ i915_gem_object_lock(dst);
+ err = i915_gem_object_set_to_cpu_domain(dst, false);
+ i915_gem_object_unlock(dst);
+ if (err)
+ goto err_unpin;
+
+ for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); ++i) {
+ if (vaddr[i] != val) {
+ pr_err("vaddr[%u]=%x, expected=%x\n", i,
+ vaddr[i], val);
+ err = -EINVAL;
+ goto err_unpin;
+ }
+ }
+
+ i915_gem_object_unpin_map(dst);
+
+ i915_gem_object_put(src);
+ i915_gem_object_put(dst);
+ } while (!time_after(jiffies, end));
+
+ goto err_flush;
+
+err_unpin:
+ i915_gem_object_unpin_map(dst);
+err_put_dst:
+ i915_gem_object_put(dst);
+err_put_src:
+ i915_gem_object_put(src);
+err_flush:
+ if (err == -ENOMEM)
+ err = 0;
+
+ return err;
+}
+
int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_fill_blt),
+ SUBTEST(igt_copy_blt),
};
if (intel_gt_is_wedged(&i915->gt))
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index b232e6d2cd92..57ece53c1075 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -9,6 +9,8 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
+#include "i915_vma.h"
+#include "i915_drv.h"
#include "i915_request.h"
@@ -23,7 +25,7 @@ igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- ce = i915_gem_context_get_engine(ctx, engine->id);
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
if (IS_ERR(ce))
return ERR_CAST(ce);
@@ -32,3 +34,140 @@ igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
return rq;
}
+
+struct i915_vma *
+igt_emit_store_dw(struct i915_vma *vma,
+ u64 offset,
+ unsigned long count,
+ u32 val)
+{
+ struct drm_i915_gem_object *obj;
+ const int gen = INTEL_GEN(vma->vm->i915);
+ unsigned long n, size;
+ u32 *cmd;
+ int err;
+
+ size = (4 * count + 1) * sizeof(u32);
+ size = round_up(size, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(vma->vm->i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
+ offset += vma->node.start;
+
+ for (n = 0; n < count; n++) {
+ if (gen >= 8) {
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4;
+ *cmd++ = lower_32_bits(offset);
+ *cmd++ = upper_32_bits(offset);
+ *cmd++ = val;
+ } else if (gen >= 4) {
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
+ (gen < 6 ? MI_USE_GGTT : 0);
+ *cmd++ = 0;
+ *cmd++ = offset;
+ *cmd++ = val;
+ } else {
+ *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *cmd++ = offset;
+ *cmd++ = val;
+ }
+ offset += PAGE_SIZE;
+ }
+ *cmd = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(obj);
+
+ vma = i915_vma_instance(obj, vma->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err;
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+int igt_gpu_fill_dw(struct i915_vma *vma,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u64 offset,
+ unsigned long count,
+ u32 val)
+{
+ struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
+ struct i915_request *rq;
+ struct i915_vma *batch;
+ unsigned int flags;
+ int err;
+
+ GEM_BUG_ON(vma->size > vm->total);
+ GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+
+ batch = igt_emit_store_dw(vma, offset, count, val);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ rq = igt_request_alloc(ctx, engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_batch;
+ }
+
+ flags = 0;
+ if (INTEL_GEN(vm->i915) <= 5)
+ flags |= I915_DISPATCH_SECURE;
+
+ err = engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ flags);
+ if (err)
+ goto err_request;
+
+ i915_vma_lock(batch);
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
+ if (err)
+ goto skip_request;
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+ if (err)
+ goto skip_request;
+
+ i915_request_add(rq);
+
+ i915_vma_unpin(batch);
+ i915_vma_close(batch);
+ i915_vma_put(batch);
+
+ return 0;
+
+skip_request:
+ i915_request_skip(rq, err);
+err_request:
+ i915_request_add(rq);
+err_batch:
+ i915_vma_unpin(batch);
+ i915_vma_put(batch);
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
index 0f17251cf75d..361a7ef866b0 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
@@ -7,11 +7,27 @@
#ifndef __IGT_GEM_UTILS_H__
#define __IGT_GEM_UTILS_H__
+#include <linux/types.h>
+
struct i915_request;
struct i915_gem_context;
struct intel_engine_cs;
+struct i915_vma;
struct i915_request *
igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
+struct i915_vma *
+igt_emit_store_dw(struct i915_vma *vma,
+ u64 offset,
+ unsigned long count,
+ u32 val);
+
+int igt_gpu_fill_dw(struct i915_vma *vma,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u64 offset,
+ unsigned long count,
+ u32 val);
+
#endif /* __IGT_GEM_UTILS_H__ */
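A hedged usage sketch for the newly exported helpers (ctx, engine and a pinned vma assumed to exist already; error handling trimmed):

	/* Sketch: write 0xdeadbeef into dword 0 of every page of the vma. */
	err = igt_gpu_fill_dw(vma, ctx, engine,
			      0 /* offset */,
			      vma->size >> PAGE_SHIFT /* count */,
			      0xdeadbeef /* val */);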
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index c092bdf5f0bf..09c68dda2098 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -27,6 +27,7 @@
#include <uapi/linux/sched/types.h>
#include "i915_drv.h"
+#include "i915_trace.h"
static void irq_enable(struct intel_engine_cs *engine)
{
@@ -34,9 +35,9 @@ static void irq_enable(struct intel_engine_cs *engine)
return;
/* Caller disables interrupts */
- spin_lock(&engine->i915->irq_lock);
+ spin_lock(&engine->gt->irq_lock);
engine->irq_enable(engine);
- spin_unlock(&engine->i915->irq_lock);
+ spin_unlock(&engine->gt->irq_lock);
}
static void irq_disable(struct intel_engine_cs *engine)
@@ -45,9 +46,9 @@ static void irq_disable(struct intel_engine_cs *engine)
return;
/* Caller disables interrupts */
- spin_lock(&engine->i915->irq_lock);
+ spin_lock(&engine->gt->irq_lock);
engine->irq_disable(engine);
- spin_unlock(&engine->i915->irq_lock);
+ spin_unlock(&engine->gt->irq_lock);
}
static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
@@ -66,14 +67,15 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ unsigned long flags;
if (!b->irq_armed)
return;
- spin_lock_irq(&b->irq_lock);
+ spin_lock_irqsave(&b->irq_lock, flags);
if (b->irq_armed)
__intel_breadcrumbs_disarm_irq(b);
- spin_unlock_irq(&b->irq_lock);
+ spin_unlock_irqrestore(&b->irq_lock, flags);
}
static inline bool __request_completed(const struct i915_request *rq)
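The irqsave/irqrestore pair is what allows disarming from callers that may already have interrupts disabled: the old spin_lock_irq()/spin_unlock_irq() form unconditionally re-enables interrupts on unlock, which would be wrong under another irq-disabled lock.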
@@ -112,18 +114,18 @@ __dma_fence_signal__timestamp(struct dma_fence *fence, ktime_t timestamp)
}
static void
-__dma_fence_signal__notify(struct dma_fence *fence)
+__dma_fence_signal__notify(struct dma_fence *fence,
+ const struct list_head *list)
{
struct dma_fence_cb *cur, *tmp;
lockdep_assert_held(fence->lock);
lockdep_assert_irqs_disabled();
- list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+ list_for_each_entry_safe(cur, tmp, list, node) {
INIT_LIST_HEAD(&cur->node);
cur->func(fence, cur);
}
- INIT_LIST_HEAD(&fence->cb_list);
}
void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
@@ -185,11 +187,12 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
list_for_each_safe(pos, next, &signal) {
struct i915_request *rq =
list_entry(pos, typeof(*rq), signal_link);
-
- __dma_fence_signal__timestamp(&rq->fence, timestamp);
+ struct list_head cb_list;
spin_lock(&rq->lock);
- __dma_fence_signal__notify(&rq->fence);
+ list_replace(&rq->fence.cb_list, &cb_list);
+ __dma_fence_signal__timestamp(&rq->fence, timestamp);
+ __dma_fence_signal__notify(&rq->fence, &cb_list);
spin_unlock(&rq->lock);
i915_request_put(rq);
@@ -211,28 +214,6 @@ static void signal_irq_work(struct irq_work *work)
intel_engine_breadcrumbs_irq(engine);
}
-void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
- spin_lock_irq(&b->irq_lock);
- if (!b->irq_enabled++)
- irq_enable(engine);
- GEM_BUG_ON(!b->irq_enabled); /* no overflow! */
- spin_unlock_irq(&b->irq_lock);
-}
-
-void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
- spin_lock_irq(&b->irq_lock);
- GEM_BUG_ON(!b->irq_enabled); /* no underflow! */
- if (!--b->irq_enabled)
- irq_disable(engine);
- spin_unlock_irq(&b->irq_lock);
-}
-
static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
struct intel_engine_cs *engine =
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index f30441a140f8..f55691d151ae 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -53,6 +53,14 @@ int __intel_context_do_pin(struct intel_context *ce)
if (likely(!atomic_read(&ce->pin_count))) {
intel_wakeref_t wakeref;
+ if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
+ err = ce->ops->alloc(ce);
+ if (unlikely(err))
+ goto err;
+
+ __set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
+ }
+
err = 0;
with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
err = ce->ops->pin(ce);
@@ -60,7 +68,7 @@ int __intel_context_do_pin(struct intel_context *ce)
goto err;
GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n",
- ce->engine->name, ce->ring->timeline->fence_context,
+ ce->engine->name, ce->timeline->fence_context,
ce->ring->head, ce->ring->tail);
i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
@@ -90,7 +98,7 @@ void intel_context_unpin(struct intel_context *ce)
if (likely(atomic_dec_and_test(&ce->pin_count))) {
GEM_TRACE("%s context:%llx retire\n",
- ce->engine->name, ce->ring->timeline->fence_context);
+ ce->engine->name, ce->timeline->fence_context);
ce->ops->unpin(ce);
@@ -118,7 +126,7 @@ static int __context_pin_state(struct i915_vma *vma)
* And mark it as a globally pinned object to let the shrinker know
* it cannot reclaim the object until we release it.
*/
- vma->obj->pin_global++;
+ i915_vma_make_unshrinkable(vma);
vma->obj->mm.dirty = true;
return 0;
@@ -126,8 +134,8 @@ static int __context_pin_state(struct i915_vma *vma)
static void __context_unpin_state(struct i915_vma *vma)
{
- vma->obj->pin_global--;
__i915_vma_unpin(vma);
+ i915_vma_make_shrinkable(vma);
}
static void __intel_context_retire(struct i915_active *active)
@@ -135,11 +143,12 @@ static void __intel_context_retire(struct i915_active *active)
struct intel_context *ce = container_of(active, typeof(*ce), active);
GEM_TRACE("%s context:%llx retire\n",
- ce->engine->name, ce->ring->timeline->fence_context);
+ ce->engine->name, ce->timeline->fence_context);
if (ce->state)
__context_unpin_state(ce->state);
+ intel_timeline_unpin(ce->timeline);
intel_ring_unpin(ce->ring);
intel_context_put(ce);
}
@@ -155,30 +164,54 @@ static int __intel_context_active(struct i915_active *active)
if (err)
goto err_put;
+ err = intel_timeline_pin(ce->timeline);
+ if (err)
+ goto err_ring;
+
if (!ce->state)
return 0;
err = __context_pin_state(ce->state);
if (err)
- goto err_ring;
+ goto err_timeline;
+
+ return 0;
+
+err_timeline:
+ intel_timeline_unpin(ce->timeline);
+err_ring:
+ intel_ring_unpin(ce->ring);
+err_put:
+ intel_context_put(ce);
+ return err;
+}
+
+int intel_context_active_acquire(struct intel_context *ce)
+{
+ int err;
+
+ err = i915_active_acquire(&ce->active);
+ if (err)
+ return err;
/* Preallocate tracking nodes */
if (!i915_gem_context_is_kernel(ce->gem_context)) {
err = i915_active_acquire_preallocate_barrier(&ce->active,
ce->engine);
- if (err)
- goto err_state;
+ if (err) {
+ i915_active_release(&ce->active);
+ return err;
+ }
}
return 0;
+}
-err_state:
- __context_unpin_state(ce->state);
-err_ring:
- intel_ring_unpin(ce->ring);
-err_put:
- intel_context_put(ce);
- return err;
+void intel_context_active_release(struct intel_context *ce)
+{
+ /* Nodes preallocated in intel_context_active() */
+ i915_active_acquire_barrier(&ce->active);
+ i915_active_release(&ce->active);
}
void
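With acquire/release now out of line, callers bracket the first use of a context's active tracker. A hedged sketch of the pairing (context setup and request submission elided):

	/* Sketch: barriers are preallocated on acquire, flushed on release. */
	err = intel_context_active_acquire(ce);
	if (err)
		return err;

	/* ... i915_active_ref(&ce->active, ...) as requests are emitted ... */

	intel_context_active_release(ce);	/* acquire_barrier + release */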
@@ -192,10 +225,13 @@ intel_context_init(struct intel_context *ce,
ce->gem_context = ctx;
ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm);
+ if (ctx->timeline)
+ ce->timeline = intel_timeline_get(ctx->timeline);
ce->engine = engine;
ce->ops = engine->cops;
ce->sseu = engine->sseu;
+ ce->ring = __intel_context_ring_size(SZ_16K);
INIT_LIST_HEAD(&ce->signal_link);
INIT_LIST_HEAD(&ce->signals);
@@ -208,6 +244,8 @@ intel_context_init(struct intel_context *ce,
void intel_context_fini(struct intel_context *ce)
{
+ if (ce->timeline)
+ intel_timeline_put(ce->timeline);
i915_vm_put(ce->vm);
mutex_destroy(&ce->pin_mutex);
@@ -242,17 +280,19 @@ int __init i915_global_context_init(void)
void intel_context_enter_engine(struct intel_context *ce)
{
intel_engine_pm_get(ce->engine);
+ intel_timeline_enter(ce->timeline);
}
void intel_context_exit_engine(struct intel_context *ce)
{
+ intel_timeline_exit(ce->timeline);
intel_engine_pm_put(ce->engine);
}
int intel_context_prepare_remote_request(struct intel_context *ce,
struct i915_request *rq)
{
- struct intel_timeline *tl = ce->ring->timeline;
+ struct intel_timeline *tl = ce->timeline;
int err;
/* Only suitable for use in remotely modifying this context */
@@ -266,10 +306,10 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
/* Queue this switch after current activity by this context. */
err = i915_active_request_set(&tl->last_request, rq);
+ mutex_unlock(&tl->mutex);
if (err)
- goto unlock;
+ return err;
}
- lockdep_assert_held(&tl->mutex);
/*
* Guarantee context image and the timeline remains pinned until the
@@ -279,12 +319,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
* words transfer the pinned ce object to tracked active request.
*/
GEM_BUG_ON(i915_active_is_idle(&ce->active));
- err = i915_active_ref(&ce->active, rq->fence.context, rq);
-
-unlock:
- if (rq->timeline != tl)
- mutex_unlock(&tl->mutex);
- return err;
+ return i915_active_ref(&ce->active, rq->timeline, rq);
}
struct i915_request *intel_context_create_request(struct intel_context *ce)
@@ -301,3 +336,7 @@ struct i915_request *intel_context_create_request(struct intel_context *ce)
return rq;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_context.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 23c7e4c0ce7c..dd742ac2fbdb 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -12,6 +12,7 @@
#include "i915_active.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
+#include "intel_timeline_types.h"
void intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
@@ -88,33 +89,27 @@ void intel_context_exit_engine(struct intel_context *ce);
static inline void intel_context_enter(struct intel_context *ce)
{
+ lockdep_assert_held(&ce->timeline->mutex);
if (!ce->active_count++)
ce->ops->enter(ce);
}
static inline void intel_context_mark_active(struct intel_context *ce)
{
+ lockdep_assert_held(&ce->timeline->mutex);
++ce->active_count;
}
static inline void intel_context_exit(struct intel_context *ce)
{
+ lockdep_assert_held(&ce->timeline->mutex);
GEM_BUG_ON(!ce->active_count);
if (!--ce->active_count)
ce->ops->exit(ce);
}
-static inline int intel_context_active_acquire(struct intel_context *ce)
-{
- return i915_active_acquire(&ce->active);
-}
-
-static inline void intel_context_active_release(struct intel_context *ce)
-{
- /* Nodes preallocated in intel_context_active() */
- i915_active_acquire_barrier(&ce->active);
- i915_active_release(&ce->active);
-}
+int intel_context_active_acquire(struct intel_context *ce);
+void intel_context_active_release(struct intel_context *ce);
static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
@@ -127,17 +122,24 @@ static inline void intel_context_put(struct intel_context *ce)
kref_put(&ce->ref, ce->ops->destroy);
}
-static inline int __must_check
+static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
- __acquires(&ce->ring->timeline->mutex)
+ __acquires(&ce->timeline->mutex)
{
- return mutex_lock_interruptible(&ce->ring->timeline->mutex);
+ struct intel_timeline *tl = ce->timeline;
+ int err;
+
+ err = mutex_lock_interruptible(&tl->mutex);
+ if (err)
+ return ERR_PTR(err);
+
+ return tl;
}
-static inline void intel_context_timeline_unlock(struct intel_context *ce)
- __releases(&ce->ring->timeline->mutex)
+static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
+ __releases(&tl->mutex)
{
- mutex_unlock(&ce->ring->timeline->mutex);
+ mutex_unlock(&tl->mutex);
}
int intel_context_prepare_remote_request(struct intel_context *ce,
@@ -145,4 +147,9 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
struct i915_request *intel_context_create_request(struct intel_context *ce);
+static inline struct intel_ring *__intel_context_ring_size(u64 sz)
+{
+ return u64_to_ptr(struct intel_ring, sz);
+}
+
#endif /* __INTEL_CONTEXT_H__ */
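__intel_context_ring_size() encodes a requested ring size directly in the ce->ring pointer until the backend's new ->alloc() hook builds the real ring (gated by CONTEXT_ALLOC_BIT). A hedged sketch of the decode side, mirroring how intel_context_init() stores __intel_context_ring_size(SZ_16K):

	/* Sketch: before ->alloc() runs, ce->ring is an integer in disguise. */
	ce->ring = intel_engine_create_ring(ce->engine,
					    (unsigned long)ce->ring);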
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 68a7e979b1a9..bf9cedfccbf0 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -23,6 +23,8 @@ struct intel_context;
struct intel_ring;
struct intel_context_ops {
+ int (*alloc)(struct intel_context *ce);
+
int (*pin)(struct intel_context *ce);
void (*unpin)(struct intel_context *ce);
@@ -39,9 +41,7 @@ struct intel_context {
struct intel_engine_cs *engine;
struct intel_engine_cs *inflight;
#define intel_context_inflight(ce) ptr_mask_bits((ce)->inflight, 2)
-#define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2)
-#define intel_context_inflight_inc(ce) ptr_count_inc(&(ce)->inflight)
-#define intel_context_inflight_dec(ce) ptr_count_dec(&(ce)->inflight)
+#define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2)
struct i915_address_space *vm;
struct i915_gem_context *gem_context;
@@ -51,11 +51,15 @@ struct intel_context {
struct i915_vma *state;
struct intel_ring *ring;
+ struct intel_timeline *timeline;
+
+ unsigned long flags;
+#define CONTEXT_ALLOC_BIT 0
u32 *lrc_reg_state;
u64 lrc_desc;
- unsigned int active_count; /* notionally protected by timeline->mutex */
+ unsigned int active_count; /* protected by timeline->mutex */
atomic_t pin_count;
struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index db5c73ce86ee..d3c6993f4f46 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -9,7 +9,6 @@
#include <linux/random.h>
#include <linux/seqlock.h>
-#include "i915_gem_batch_pool.h"
#include "i915_pmu.h"
#include "i915_reg.h"
#include "i915_request.h"
@@ -123,8 +122,6 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
return "unknown";
}
-void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);
-
static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
@@ -139,9 +136,6 @@ execlists_active(const struct intel_engine_execlists *execlists)
return READ_ONCE(*execlists->active);
}
-void
-execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
-
struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
@@ -199,9 +193,7 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define CNL_HWS_CSB_WRITE_INDEX 0x2f
struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine,
- struct intel_timeline *timeline,
- int size);
+intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
@@ -343,9 +335,6 @@ void intel_engine_init_execlists(struct intel_engine_cs *engine);
void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
-void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
-void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);
-
void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
@@ -423,7 +412,6 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct intel_gt *gt);
void intel_engines_reset_default_submission(struct intel_gt *gt);
-unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);
bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
@@ -432,9 +420,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...);
-struct intel_engine_cs *
-intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
-
static inline void intel_engine_context_in(struct intel_engine_cs *engine)
{
unsigned long flags;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 65cbf1d9118d..82630db0394b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -32,6 +32,8 @@
#include "intel_engine.h"
#include "intel_engine_pm.h"
+#include "intel_engine_pool.h"
+#include "intel_engine_user.h"
#include "intel_context.h"
#include "intel_lrc.h"
#include "intel_reset.h"
@@ -53,30 +55,6 @@
#define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE)
-struct engine_class_info {
- const char *name;
- u8 uabi_class;
-};
-
-static const struct engine_class_info intel_engine_classes[] = {
- [RENDER_CLASS] = {
- .name = "rcs",
- .uabi_class = I915_ENGINE_CLASS_RENDER,
- },
- [COPY_ENGINE_CLASS] = {
- .name = "bcs",
- .uabi_class = I915_ENGINE_CLASS_COPY,
- },
- [VIDEO_DECODE_CLASS] = {
- .name = "vcs",
- .uabi_class = I915_ENGINE_CLASS_VIDEO,
- },
- [VIDEO_ENHANCEMENT_CLASS] = {
- .name = "vecs",
- .uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
- },
-};
-
#define MAX_MMIO_BASES 3
struct engine_info {
unsigned int hw_id;
@@ -186,6 +164,7 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
default:
MISSING_CASE(INTEL_GEN(dev_priv));
return DEFAULT_LR_CONTEXT_RENDER_SIZE;
+ case 12:
case 11:
return GEN11_LR_CONTEXT_RENDER_SIZE;
case 10:
@@ -257,11 +236,16 @@ static u32 __engine_mmio_base(struct drm_i915_private *i915,
return bases[i].base;
}
-static void __sprint_engine_name(char *name, const struct engine_info *info)
+static void __sprint_engine_name(struct intel_engine_cs *engine)
{
- WARN_ON(snprintf(name, INTEL_ENGINE_CS_MAX_NAME, "%s%u",
- intel_engine_classes[info->class].name,
- info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
+ /*
+ * Before we know what the uABI name for this engine will be,
+ * we still would like to keep track of this engine in the debug logs.
+ * We throw in a ' here as a reminder that this isn't its final name.
+ */
+ GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
+ intel_engine_class_repr(engine->class),
+ engine->instance) >= sizeof(engine->name));
}
void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
@@ -285,15 +269,11 @@ static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
intel_engine_set_hwsp_writemask(engine, ~0u);
}
-static int
-intel_engine_setup(struct drm_i915_private *dev_priv,
- enum intel_engine_id id)
+static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
{
const struct engine_info *info = &intel_engines[id];
struct intel_engine_cs *engine;
- GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
-
BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
@@ -303,10 +283,9 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
return -EINVAL;
- if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
+ if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance]))
return -EINVAL;
- GEM_BUG_ON(dev_priv->engine[id]);
engine = kzalloc(sizeof(*engine), GFP_KERNEL);
if (!engine)
return -ENOMEM;
@@ -315,14 +294,15 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->id = id;
engine->mask = BIT(id);
- engine->i915 = dev_priv;
- engine->gt = &dev_priv->gt;
- engine->uncore = &dev_priv->uncore;
- __sprint_engine_name(engine->name, info);
+ engine->i915 = gt->i915;
+ engine->gt = gt;
+ engine->uncore = gt->uncore;
engine->hw_id = engine->guc_id = info->hw_id;
- engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
+ engine->mmio_base = __engine_mmio_base(gt->i915, info->mmio_bases);
+
engine->class = info->class;
engine->instance = info->instance;
+ __sprint_engine_name(engine);
/*
* To be overridden by the backend on setup. However to facilitate
@@ -330,14 +310,12 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
*/
engine->destroy = (typeof(engine->destroy))kfree;
- engine->uabi_class = intel_engine_classes[info->class].uabi_class;
-
- engine->context_size = intel_engine_context_size(dev_priv,
+ engine->context_size = intel_engine_context_size(gt->i915,
engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0;
if (engine->context_size)
- DRIVER_CAPS(dev_priv)->has_logical_contexts = true;
+ DRIVER_CAPS(gt->i915)->has_logical_contexts = true;
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
@@ -349,8 +327,11 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
/* Scrub mmio state on takeover */
intel_engine_sanitize_mmio(engine);
- dev_priv->engine_class[info->class][info->instance] = engine;
- dev_priv->engine[id] = engine;
+ gt->engine_class[info->class][info->instance] = engine;
+
+ intel_engine_add_user(engine);
+ gt->i915->engine[id] = engine;
+
return 0;
}
@@ -426,14 +407,14 @@ int intel_engines_init_mmio(struct drm_i915_private *i915)
WARN_ON(engine_mask &
GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
- if (i915_inject_probe_failure())
+ if (i915_inject_probe_failure(i915))
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
if (!HAS_ENGINE(i915, i))
continue;
- err = intel_engine_setup(i915, i);
+ err = intel_engine_setup(&i915->gt, i);
if (err)
goto cleanup;
@@ -492,11 +473,6 @@ cleanup:
return err;
}
-static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
-{
- i915_gem_batch_pool_init(&engine->batch_pool, engine);
-}
-
void intel_engine_init_execlists(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -622,10 +598,11 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlists(engine);
intel_engine_init_hangcheck(engine);
- intel_engine_init_batch_pool(engine);
intel_engine_init_cmd_parser(engine);
intel_engine_init__pm(engine);
+ intel_engine_pool_init(&engine->pool);
+
/* Use the whole device by default */
engine->sseu =
intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
@@ -680,47 +657,6 @@ cleanup:
return err;
}
-void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
-{
- static const struct {
- u8 engine;
- u8 sched;
- } map[] = {
-#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
- MAP(HAS_PREEMPTION, PREEMPTION),
- MAP(HAS_SEMAPHORES, SEMAPHORES),
- MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
-#undef MAP
- };
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 enabled, disabled;
-
- enabled = 0;
- disabled = 0;
- for_each_engine(engine, i915, id) { /* all engines must agree! */
- int i;
-
- if (engine->schedule)
- enabled |= (I915_SCHEDULER_CAP_ENABLED |
- I915_SCHEDULER_CAP_PRIORITY);
- else
- disabled |= (I915_SCHEDULER_CAP_ENABLED |
- I915_SCHEDULER_CAP_PRIORITY);
-
- for (i = 0; i < ARRAY_SIZE(map); i++) {
- if (engine->flags & BIT(map[i].engine))
- enabled |= BIT(map[i].sched);
- else
- disabled |= BIT(map[i].sched);
- }
- }
-
- i915->caps.scheduler = enabled & ~disabled;
- if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
- i915->caps.scheduler = 0;
-}
-
struct measure_breadcrumb {
struct i915_request rq;
struct intel_timeline timeline;
@@ -744,8 +680,6 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
engine->status_page.vma))
goto out_frame;
- INIT_LIST_HEAD(&frame->ring.request_list);
- frame->ring.timeline = &frame->timeline;
frame->ring.vaddr = frame->cs;
frame->ring.size = sizeof(frame->cs);
frame->ring.effective_size = frame->ring.size;
@@ -772,26 +706,6 @@ out_frame:
return dw;
}
-static int pin_context(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_context **out)
-{
- struct intel_context *ce;
- int err;
-
- ce = i915_gem_context_get_engine(ctx, engine->id);
- if (IS_ERR(ce))
- return PTR_ERR(ce);
-
- err = intel_context_pin(ce);
- intel_context_put(ce);
- if (err)
- return err;
-
- *out = ce;
- return 0;
-}
-
void
intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
{
@@ -813,6 +727,27 @@ intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
#endif
}
+static struct intel_context *
+create_kernel_context(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ int err;
+
+ ce = intel_context_create(engine->i915->kernel_context, engine);
+ if (IS_ERR(ce))
+ return ce;
+
+ ce->ring = __intel_context_ring_size(SZ_4K);
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ return ERR_PTR(err);
+ }
+
+ return ce;
+}
+
/**
* intel_engine_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize.
@@ -826,22 +761,24 @@ intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
*/
int intel_engine_init_common(struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = engine->i915;
+ struct intel_context *ce;
int ret;
engine->set_default_submission(engine);
- /* We may need to do things with the shrinker which
+ /*
+ * We may need to do things with the shrinker which
* require us to immediately switch back to the default
* context. This can cause a problem as pinning the
* default context also requires GTT space which may not
* be available. To avoid this we always pin the default
* context.
*/
- ret = pin_context(i915->kernel_context, engine,
- &engine->kernel_context);
- if (ret)
- return ret;
+ ce = create_kernel_context(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ engine->kernel_context = ce;
ret = measure_breadcrumb_dw(engine);
if (ret < 0)
@@ -852,7 +789,8 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
return 0;
err_unpin:
- intel_context_unpin(engine->kernel_context);
+ intel_context_unpin(ce);
+ intel_context_put(ce);
return ret;
}
@@ -869,14 +807,15 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
cleanup_status_page(engine);
+ intel_engine_pool_fini(&engine->pool);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
- i915_gem_batch_pool_fini(&engine->batch_pool);
if (engine->default_state)
i915_gem_object_put(engine->default_state);
intel_context_unpin(engine->kernel_context);
+ intel_context_put(engine->kernel_context);
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
intel_wa_list_free(&engine->ctx_wa_list);
@@ -1069,16 +1008,12 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
static bool ring_is_idle(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
- intel_wakeref_t wakeref;
bool idle = true;
if (I915_SELFTEST_ONLY(!engine->mmio_base))
return true;
- /* If the whole device is asleep, the engine must be idle */
- wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
- if (!wakeref)
+ if (!intel_engine_pm_get_if_awake(engine))
return true;
/* First check that no commands are left in the ring */
@@ -1087,11 +1022,11 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
idle = false;
/* No bit for gen2, so assume the CS parser is idle */
- if (INTEL_GEN(dev_priv) > 2 &&
+ if (INTEL_GEN(engine->i915) > 2 &&
!(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
idle = false;
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_engine_pm_put(engine);
return idle;
}
@@ -1190,20 +1125,6 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
}
}
-unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- unsigned int which;
-
- which = 0;
- for_each_engine(engine, i915, id)
- if (engine->default_state)
- which |= BIT(engine->uabi_class);
-
- return which;
-}
-
static int print_sched_attr(struct drm_i915_private *i915,
const struct i915_sched_attr *attr,
char *buf, int x, int len)
@@ -1281,7 +1202,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
unsigned long flags;
u64 addr;
- if (engine->id == RCS0 && IS_GEN_RANGE(dev_priv, 4, 7))
+ if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
drm_printf(m, "\tRING_START: 0x%08x\n",
ENGINE_READ(engine, RING_START));
@@ -1483,6 +1404,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
}
spin_unlock_irqrestore(&engine->active.lock, flags);
+ drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
if (wakeref) {
intel_engine_print_registers(engine, m);
@@ -1501,29 +1423,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
intel_engine_print_breadcrumbs(engine, m);
}
-static u8 user_class_map[] = {
- [I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
- [I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
- [I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
- [I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
-};
-
-struct intel_engine_cs *
-intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
-{
- if (class >= ARRAY_SIZE(user_class_map))
- return NULL;
-
- class = user_class_map[class];
-
- GEM_BUG_ON(class > MAX_ENGINE_CLASS);
-
- if (instance > MAX_ENGINE_INSTANCE)
- return NULL;
-
- return i915->engine_class[class][instance];
-}
-
/**
* intel_enable_engine_stats() - Enable engine busy tracking on engine
* @engine: engine to enable stats collection
@@ -1561,7 +1460,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
for (port = execlists->pending; (rq = *port); port++) {
/* Exclude any contexts already counted in active */
- if (intel_context_inflight_count(rq->hw_context) == 1)
+ if (!intel_context_inflight_count(rq->hw_context))
engine->stats.active++;
}
@@ -1675,5 +1574,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "mock_engine.c"
+#include "selftest_engine.c"
#include "selftest_engine_cs.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index e74fbf04a68d..a372d4ea9370 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -8,6 +8,7 @@
#include "intel_engine.h"
#include "intel_engine_pm.h"
+#include "intel_engine_pool.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
@@ -36,28 +37,34 @@ static int __engine_unpark(struct intel_wakeref *wf)
return 0;
}
-void intel_engine_pm_get(struct intel_engine_cs *engine)
+#if IS_ENABLED(CONFIG_LOCKDEP)
+
+static inline void __timeline_mark_lock(struct intel_context *ce)
{
- intel_wakeref_get(&engine->i915->runtime_pm, &engine->wakeref, __engine_unpark);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
+ local_irq_restore(flags);
}
-void intel_engine_park(struct intel_engine_cs *engine)
+static inline void __timeline_mark_unlock(struct intel_context *ce)
{
- /*
- * We are committed now to parking this engine, make sure there
- * will be no more interrupts arriving later and the engine
- * is truly idle.
- */
- if (wait_for(intel_engine_is_idle(engine), 10)) {
- struct drm_printer p = drm_debug_printer(__func__);
+ mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_);
+}
- dev_err(engine->i915->drm.dev,
- "%s is not idle before parking\n",
- engine->name);
- intel_engine_dump(engine, &p, NULL);
- }
+#else
+
+static inline void __timeline_mark_lock(struct intel_context *ce)
+{
+}
+
+static inline void __timeline_mark_unlock(struct intel_context *ce)
+{
}
+#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */
+
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
struct i915_request *rq;
@@ -82,17 +89,29 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
* retiring the last request, thus all rings should be empty and
* all timelines idle.
*/
+ __timeline_mark_lock(engine->kernel_context);
+
rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
if (IS_ERR(rq))
/* Context switch failed, hope for the best! Maybe reset? */
return true;
+ intel_timeline_enter(rq->timeline);
+
/* Check again on the next retirement. */
engine->wakeref_serial = engine->serial + 1;
+ i915_request_add_active_barriers(rq);
- i915_request_add_barriers(rq);
+ /* Install ourselves as a preemption barrier */
+ rq->sched.attr.priority = I915_PRIORITY_UNPREEMPTABLE;
__i915_request_commit(rq);
+ /* Release our exclusive hold on the engine */
+ __intel_wakeref_defer_park(&engine->wakeref);
+ __i915_request_queue(rq, NULL);
+
+ __timeline_mark_unlock(engine->kernel_context);
+
return false;
}
@@ -116,6 +135,7 @@ static int __engine_park(struct intel_wakeref *wf)
GEM_TRACE("%s\n", engine->name);
intel_engine_disarm_breadcrumbs(engine);
+ intel_engine_pool_park(&engine->pool);
/* Must be reset upon idling, or we may miss the busy wakeup. */
GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
@@ -134,12 +154,18 @@ static int __engine_park(struct intel_wakeref *wf)
return 0;
}
-void intel_engine_pm_put(struct intel_engine_cs *engine)
-{
- intel_wakeref_put(&engine->i915->runtime_pm, &engine->wakeref, __engine_park);
-}
+static const struct intel_wakeref_ops wf_ops = {
+ .get = __engine_unpark,
+ .put = __engine_park,
+};
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
- intel_wakeref_init(&engine->wakeref);
+ struct intel_runtime_pm *rpm = &engine->i915->runtime_pm;
+
+ intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_engine_pm.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index 015ac72d7ad0..739c50fefcef 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -10,24 +10,26 @@
#include "intel_engine_types.h"
#include "intel_wakeref.h"
-struct drm_i915_private;
-
-void intel_engine_pm_get(struct intel_engine_cs *engine);
-void intel_engine_pm_put(struct intel_engine_cs *engine);
-
static inline bool
intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
{
return intel_wakeref_is_active(&engine->wakeref);
}
-static inline bool
-intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
+static inline void intel_engine_pm_get(struct intel_engine_cs *engine)
+{
+ intel_wakeref_get(&engine->wakeref);
+}
+
+static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
{
return intel_wakeref_get_if_active(&engine->wakeref);
}
-void intel_engine_park(struct intel_engine_cs *engine);
+static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
+{
+ intel_wakeref_put(&engine->wakeref);
+}
void intel_engine_init__pm(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
new file mode 100644
index 000000000000..4cd54c569911
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
@@ -0,0 +1,177 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#include "gem/i915_gem_object.h"
+
+#include "i915_drv.h"
+#include "intel_engine_pm.h"
+#include "intel_engine_pool.h"
+
+static struct intel_engine_cs *to_engine(struct intel_engine_pool *pool)
+{
+ return container_of(pool, struct intel_engine_cs, pool);
+}
+
+static struct list_head *
+bucket_for_size(struct intel_engine_pool *pool, size_t sz)
+{
+ int n;
+
+ /*
+ * Compute a power-of-two bucket, but throw everything greater than
+ * 16KiB into the same bucket: i.e. the buckets hold objects of
+ * (1 page, 2 pages, 4 pages, 8+ pages).
+ */
+ n = fls(sz >> PAGE_SHIFT) - 1;
+ if (n >= ARRAY_SIZE(pool->cache_list))
+ n = ARRAY_SIZE(pool->cache_list) - 1;
+
+ return &pool->cache_list[n];
+}
+
+static void node_free(struct intel_engine_pool_node *node)
+{
+ i915_gem_object_put(node->obj);
+ i915_active_fini(&node->active);
+ kfree(node);
+}
+
+static int pool_active(struct i915_active *ref)
+{
+ struct intel_engine_pool_node *node =
+ container_of(ref, typeof(*node), active);
+ struct dma_resv *resv = node->obj->base.resv;
+ int err;
+
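+ /*
+ * Installing a NULL exclusive fence drops any stale fences left on
+ * the object by its previous user, before the object is reused.
+ */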
+ if (dma_resv_trylock(resv)) {
+ dma_resv_add_excl_fence(resv, NULL);
+ dma_resv_unlock(resv);
+ }
+
+ err = i915_gem_object_pin_pages(node->obj);
+ if (err)
+ return err;
+
+ /* Hide this pinned object from the shrinker until retired */
+ i915_gem_object_make_unshrinkable(node->obj);
+
+ return 0;
+}
+
+static void pool_retire(struct i915_active *ref)
+{
+ struct intel_engine_pool_node *node =
+ container_of(ref, typeof(*node), active);
+ struct intel_engine_pool *pool = node->pool;
+ struct list_head *list = bucket_for_size(pool, node->obj->base.size);
+ unsigned long flags;
+
+ GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
+
+ i915_gem_object_unpin_pages(node->obj);
+
+ /* Return this object to the shrinker pool */
+ i915_gem_object_make_purgeable(node->obj);
+
+ spin_lock_irqsave(&pool->lock, flags);
+ list_add(&node->link, list);
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+static struct intel_engine_pool_node *
+node_create(struct intel_engine_pool *pool, size_t sz)
+{
+ struct intel_engine_cs *engine = to_engine(pool);
+ struct intel_engine_pool_node *node;
+ struct drm_i915_gem_object *obj;
+
+ node = kmalloc(sizeof(*node),
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->pool = pool;
+ i915_active_init(engine->i915, &node->active, pool_active, pool_retire);
+
+ obj = i915_gem_object_create_internal(engine->i915, sz);
+ if (IS_ERR(obj)) {
+ i915_active_fini(&node->active);
+ kfree(node);
+ return ERR_CAST(obj);
+ }
+
+ node->obj = obj;
+ return node;
+}
+
+struct intel_engine_pool_node *
+intel_engine_pool_get(struct intel_engine_pool *pool, size_t size)
+{
+ struct intel_engine_pool_node *node;
+ struct list_head *list;
+ unsigned long flags;
+ int ret;
+
+ GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
+
+ size = PAGE_ALIGN(size);
+ list = bucket_for_size(pool, size);
+
+ spin_lock_irqsave(&pool->lock, flags);
+ list_for_each_entry(node, list, link) {
+ if (node->obj->base.size < size)
+ continue;
+ list_del(&node->link);
+ break;
+ }
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ if (&node->link == list) {
+ node = node_create(pool, size);
+ if (IS_ERR(node))
+ return node;
+ }
+
+ ret = i915_active_acquire(&node->active);
+ if (ret) {
+ node_free(node);
+ return ERR_PTR(ret);
+ }
+
+ return node;
+}
+
+void intel_engine_pool_init(struct intel_engine_pool *pool)
+{
+ int n;
+
+ spin_lock_init(&pool->lock);
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+ INIT_LIST_HEAD(&pool->cache_list[n]);
+}
+
+void intel_engine_pool_park(struct intel_engine_pool *pool)
+{
+ int n;
+
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+ struct list_head *list = &pool->cache_list[n];
+ struct intel_engine_pool_node *node, *nn;
+
+ list_for_each_entry_safe(node, nn, list, link)
+ node_free(node);
+
+ INIT_LIST_HEAD(list);
+ }
+}
+
+void intel_engine_pool_fini(struct intel_engine_pool *pool)
+{
+ int n;
+
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+ GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
+}
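An aside on the bucketing above: with 4KiB pages, bucket_for_size() maps sizes onto the four cache lists as 1 page, 2 pages, 4 pages and 8+ pages. The standalone sketch below is an illustration only — fls() is reimplemented because the kernel helper is not available in userspace, and PAGE_SHIFT is assumed to be 12 — but it reproduces the same mapping:

#include <stdio.h>

#define PAGE_SHIFT 12
#define NUM_BUCKETS 4

/* Minimal stand-in for the kernel's fls(): index of the highest set bit. */
static int fls_sketch(unsigned int x)
{
	int n = 0;

	while (x) {
		n++;
		x >>= 1;
	}
	return n;
}

static int bucket_index(size_t sz)
{
	int n = fls_sketch(sz >> PAGE_SHIFT) - 1;

	if (n >= NUM_BUCKETS)
		n = NUM_BUCKETS - 1;
	return n;
}

int main(void)
{
	const size_t sizes[] = { 4096, 8192, 16384, 32768, 65536 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%6zu bytes -> bucket %d\n", sizes[i], bucket_index(sizes[i]));
	return 0;
}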
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.h b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
new file mode 100644
index 000000000000..8d069efd9457
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
@@ -0,0 +1,34 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_POOL_H
+#define INTEL_ENGINE_POOL_H
+
+#include "intel_engine_pool_types.h"
+#include "i915_active.h"
+#include "i915_request.h"
+
+struct intel_engine_pool_node *
+intel_engine_pool_get(struct intel_engine_pool *pool, size_t size);
+
+static inline int
+intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
+ struct i915_request *rq)
+{
+ return i915_active_ref(&node->active, rq->timeline, rq);
+}
+
+static inline void
+intel_engine_pool_put(struct intel_engine_pool_node *node)
+{
+ i915_active_release(&node->active);
+}
+
+void intel_engine_pool_init(struct intel_engine_pool *pool);
+void intel_engine_pool_park(struct intel_engine_pool *pool);
+void intel_engine_pool_fini(struct intel_engine_pool *pool);
+
+#endif /* INTEL_ENGINE_POOL_H */
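For orientation, the intended call pattern pairs intel_engine_pool_get() with intel_engine_pool_mark_active() and intel_engine_pool_put(). The helper below is hypothetical (no such function exists in the patch; 'rq' is the request the scratch object must outlive, and error handling is abbreviated), sketching how a consumer would borrow a buffer for the lifetime of a request:

/* Hypothetical consumer: borrow a scratch object until rq retires. */
static struct drm_i915_gem_object *
borrow_scratch(struct intel_engine_cs *engine,
	       struct i915_request *rq,
	       size_t size)
{
	struct intel_engine_pool_node *node;
	int err;

	node = intel_engine_pool_get(&engine->pool, size);
	if (IS_ERR(node))
		return ERR_CAST(node);

	/*
	 * Keep the node pinned and unshrinkable until rq is retired;
	 * pool_retire() then returns it to the cache as purgeable.
	 */
	err = intel_engine_pool_mark_active(node, rq);

	/* Drop the explicit acquire taken by intel_engine_pool_get(). */
	intel_engine_pool_put(node);

	return err ? ERR_PTR(err) : node->obj;
}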
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h b/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h
new file mode 100644
index 000000000000..e31ee361b76f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h
@@ -0,0 +1,29 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_POOL_TYPES_H
+#define INTEL_ENGINE_POOL_TYPES_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include "i915_active_types.h"
+
+struct drm_i915_gem_object;
+
+struct intel_engine_pool {
+ spinlock_t lock;
+ struct list_head cache_list[4];
+};
+
+struct intel_engine_pool_node {
+ struct i915_active active;
+ struct drm_i915_gem_object *obj;
+ struct list_head link;
+ struct intel_engine_pool *pool;
+};
+
+#endif /* INTEL_ENGINE_POOL_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index da61dd329210..a82cea95c2f2 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -12,19 +12,40 @@
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/llist.h>
+#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/types.h>
#include "i915_gem.h"
-#include "i915_gem_batch_pool.h"
#include "i915_pmu.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
-#include "gt/intel_timeline_types.h"
+#include "intel_engine_pool_types.h"
#include "intel_sseu.h"
+#include "intel_timeline_types.h"
#include "intel_wakeref.h"
#include "intel_workarounds_types.h"
+/* Legacy HW Engine ID */
+
+#define RCS0_HW 0
+#define VCS0_HW 1
+#define BCS0_HW 2
+#define VECS0_HW 3
+#define VCS1_HW 4
+#define VCS2_HW 6
+#define VCS3_HW 7
+#define VECS1_HW 12
+
+/* Gen11+ HW Engine class + instance */
+#define RENDER_CLASS 0
+#define VIDEO_DECODE_CLASS 1
+#define VIDEO_ENHANCEMENT_CLASS 2
+#define COPY_ENGINE_CLASS 3
+#define OTHER_CLASS 4
+#define MAX_ENGINE_CLASS 4
+#define MAX_ENGINE_INSTANCE 3
+
#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 8
@@ -68,10 +89,6 @@ struct intel_ring {
struct i915_vma *vma;
void *vaddr;
- struct intel_timeline *timeline;
- struct list_head request_list;
- struct list_head active_link;
-
/*
* As we have two types of rings, one global to the engine used
* by ringbuffer submission and those that are exclusive to a
@@ -208,6 +225,16 @@ struct intel_engine_execlists {
unsigned int port_mask;
/**
+ * @switch_priority_hint: Second context priority.
+ *
+ * We submit multiple contexts to the HW simultaneously and would
+ * like to occasionally switch between them to emulate timeslicing.
+ * To know when timeslicing is suitable, we track the priority of
+ * the context submitted second.
+ */
+ int switch_priority_hint;
+
+ /**
* @queue_priority_hint: Highest pending priority.
*
* When we add requests into the queue, or adjust the priority of
@@ -263,22 +290,27 @@ struct intel_engine_cs {
char name[INTEL_ENGINE_CS_MAX_NAME];
enum intel_engine_id id;
+ enum intel_engine_id legacy_idx;
+
unsigned int hw_id;
unsigned int guc_id;
- intel_engine_mask_t mask;
- u8 uabi_class;
+ intel_engine_mask_t mask;
u8 class;
u8 instance;
+
+ u8 uabi_class;
+ u8 uabi_instance;
+
u32 context_size;
u32 mmio_base;
u32 uabi_capabilities;
- struct intel_sseu sseu;
+ struct rb_node uabi_node;
- struct intel_ring *buffer;
+ struct intel_sseu sseu;
struct {
spinlock_t lock;
@@ -298,6 +330,11 @@ struct intel_engine_cs {
struct drm_i915_gem_object *default_state;
void *pinned_default_state;
+ struct {
+ struct intel_ring *ring;
+ struct intel_timeline *timeline;
+ } legacy;
+
/* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the
* heavyweight seqno dance, we delegate the task (of being the
@@ -354,7 +391,7 @@ struct intel_engine_cs {
* when the command parser is enabled. Prevents the client from
* modifying the batch contents after software parsing.
*/
- struct i915_gem_batch_pool batch_pool;
+ struct intel_engine_pool pool;
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
new file mode 100644
index 000000000000..77cd5de83930
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -0,0 +1,303 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/llist.h>
+
+#include "i915_drv.h"
+#include "intel_engine.h"
+#include "intel_engine_user.h"
+
+struct intel_engine_cs *
+intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
+{
+ struct rb_node *p = i915->uabi_engines.rb_node;
+
+ while (p) {
+ struct intel_engine_cs *it =
+ rb_entry(p, typeof(*it), uabi_node);
+
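+ /*
+ * Engines are ordered by (uabi_class, uabi_instance); in the '>'
+ * branch below, instance is only decisive once the classes compare
+ * equal.
+ */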
+ if (class < it->uabi_class)
+ p = p->rb_left;
+ else if (class > it->uabi_class ||
+ instance > it->uabi_instance)
+ p = p->rb_right;
+ else if (instance < it->uabi_instance)
+ p = p->rb_left;
+ else
+ return it;
+ }
+
+ return NULL;
+}
+
+void intel_engine_add_user(struct intel_engine_cs *engine)
+{
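+ /*
+ * uabi_node doubles as a llist_node for now: engines are collected
+ * locklessly here and only sorted into the final rbtree later, by
+ * intel_engines_driver_register().
+ */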
+ llist_add((struct llist_node *)&engine->uabi_node,
+ (struct llist_head *)&engine->i915->uabi_engines);
+}
+
+static const u8 uabi_classes[] = {
+ [RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
+ [COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
+ [VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
+ [VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
+};
+
+static int engine_cmp(void *priv, struct list_head *A, struct list_head *B)
+{
+ const struct intel_engine_cs *a =
+ container_of((struct rb_node *)A, typeof(*a), uabi_node);
+ const struct intel_engine_cs *b =
+ container_of((struct rb_node *)B, typeof(*b), uabi_node);
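+ /* A and B are uabi_node storage reused as list_heads by sort_engines(). */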
+
+ if (uabi_classes[a->class] < uabi_classes[b->class])
+ return -1;
+ if (uabi_classes[a->class] > uabi_classes[b->class])
+ return 1;
+
+ if (a->instance < b->instance)
+ return -1;
+ if (a->instance > b->instance)
+ return 1;
+
+ return 0;
+}
+
+static struct llist_node *get_engines(struct drm_i915_private *i915)
+{
+ return llist_del_all((struct llist_head *)&i915->uabi_engines);
+}
+
+static void sort_engines(struct drm_i915_private *i915,
+ struct list_head *engines)
+{
+ struct llist_node *pos, *next;
+
+ llist_for_each_safe(pos, next, get_engines(i915)) {
+ struct intel_engine_cs *engine =
+ container_of((struct rb_node *)pos, typeof(*engine),
+ uabi_node);
+ list_add((struct list_head *)&engine->uabi_node, engines);
+ }
+ list_sort(NULL, engines, engine_cmp);
+}
+
+static void set_scheduler_caps(struct drm_i915_private *i915)
+{
+ static const struct {
+ u8 engine;
+ u8 sched;
+ } map[] = {
+#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
+ MAP(HAS_PREEMPTION, PREEMPTION),
+ MAP(HAS_SEMAPHORES, SEMAPHORES),
+ MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
+#undef MAP
+ };
+ struct intel_engine_cs *engine;
+ u32 enabled, disabled;
+
+ enabled = 0;
+ disabled = 0;
+ for_each_uabi_engine(engine, i915) { /* all engines must agree! */
+ int i;
+
+ if (engine->schedule)
+ enabled |= (I915_SCHEDULER_CAP_ENABLED |
+ I915_SCHEDULER_CAP_PRIORITY);
+ else
+ disabled |= (I915_SCHEDULER_CAP_ENABLED |
+ I915_SCHEDULER_CAP_PRIORITY);
+
+ for (i = 0; i < ARRAY_SIZE(map); i++) {
+ if (engine->flags & BIT(map[i].engine))
+ enabled |= BIT(map[i].sched);
+ else
+ disabled |= BIT(map[i].sched);
+ }
+ }
+
+ i915->caps.scheduler = enabled & ~disabled;
+ if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
+ i915->caps.scheduler = 0;
+}
+
+const char *intel_engine_class_repr(u8 class)
+{
+ static const char * const uabi_names[] = {
+ [RENDER_CLASS] = "rcs",
+ [COPY_ENGINE_CLASS] = "bcs",
+ [VIDEO_DECODE_CLASS] = "vcs",
+ [VIDEO_ENHANCEMENT_CLASS] = "vecs",
+ };
+
+ if (class >= ARRAY_SIZE(uabi_names) || !uabi_names[class])
+ return "xxx";
+
+ return uabi_names[class];
+}
+
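+/* Cursor used to assign each engine its slot in the legacy execbuf ring order. */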
+struct legacy_ring {
+ struct intel_gt *gt;
+ u8 class;
+ u8 instance;
+};
+
+static int legacy_ring_idx(const struct legacy_ring *ring)
+{
+ static const struct {
+ u8 base, max;
+ } map[] = {
+ [RENDER_CLASS] = { RCS0, 1 },
+ [COPY_ENGINE_CLASS] = { BCS0, 1 },
+ [VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
+ [VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
+ };
+
+ if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
+ return -1;
+
+ if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
+ return -1;
+
+ return map[ring->class].base + ring->instance;
+}
+
+static void add_legacy_ring(struct legacy_ring *ring,
+ struct intel_engine_cs *engine)
+{
+ int idx;
+
+ if (engine->gt != ring->gt || engine->class != ring->class) {
+ ring->gt = engine->gt;
+ ring->class = engine->class;
+ ring->instance = 0;
+ }
+
+ idx = legacy_ring_idx(ring);
+ if (unlikely(idx == -1))
+ return;
+
+ GEM_BUG_ON(idx >= ARRAY_SIZE(ring->gt->engine));
+ ring->gt->engine[idx] = engine;
+ ring->instance++;
+
+ engine->legacy_idx = idx;
+}
+
+void intel_engines_driver_register(struct drm_i915_private *i915)
+{
+ struct legacy_ring ring = {};
+ u8 uabi_instances[4] = {};
+ struct list_head *it, *next;
+ struct rb_node **p, *prev;
+ LIST_HEAD(engines);
+
+ sort_engines(i915, &engines);
+
+ prev = NULL;
+ p = &i915->uabi_engines.rb_node;
+ list_for_each_safe(it, next, &engines) {
+ struct intel_engine_cs *engine =
+ container_of((struct rb_node *)it, typeof(*engine),
+ uabi_node);
+ char old[sizeof(engine->name)];
+
+ GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
+ engine->uabi_class = uabi_classes[engine->class];
+
+ GEM_BUG_ON(engine->uabi_class >= ARRAY_SIZE(uabi_instances));
+ engine->uabi_instance = uabi_instances[engine->uabi_class]++;
+
+ /* Replace the internal name with the final user-facing name */
+ memcpy(old, engine->name, sizeof(engine->name));
+ scnprintf(engine->name, sizeof(engine->name), "%s%u",
+ intel_engine_class_repr(engine->class),
+ engine->uabi_instance);
+ DRM_DEBUG_DRIVER("renamed %s to %s\n", old, engine->name);
+
+ rb_link_node(&engine->uabi_node, prev, p);
+ rb_insert_color(&engine->uabi_node, &i915->uabi_engines);
+
+ GEM_BUG_ON(intel_engine_lookup_user(i915,
+ engine->uabi_class,
+ engine->uabi_instance) != engine);
+
+ /* Fix up the mapping to match default execbuf::user_map[] */
+ add_legacy_ring(&ring, engine);
+
+ prev = &engine->uabi_node;
+ p = &prev->rb_right;
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
+ IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
+ struct intel_engine_cs *engine;
+ unsigned int isolation;
+ int class, inst;
+ int errors = 0;
+
+ for (class = 0; class < ARRAY_SIZE(uabi_instances); class++) {
+ for (inst = 0; inst < uabi_instances[class]; inst++) {
+ engine = intel_engine_lookup_user(i915,
+ class, inst);
+ if (!engine) {
+ pr_err("UABI engine not found for { class:%d, instance:%d }\n",
+ class, inst);
+ errors++;
+ continue;
+ }
+
+ if (engine->uabi_class != class ||
+ engine->uabi_instance != inst) {
+ pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
+ engine->name,
+ engine->uabi_class,
+ engine->uabi_instance,
+ class, inst);
+ errors++;
+ continue;
+ }
+ }
+ }
+
+ /*
+ * Make sure that classes with multiple engine instances all
+ * share the same basic configuration.
+ */
+ isolation = intel_engines_has_context_isolation(i915);
+ for_each_uabi_engine(engine, i915) {
+ unsigned int bit = BIT(engine->uabi_class);
+ unsigned int expected = engine->default_state ? bit : 0;
+
+ if ((isolation & bit) != expected) {
+ pr_err("mismatching default context state for class %d on engine %s\n",
+ engine->uabi_class, engine->name);
+ errors++;
+ }
+ }
+
+ if (WARN(errors, "Invalid UABI engine mapping found"))
+ i915->uabi_engines = RB_ROOT;
+ }
+
+ set_scheduler_caps(i915);
+}
+
+unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ unsigned int which;
+
+ which = 0;
+ for_each_uabi_engine(engine, i915)
+ if (engine->default_state)
+ which |= BIT(engine->uabi_class);
+
+ return which;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.h b/drivers/gpu/drm/i915/gt/intel_engine_user.h
new file mode 100644
index 000000000000..f845ea1cbfaa
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.h
@@ -0,0 +1,25 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_USER_H
+#define INTEL_ENGINE_USER_H
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_engine_cs;
+
+struct intel_engine_cs *
+intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
+
+unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);
+
+void intel_engine_add_user(struct intel_engine_cs *engine);
+void intel_engines_driver_register(struct drm_i915_private *i915);
+
+const char *intel_engine_class_repr(u8 class);
+
+#endif /* INTEL_ENGINE_USER_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 69f34737325f..86e00a2db8a4 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -186,11 +186,12 @@
#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
-#define COLOR_BLT_CMD (2<<29 | 0x40<<22 | (5-2))
+#define COLOR_BLT_CMD (2 << 29 | 0x40 << 22 | (5 - 2))
#define XY_COLOR_BLT_CMD (2 << 29 | 0x50 << 22)
-#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
-#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
-#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
+#define SRC_COPY_BLT_CMD (2 << 29 | 0x43 << 22)
+#define GEN9_XY_FAST_COPY_BLT_CMD (2 << 29 | 0x42 << 22)
+#define XY_SRC_COPY_BLT_CMD (2 << 29 | 0x53 << 22)
+#define XY_MONO_SRC_COPY_IMM_BLT (2 << 29 | 0x71 << 22 | 5)
#define BLT_WRITE_A (2<<20)
#define BLT_WRITE_RGB (1<<20)
#define BLT_WRITE_RGBA (BLT_WRITE_RGB | BLT_WRITE_A)
@@ -207,6 +208,8 @@
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
+#define PIPE_CONTROL_COMMAND_CACHE_INVALIDATE (1<<29) /* gen11+ */
+#define PIPE_CONTROL_TILE_CACHE_FLUSH (1<<28) /* gen11+ */
#define PIPE_CONTROL_FLUSH_L3 (1<<27)
#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
#define PIPE_CONTROL_MMIO_WRITE (1<<23)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index f7e69db4019d..d48ec9a76ed1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -4,7 +4,6 @@
*/
#include "i915_drv.h"
-
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_uncore.h"
@@ -14,14 +13,15 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
gt->i915 = i915;
gt->uncore = &i915->uncore;
- INIT_LIST_HEAD(&gt->active_rings);
- INIT_LIST_HEAD(&gt->closed_vma);
+ spin_lock_init(&gt->irq_lock);
+ INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
intel_gt_init_hangcheck(gt);
intel_gt_init_reset(gt);
intel_gt_pm_init_early(gt);
+ intel_uc_init_early(&gt->uc);
}
void intel_gt_init_hw(struct drm_i915_private *i915)
@@ -79,7 +79,10 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
I915_MASTER_ERROR_INTERRUPT);
}
- if (INTEL_GEN(i915) >= 8) {
+ if (INTEL_GEN(i915) >= 12) {
+ rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
+ intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
+ } else if (INTEL_GEN(i915) >= 8) {
rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
} else if (INTEL_GEN(i915) >= 6) {
@@ -117,14 +120,27 @@ static void gen6_check_faults(struct intel_gt *gt)
static void gen8_check_faults(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
- u32 fault = intel_uncore_read(uncore, GEN8_RING_FAULT_REG);
+ i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
+ u32 fault;
+ if (INTEL_GEN(gt->i915) >= 12) {
+ fault_reg = GEN12_RING_FAULT_REG;
+ fault_data0_reg = GEN12_FAULT_TLB_DATA0;
+ fault_data1_reg = GEN12_FAULT_TLB_DATA1;
+ } else {
+ fault_reg = GEN8_RING_FAULT_REG;
+ fault_data0_reg = GEN8_FAULT_TLB_DATA0;
+ fault_data1_reg = GEN8_FAULT_TLB_DATA1;
+ }
+
+ fault = intel_uncore_read(uncore, fault_reg);
if (fault & RING_FAULT_VALID) {
u32 fault_data0, fault_data1;
u64 fault_addr;
- fault_data0 = intel_uncore_read(uncore, GEN8_FAULT_TLB_DATA0);
- fault_data1 = intel_uncore_read(uncore, GEN8_FAULT_TLB_DATA1);
+ fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
+ fault_data1 = intel_uncore_read(uncore, fault_data1_reg);
+
fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
((u64)fault_data0 << 12);
@@ -231,7 +247,8 @@ int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
if (ret)
goto err_unref;
- gt->scratch = vma;
+ gt->scratch = i915_vma_make_unshrinkable(vma);
+
return 0;
err_unref:
@@ -244,7 +261,8 @@ void intel_gt_fini_scratch(struct intel_gt *gt)
i915_vma_unpin_and_release(&gt->scratch, 0);
}
-void intel_gt_cleanup_early(struct intel_gt *gt)
+void intel_gt_driver_late_release(struct intel_gt *gt)
{
+ intel_uc_driver_late_release(&gt->uc);
intel_gt_fini_reset(gt);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 640bb0531f5b..4920cb351f10 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -30,7 +30,7 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
void intel_gt_init_hw(struct drm_i915_private *i915);
-void intel_gt_cleanup_early(struct intel_gt *gt);
+void intel_gt_driver_late_release(struct intel_gt *gt);
void intel_gt_check_and_clear_faults(struct intel_gt *gt);
void intel_gt_clear_error_registers(struct intel_gt *gt,
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
new file mode 100644
index 000000000000..34a4fb624bf7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -0,0 +1,455 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/sched/clock.h>
+
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_uncore.h"
+
+static void guc_irq_handler(struct intel_guc *guc, u16 iir)
+{
+ if (iir & GUC_INTR_GUC2HOST)
+ intel_guc_to_host_event_handler(guc);
+}
+
+static void
+cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
+{
+ bool tasklet = false;
+
+ if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
+ tasklet = true;
+
+ if (iir & GT_RENDER_USER_INTERRUPT) {
+ intel_engine_breadcrumbs_irq(engine);
+ tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
+ }
+
+ if (tasklet)
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+}
+
+static u32
+gen11_gt_engine_identity(struct intel_gt *gt,
+ const unsigned int bank, const unsigned int bit)
+{
+ void __iomem * const regs = gt->uncore->regs;
+ u32 timeout_ts;
+ u32 ident;
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
+
+ /*
+ * NB: Specs do not specify how long to spin wait,
+ * so we do ~100us as an educated guess.
+ */
+ timeout_ts = (local_clock() >> 10) + 100;
+ do {
+ ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
+ } while (!(ident & GEN11_INTR_DATA_VALID) &&
+ !time_after32(local_clock() >> 10, timeout_ts));
+
+ if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
+ DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
+ bank, bit, ident);
+ return 0;
+ }
+
+ raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
+ GEN11_INTR_DATA_VALID);
+
+ return ident;
+}
+
+static void
+gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
+ const u16 iir)
+{
+ if (instance == OTHER_GUC_INSTANCE)
+ return guc_irq_handler(&gt->uc.guc, iir);
+
+ if (instance == OTHER_GTPM_INSTANCE)
+ return gen11_rps_irq_handler(gt, iir);
+
+ WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
+ instance, iir);
+}
+
+static void
+gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
+ const u8 instance, const u16 iir)
+{
+ struct intel_engine_cs *engine;
+
+ if (instance <= MAX_ENGINE_INSTANCE)
+ engine = gt->engine_class[class][instance];
+ else
+ engine = NULL;
+
+ if (likely(engine))
+ return cs_irq_handler(engine, iir);
+
+ WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
+ class, instance);
+}
+
+static void
+gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
+{
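+ /*
+ * The identity dword packs the source engine class and instance
+ * alongside a 16-bit IIR payload in the low word.
+ */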
+ const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
+ const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
+ const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
+
+ if (unlikely(!intr))
+ return;
+
+ if (class <= COPY_ENGINE_CLASS)
+ return gen11_engine_irq_handler(gt, class, instance, intr);
+
+ if (class == OTHER_CLASS)
+ return gen11_other_irq_handler(gt, instance, intr);
+
+ WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
+ class, instance, intr);
+}
+
+static void
+gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
+{
+ void __iomem * const regs = gt->uncore->regs;
+ unsigned long intr_dw;
+ unsigned int bit;
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
+
+ for_each_set_bit(bit, &intr_dw, 32) {
+ const u32 ident = gen11_gt_engine_identity(gt, bank, bit);
+
+ gen11_gt_identity_handler(gt, ident);
+ }
+
+ /* Clear must come after the shared IIRs have been serviced for the engine */
+ raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
+}
+
+void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
+{
+ unsigned int bank;
+
+ spin_lock(&gt->irq_lock);
+
+ for (bank = 0; bank < 2; bank++) {
+ if (master_ctl & GEN11_GT_DW_IRQ(bank))
+ gen11_gt_bank_handler(gt, bank);
+ }
+
+ spin_unlock(&gt->irq_lock);
+}
+
+bool gen11_gt_reset_one_iir(struct intel_gt *gt,
+ const unsigned int bank, const unsigned int bit)
+{
+ void __iomem * const regs = gt->uncore->regs;
+ u32 dw;
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
+ if (dw & BIT(bit)) {
+ /*
+ * According to the BSpec, DW_IIR bits cannot be cleared without
+ * first servicing the Selector & Shared IIR registers.
+ */
+ gen11_gt_engine_identity(gt, bank, bit);
+
+ /*
+ * We locked GT INT DW by reading it. If we want to (try
+ * to) recover from this successfully, we need to clear
+ * our bit, otherwise we are locking the register for
+ * everybody.
+ */
+ raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
+
+ return true;
+ }
+
+ return false;
+}
+
+void gen11_gt_irq_reset(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ /* Disable RCS, BCS, VCS and VECS class interrupts. */
+ intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0);
+
+ /* Restore irq masks on RCS, BCS, VCS and VECS engines. */
+ intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0);
+
+ intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
+}
+
+void gen11_gt_irq_postinstall(struct intel_gt *gt)
+{
+ const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
+ struct intel_uncore *uncore = gt->uncore;
+ const u32 dmask = irqs << 16 | irqs;
+ const u32 smask = irqs << 16;
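+ /*
+ * Each enable/mask register covers a pair of engine slots, one per
+ * 16-bit half: dmask programs both halves, smask only the half that
+ * is backed by a real engine (the other slot being reserved).
+ */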
+
+ BUILD_BUG_ON(irqs & 0xffff0000);
+
+ /* Enable RCS, BCS, VCS and VECS class interrupts. */
+ intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
+ intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
+
+ /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
+ intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
+ intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
+ intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
+ intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
+ intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
+
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS itself
+ * is enabled/disabled.
+ */
+ gt->pm_ier = 0x0;
+ gt->pm_imr = ~gt->pm_ier;
+ intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
+
+ /* Same thing for GuC interrupts */
+ intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
+}
+
+void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
+{
+ if (gt_iir & GT_RENDER_USER_INTERRUPT)
+ intel_engine_breadcrumbs_irq(gt->engine_class[RENDER_CLASS][0]);
+ if (gt_iir & ILK_BSD_USER_INTERRUPT)
+ intel_engine_breadcrumbs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0]);
+}
+
+static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
+{
+ if (!HAS_L3_DPF(gt->i915))
+ return;
+
+ spin_lock(&gt->irq_lock);
+ gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
+ spin_unlock(&gt->irq_lock);
+
+ if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
+ gt->i915->l3_parity.which_slice |= 1 << 1;
+
+ if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
+ gt->i915->l3_parity.which_slice |= 1 << 0;
+
+ schedule_work(&gt->i915->l3_parity.error_work);
+}
+
+void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
+{
+ if (gt_iir & GT_RENDER_USER_INTERRUPT)
+ intel_engine_breadcrumbs_irq(gt->engine_class[RENDER_CLASS][0]);
+ if (gt_iir & GT_BSD_USER_INTERRUPT)
+ intel_engine_breadcrumbs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0]);
+ if (gt_iir & GT_BLT_USER_INTERRUPT)
+ intel_engine_breadcrumbs_irq(gt->engine_class[COPY_ENGINE_CLASS][0]);
+
+ if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
+ GT_BSD_CS_ERROR_INTERRUPT |
+ GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
+ DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
+
+ if (gt_iir & GT_PARITY_ERROR(gt->i915))
+ gen7_parity_error_irq_handler(gt, gt_iir);
+}
+
+void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
+{
+ void __iomem * const regs = gt->uncore->regs;
+
+ if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
+ gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
+ if (likely(gt_iir[0]))
+ raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
+ }
+
+ if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
+ gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
+ if (likely(gt_iir[1]))
+ raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
+ }
+
+ if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
+ gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
+ if (likely(gt_iir[2]))
+ raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
+ }
+
+ if (master_ctl & GEN8_GT_VECS_IRQ) {
+ gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
+ if (likely(gt_iir[3]))
+ raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
+ }
+}
+
+void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
+{
+ if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
+ cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
+ gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
+ cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
+ gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
+ }
+
+ if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
+ cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
+ gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
+ cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
+ gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
+ }
+
+ if (master_ctl & GEN8_GT_VECS_IRQ) {
+ cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
+ gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
+ }
+
+ if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
+ gen6_rps_irq_handler(gt->i915, gt_iir[2]);
+ guc_irq_handler(&gt->uc.guc, gt_iir[2] >> 16);
+ }
+}
+
+void gen8_gt_irq_reset(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ GEN8_IRQ_RESET_NDX(uncore, GT, 0);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 1);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 2);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 3);
+}
+
+void gen8_gt_irq_postinstall(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ /* These are interrupts we'll toggle with the ring mask register */
+ u32 gt_interrupts[] = {
+ (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+ GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
+
+ (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
+ GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
+
+ 0,
+
+ (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
+ };
+
+ gt->pm_ier = 0x0;
+ gt->pm_imr = ~gt->pm_ier;
+ GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS itself
+ * is enabled/disabled. The same will be the case for GuC interrupts.
+ */
+ GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
+}
+
+static void gen5_gt_update_irq(struct intel_gt *gt,
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
+{
+ lockdep_assert_held(&gt->irq_lock);
+
+ GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);
+
+ gt->gt_imr &= ~interrupt_mask;
+ gt->gt_imr |= (~enabled_irq_mask & interrupt_mask);
+ intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr);
+}
+
+void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask)
+{
+ gen5_gt_update_irq(gt, mask, mask);
+ intel_uncore_posting_read_fw(gt->uncore, GTIMR);
+}
+
+void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask)
+{
+ gen5_gt_update_irq(gt, mask, 0);
+}
+
+void gen5_gt_irq_reset(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+
+ GEN3_IRQ_RESET(uncore, GT);
+ if (INTEL_GEN(gt->i915) >= 6)
+ GEN3_IRQ_RESET(uncore, GEN6_PM);
+}
+
+void gen5_gt_irq_postinstall(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ u32 pm_irqs = 0;
+ u32 gt_irqs = 0;
+
+ gt->gt_imr = ~0;
+ if (HAS_L3_DPF(gt->i915)) {
+ /* L3 parity interrupt is always unmasked. */
+ gt->gt_imr = ~GT_PARITY_ERROR(gt->i915);
+ gt_irqs |= GT_PARITY_ERROR(gt->i915);
+ }
+
+ gt_irqs |= GT_RENDER_USER_INTERRUPT;
+ if (IS_GEN(gt->i915, 5))
+ gt_irqs |= ILK_BSD_USER_INTERRUPT;
+ else
+ gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
+
+ GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);
+
+ if (INTEL_GEN(gt->i915) >= 6) {
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS
+ * itself is enabled/disabled.
+ */
+ if (HAS_ENGINE(gt->i915, VECS0)) {
+ pm_irqs |= PM_VEBOX_USER_INTERRUPT;
+ gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
+ }
+
+ gt->pm_imr = 0xffffffff;
+ GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
+ }
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
new file mode 100644
index 000000000000..8f37593712c9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_IRQ_H
+#define INTEL_GT_IRQ_H
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
+ GEN8_GT_BCS_IRQ | \
+ GEN8_GT_VCS0_IRQ | \
+ GEN8_GT_VCS1_IRQ | \
+ GEN8_GT_VECS_IRQ | \
+ GEN8_GT_PM_IRQ | \
+ GEN8_GT_GUC_IRQ)
+
+void gen11_gt_irq_reset(struct intel_gt *gt);
+void gen11_gt_irq_postinstall(struct intel_gt *gt);
+void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl);
+
+bool gen11_gt_reset_one_iir(struct intel_gt *gt,
+ const unsigned int bank,
+ const unsigned int bit);
+
+void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir);
+
+void gen5_gt_irq_postinstall(struct intel_gt *gt);
+void gen5_gt_irq_reset(struct intel_gt *gt);
+void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask);
+void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask);
+
+void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir);
+
+void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]);
+void gen8_gt_irq_reset(struct intel_gt *gt);
+void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]);
+void gen8_gt_irq_postinstall(struct intel_gt *gt);
+
+#endif /* INTEL_GT_IRQ_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 65c0d0c9d543..1363e069ec83 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -17,7 +17,7 @@ static void pm_notify(struct drm_i915_private *i915, int state)
blocking_notifier_call_chain(&i915->gt.pm_notifications, state, i915);
}
-static int intel_gt_unpark(struct intel_wakeref *wf)
+static int __gt_unpark(struct intel_wakeref *wf)
{
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
struct drm_i915_private *i915 = gt->i915;
@@ -53,14 +53,7 @@ static int intel_gt_unpark(struct intel_wakeref *wf)
return 0;
}
-void intel_gt_pm_get(struct intel_gt *gt)
-{
- struct intel_runtime_pm *rpm = &gt->i915->runtime_pm;
-
- intel_wakeref_get(rpm, &gt->wakeref, intel_gt_unpark);
-}
-
-static int intel_gt_park(struct intel_wakeref *wf)
+static int __gt_park(struct intel_wakeref *wf)
{
struct drm_i915_private *i915 =
container_of(wf, typeof(*i915), gt.wakeref);
@@ -74,22 +67,25 @@ static int intel_gt_park(struct intel_wakeref *wf)
if (INTEL_GEN(i915) >= 6)
gen6_rps_idle(i915);
+ /* Everything switched off, flush any residual interrupt just in case */
+ intel_synchronize_irq(i915);
+
GEM_BUG_ON(!wakeref);
intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
return 0;
}
-void intel_gt_pm_put(struct intel_gt *gt)
-{
- struct intel_runtime_pm *rpm = &gt->i915->runtime_pm;
-
- intel_wakeref_put(rpm, &gt->wakeref, intel_gt_park);
-}
+static const struct intel_wakeref_ops wf_ops = {
+ .get = __gt_unpark,
+ .put = __gt_park,
+ .flags = INTEL_WAKEREF_PUT_ASYNC,
+};
void intel_gt_pm_init_early(struct intel_gt *gt)
{
- intel_wakeref_init(&gt->wakeref);
+ intel_wakeref_init(&gt->wakeref, &gt->i915->runtime_pm, &wf_ops);
+
BLOCKING_INIT_NOTIFIER_HEAD(&gt->pm_notifications);
}
@@ -164,3 +160,15 @@ int intel_gt_resume(struct intel_gt *gt)
return err;
}
+
+void intel_gt_runtime_suspend(struct intel_gt *gt)
+{
+ intel_uc_runtime_suspend(&gt->uc);
+}
+
+int intel_gt_runtime_resume(struct intel_gt *gt)
+{
+ intel_gt_init_swizzling(gt);
+
+ return intel_uc_runtime_resume(&gt->uc);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index ba960e1fc209..fb39d99cd6ee 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -9,19 +9,44 @@
#include <linux/types.h>
-struct intel_gt;
+#include "intel_gt_types.h"
+#include "intel_wakeref.h"
enum {
INTEL_GT_UNPARK,
INTEL_GT_PARK,
};
-void intel_gt_pm_get(struct intel_gt *gt);
-void intel_gt_pm_put(struct intel_gt *gt);
+static inline bool intel_gt_pm_is_awake(const struct intel_gt *gt)
+{
+ return intel_wakeref_is_active(&gt->wakeref);
+}
+
+static inline void intel_gt_pm_get(struct intel_gt *gt)
+{
+ intel_wakeref_get(&gt->wakeref);
+}
+
+static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
+{
+ return intel_wakeref_get_if_active(&gt->wakeref);
+}
+
+static inline void intel_gt_pm_put(struct intel_gt *gt)
+{
+ intel_wakeref_put(&gt->wakeref);
+}
+
+static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
+{
+ return intel_wakeref_wait_for_idle(&gt->wakeref);
+}
void intel_gt_pm_init_early(struct intel_gt *gt);
void intel_gt_sanitize(struct intel_gt *gt, bool force);
int intel_gt_resume(struct intel_gt *gt);
+void intel_gt_runtime_suspend(struct intel_gt *gt);
+int intel_gt_runtime_resume(struct intel_gt *gt);
#endif /* INTEL_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
new file mode 100644
index 000000000000..babe866126d7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
@@ -0,0 +1,109 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_pm_irq.h"
+
+static void write_pm_imr(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 mask = gt->pm_imr;
+ i915_reg_t reg;
+
+ if (INTEL_GEN(i915) >= 11) {
+ reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
+ mask <<= 16; /* pm is in upper half */
+ } else if (INTEL_GEN(i915) >= 8) {
+ reg = GEN8_GT_IMR(2);
+ } else {
+ reg = GEN6_PMIMR;
+ }
+
+ intel_uncore_write(uncore, reg, mask);
+}
+
+static void gen6_gt_pm_update_irq(struct intel_gt *gt,
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
+{
+ u32 new_val;
+
+ WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ new_val = gt->pm_imr;
+ new_val &= ~interrupt_mask;
+ new_val |= ~enabled_irq_mask & interrupt_mask;
+
+ if (new_val != gt->pm_imr) {
+ gt->pm_imr = new_val;
+ write_pm_imr(gt);
+ }
+}
+
+void gen6_gt_pm_unmask_irq(struct intel_gt *gt, u32 mask)
+{
+ gen6_gt_pm_update_irq(gt, mask, mask);
+}
+
+void gen6_gt_pm_mask_irq(struct intel_gt *gt, u32 mask)
+{
+ gen6_gt_pm_update_irq(gt, mask, 0);
+}
+
+void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ i915_reg_t reg = INTEL_GEN(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ intel_uncore_write(uncore, reg, reset_mask);
+ intel_uncore_write(uncore, reg, reset_mask);
+ intel_uncore_posting_read(uncore, reg);
+}
+
+static void write_pm_ier(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 mask = gt->pm_ier;
+ i915_reg_t reg;
+
+ if (INTEL_GEN(i915) >= 11) {
+ reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
+ mask <<= 16; /* pm is in upper half */
+ } else if (INTEL_GEN(i915) >= 8) {
+ reg = GEN8_GT_IER(2);
+ } else {
+ reg = GEN6_PMIER;
+ }
+
+ intel_uncore_write(uncore, reg, mask);
+}
+
+void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask)
+{
+ lockdep_assert_held(&gt->irq_lock);
+
+ gt->pm_ier |= enable_mask;
+ write_pm_ier(gt);
+ gen6_gt_pm_unmask_irq(gt, enable_mask);
+}
+
+void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask)
+{
+ lockdep_assert_held(&gt->irq_lock);
+
+ gt->pm_ier &= ~disable_mask;
+ gen6_gt_pm_mask_irq(gt, disable_mask);
+ write_pm_ier(gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h
new file mode 100644
index 000000000000..b29816a04809
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.h
@@ -0,0 +1,22 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_PM_IRQ_H
+#define INTEL_GT_PM_IRQ_H
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+void gen6_gt_pm_unmask_irq(struct intel_gt *gt, u32 mask);
+void gen6_gt_pm_mask_irq(struct intel_gt *gt, u32 mask);
+
+void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask);
+void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask);
+
+void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask);
+
+#endif /* INTEL_GT_PM_IRQ_H */
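
The IMR update rule in gen6_gt_pm_update_irq() is easy to misread because IMR is a mask register: a set bit *disables* delivery, so "unmask" means writing zeroes. A self-checking sketch of the same arithmetic, with hypothetical values and a plain uint32_t in place of the MMIO register:

#include <assert.h>
#include <stdint.h>

/* Recompute an IMR-style register: within 'which', a 1 in 'enabled'
 * clears the mask bit (IRQ delivered), a 0 sets it (IRQ masked). */
static uint32_t pm_imr_update(uint32_t imr, uint32_t which, uint32_t enabled)
{
        assert((enabled & ~which) == 0); /* can only enable bits we touch */

        imr &= ~which;                  /* all touched bits unmasked ... */
        imr |= ~enabled & which;        /* ... then re-mask the disabled */
        return imr;
}

int main(void)
{
        uint32_t imr = 0xffffffff;              /* everything masked */

        imr = pm_imr_update(imr, 0x6, 0x6);     /* unmask bits 1-2 */
        assert(imr == 0xfffffff9);
        imr = pm_imr_update(imr, 0x2, 0x0);     /* mask bit 1 again */
        assert(imr == 0xfffffffb);
        return 0;
}
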
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 34d4a868e4f1..dc295c196d11 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -16,11 +16,13 @@
#include "uc/intel_uc.h"
#include "i915_vma.h"
+#include "intel_engine_types.h"
#include "intel_reset_types.h"
#include "intel_wakeref.h"
struct drm_i915_private;
struct i915_ggtt;
+struct intel_engine_cs;
struct intel_uncore;
struct intel_hangcheck {
@@ -39,7 +41,7 @@ struct intel_gt {
struct intel_uc uc;
struct intel_gt_timelines {
- struct mutex mutex; /* protects list */
+ spinlock_t lock; /* protects active_list */
struct list_head active_list;
/* Pack multiple timelines' seqnos into the same page */
@@ -47,8 +49,6 @@ struct intel_gt {
struct list_head hwsp_free_list;
} timelines;
- struct list_head active_rings;
-
struct intel_wakeref wakeref;
struct list_head closed_vma;
@@ -72,10 +72,16 @@ struct intel_gt {
struct i915_vma *scratch;
- u32 pm_imr;
+ spinlock_t irq_lock;
+ u32 gt_imr;
u32 pm_ier;
+ u32 pm_imr;
u32 pm_guc_events;
+
+ struct intel_engine_cs *engine[I915_NUM_ENGINES];
+ struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
+ [MAX_ENGINE_INSTANCE + 1];
};
enum intel_gt_scratch_field {
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index d9061d9348cb..d42584439f51 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -136,9 +136,12 @@
#include "gem/i915_gem_context.h"
#include "i915_drv.h"
+#include "i915_perf.h"
+#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
+#include "intel_gt_pm.h"
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
#include "intel_reset.h"
@@ -163,6 +166,13 @@
#define CTX_DESC_FORCE_RESTORE BIT_ULL(2)
+#define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE (0x1) /* lower csb dword */
+#define GEN12_CTX_SWITCH_DETAIL(csb_dw) ((csb_dw) & 0xF) /* upper csb dword */
+#define GEN12_CSB_SW_CTX_ID_MASK GENMASK(25, 15)
+#define GEN12_IDLE_CTX_ID 0x7FF
+#define GEN12_CSB_CTX_VALID(csb_dw) \
+ (FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID)
+
/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
#define WA_TAIL_DWORDS 2
@@ -216,8 +226,9 @@ static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
return container_of(engine, struct virtual_engine, base);
}
-static int execlists_context_deferred_alloc(struct intel_context *ce,
- struct intel_engine_cs *engine);
+static int __execlists_context_alloc(struct intel_context *ce,
+ struct intel_engine_cs *engine);
+
static void execlists_init_reg_state(u32 *reg_state,
struct intel_context *ce,
struct intel_engine_cs *engine,
@@ -417,13 +428,17 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > (BIT(GEN11_SW_CTX_ID_WIDTH)));
- desc = ctx->desc_template; /* bits 0-11 */
- GEM_BUG_ON(desc & GENMASK_ULL(63, 12));
+ desc = INTEL_LEGACY_32B_CONTEXT;
+ if (i915_vm_is_4lvl(ce->vm))
+ desc = INTEL_LEGACY_64B_CONTEXT;
+ desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
+
+ desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
+ if (IS_GEN(engine->i915, 8))
+ desc |= GEN8_CTX_L3LLC_COHERENT;
desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE;
/* bits 12-31 */
- GEM_BUG_ON(desc & GENMASK_ULL(63, 32));
-
/*
* The following 32bits are copied into the OA reports (dword 2).
* Consider updating oa_get_render_ctx_id in i915_perf.c when changing
@@ -539,26 +554,39 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status)
status, rq);
}
+static inline struct intel_engine_cs *
+__execlists_schedule_in(struct i915_request *rq)
+{
+ struct intel_engine_cs * const engine = rq->engine;
+ struct intel_context * const ce = rq->hw_context;
+
+ intel_context_get(ce);
+
+ intel_gt_pm_get(engine->gt);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+ intel_engine_context_in(engine);
+
+ return engine;
+}
+
static inline struct i915_request *
execlists_schedule_in(struct i915_request *rq, int idx)
{
- struct intel_context *ce = rq->hw_context;
- int count;
+ struct intel_context * const ce = rq->hw_context;
+ struct intel_engine_cs *old;
+ GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
trace_i915_request_in(rq, idx);
- count = intel_context_inflight_count(ce);
- if (!count) {
- intel_context_get(ce);
- ce->inflight = rq->engine;
-
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
- intel_engine_context_in(ce->inflight);
- }
+ old = READ_ONCE(ce->inflight);
+ do {
+ if (!old) {
+ WRITE_ONCE(ce->inflight, __execlists_schedule_in(rq));
+ break;
+ }
+ } while (!try_cmpxchg(&ce->inflight, &old, ptr_inc(old)));
- intel_context_inflight_inc(ce);
GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
-
return i915_request_get(rq);
}
@@ -572,34 +600,45 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
}
static inline void
-execlists_schedule_out(struct i915_request *rq)
+__execlists_schedule_out(struct i915_request *rq,
+ struct intel_engine_cs * const engine)
{
- struct intel_context *ce = rq->hw_context;
+ struct intel_context * const ce = rq->hw_context;
- GEM_BUG_ON(!intel_context_inflight_count(ce));
+ intel_engine_context_out(engine);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+ intel_gt_pm_put(engine->gt);
- trace_i915_request_out(rq);
+ /*
+ * If this is part of a virtual engine, its next request may
+ * have been blocked waiting for access to the active context.
+ * We have to kick all the siblings again in case we need to
+ * switch (e.g. the next request is not runnable on this
+ * engine). Hopefully, we will already have submitted the next
+ * request before the tasklet runs and do not need to rebuild
+ * each virtual tree and kick everyone again.
+ */
+ if (ce->engine != engine)
+ kick_siblings(rq, ce);
- intel_context_inflight_dec(ce);
- if (!intel_context_inflight_count(ce)) {
- intel_engine_context_out(ce->inflight);
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+ intel_context_put(ce);
+}
- /*
- * If this is part of a virtual engine, its next request may
- * have been blocked waiting for access to the active context.
- * We have to kick all the siblings again in case we need to
- * switch (e.g. the next request is not runnable on this
- * engine). Hopefully, we will already have submitted the next
- * request before the tasklet runs and do not need to rebuild
- * each virtual tree and kick everyone again.
- */
- ce->inflight = NULL;
- if (rq->engine != ce->engine)
- kick_siblings(rq, ce);
+static inline void
+execlists_schedule_out(struct i915_request *rq)
+{
+ struct intel_context * const ce = rq->hw_context;
+ struct intel_engine_cs *cur, *old;
- intel_context_put(ce);
- }
+ trace_i915_request_out(rq);
+ GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
+
+ old = READ_ONCE(ce->inflight);
+ do
+ cur = ptr_unmask_bits(old, 2) ? ptr_dec(old) : NULL;
+ while (!try_cmpxchg(&ce->inflight, &old, cur));
+ if (!cur)
+ __execlists_schedule_out(rq, old);
i915_request_put(rq);
}
@@ -674,6 +713,9 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
trace_ports(execlists, msg, execlists->pending);
+ if (!execlists->pending[0])
+ return false;
+
if (execlists->pending[execlists_num_ports(execlists)])
return false;
@@ -931,12 +973,24 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
return hint >= effective_prio(rq);
}
+static int
+switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
+{
+ if (list_is_last(&rq->sched.link, &engine->active.requests))
+ return INT_MIN;
+
+ return rq_prio(list_next_entry(rq, sched.link));
+}
+
static bool
-enable_timeslice(struct intel_engine_cs *engine)
+enable_timeslice(const struct intel_engine_execlists *execlists)
{
- struct i915_request *last = last_active(&engine->execlists);
+ const struct i915_request *rq = *execlists->active;
+
+ if (i915_request_completed(rq))
+ return false;
- return last && need_timeslice(engine, last);
+ return execlists->switch_priority_hint >= effective_prio(rq);
}
static void record_preemption(struct intel_engine_execlists *execlists)
@@ -1281,14 +1335,16 @@ done:
if (submit) {
*port = execlists_schedule_in(last, port - execlists->pending);
memset(port + 1, 0, (last_port - port) * sizeof(*port));
+ execlists->switch_priority_hint =
+ switch_prio(engine, *execlists->pending);
execlists_submit_ports(engine);
} else {
ring_set_paused(engine, 0);
}
}
-void
-execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
+static void
+cancel_port_requests(struct intel_engine_execlists * const execlists)
{
struct i915_request * const *port, *rq;
@@ -1322,8 +1378,71 @@ enum csb_step {
CSB_COMPLETE,
};
+/*
+ * Starting with Gen12, the status has a new format:
+ *
+ * bit 0: switched to new queue
+ * bit 1: reserved
+ * bit 2: semaphore wait mode (poll or signal), only valid when
+ * switch detail is set to "wait on semaphore"
+ * bits 3-5: engine class
+ * bits 6-11: engine instance
+ * bits 12-14: reserved
+ * bits 15-25: sw context id of the lrc the GT switched to
+ * bits 26-31: sw counter of the lrc the GT switched to
+ * bits 32-35: context switch detail
+ * - 0: ctx complete
+ * - 1: wait on sync flip
+ * - 2: wait on vblank
+ * - 3: wait on scanline
+ * - 4: wait on semaphore
+ * - 5: context preempted (not on SEMAPHORE_WAIT or
+ * WAIT_FOR_EVENT)
+ * bit 36: reserved
+ * bits 37-43: wait detail (for switch detail 1 to 4)
+ * bits 44-46: reserved
+ * bits 47-57: sw context id of the lrc the GT switched away from
+ * bits 58-63: sw counter of the lrc the GT switched away from
+ */
+static inline enum csb_step
+gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
+{
+ u32 lower_dw = csb[0];
+ u32 upper_dw = csb[1];
+ bool ctx_to_valid = GEN12_CSB_CTX_VALID(lower_dw);
+ bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_dw);
+ bool new_queue = lower_dw & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
+
+ if (!ctx_away_valid && ctx_to_valid)
+ return CSB_PROMOTE;
+
+ /*
+ * The context switch detail is not guaranteed to be 5 when a preemption
+ * occurs, so we can't just check for that. The check below works for
+ * all the cases we care about, including preemptions of WAIT
+ * instructions and lite-restore. Preempt-to-idle via the CTRL register
+ * would require some extra handling, but we don't support that.
+ */
+ if (new_queue && ctx_away_valid)
+ return CSB_PREEMPT;
+
+ /*
+ * switch detail = 5 is covered by the case above and we do not expect a
+ * context switch on an unsuccessful wait instruction since we always
+ * use polling mode.
+ */
+ GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_dw));
+
+ if (*execlists->active) {
+ GEM_BUG_ON(!ctx_away_valid);
+ return CSB_COMPLETE;
+ }
+
+ return CSB_NOP;
+}
+
static inline enum csb_step
-csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
+gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
{
unsigned int status = *csb;
@@ -1346,7 +1465,6 @@ static void process_csb(struct intel_engine_cs *engine)
const u8 num_entries = execlists->csb_size;
u8 head, tail;
- lockdep_assert_held(&engine->active.lock);
GEM_BUG_ON(USES_GUC_SUBMISSION(engine->i915));
/*
@@ -1376,6 +1494,8 @@ static void process_csb(struct intel_engine_cs *engine)
rmb();
do {
+ enum csb_step csb_step;
+
if (++head == num_entries)
head = 0;
@@ -1401,7 +1521,12 @@ static void process_csb(struct intel_engine_cs *engine)
engine->name, head,
buf[2 * head + 0], buf[2 * head + 1]);
- switch (csb_parse(execlists, buf + 2 * head)) {
+ if (INTEL_GEN(engine->i915) >= 12)
+ csb_step = gen12_csb_parse(execlists, buf + 2 * head);
+ else
+ csb_step = gen8_csb_parse(execlists, buf + 2 * head);
+
+ switch (csb_step) {
case CSB_PREEMPT: /* cancel old inflight, prepare for switch */
trace_ports(execlists, "preempted", execlists->active);
@@ -1417,15 +1542,14 @@ static void process_csb(struct intel_engine_cs *engine)
execlists->pending,
execlists_num_ports(execlists) *
sizeof(*execlists->pending));
- execlists->pending[0] = NULL;
-
- trace_ports(execlists, "promoted", execlists->active);
- if (enable_timeslice(engine))
+ if (enable_timeslice(execlists))
mod_timer(&execlists->timer, jiffies + 1);
if (!inject_preempt_hang(execlists))
ring_set_paused(engine, 0);
+
+ WRITE_ONCE(execlists->pending[0], NULL);
break;
case CSB_COMPLETE: /* port0 completed, advanced to port1 */
@@ -1469,8 +1593,6 @@ static void process_csb(struct intel_engine_cs *engine)
static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
{
lockdep_assert_held(&engine->active.lock);
-
- process_csb(engine);
if (!engine->execlists.pending[0])
execlists_dequeue(engine);
}
@@ -1484,9 +1606,12 @@ static void execlists_submission_tasklet(unsigned long data)
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
unsigned long flags;
- spin_lock_irqsave(&engine->active.lock, flags);
- __execlists_submission_tasklet(engine);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ process_csb(engine);
+ if (!READ_ONCE(engine->execlists.pending[0])) {
+ spin_lock_irqsave(&engine->active.lock, flags);
+ __execlists_submission_tasklet(engine);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+ }
}
static void execlists_submission_timer(struct timer_list *timer)
@@ -1569,10 +1694,41 @@ static void execlists_context_destroy(struct kref *kref)
intel_context_free(ce);
}
+static void
+set_redzone(void *vaddr, const struct intel_engine_cs *engine)
+{
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return;
+
+ vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
+ vaddr += engine->context_size;
+
+ memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
+}
+
+static void
+check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
+{
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return;
+
+ vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
+ vaddr += engine->context_size;
+
+ if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE))
+ dev_err_once(engine->i915->drm.dev,
+ "%s context redzone overwritten!\n",
+ engine->name);
+}
+
static void execlists_context_unpin(struct intel_context *ce)
{
+ check_redzone((void *)ce->lrc_reg_state - LRC_STATE_PN * PAGE_SIZE,
+ ce->engine);
+
i915_gem_context_unpin_hw_id(ce->gem_context);
i915_gem_object_unpin_map(ce->state->obj);
+ intel_ring_reset(ce->ring, ce->ring->tail);
}
static void
@@ -1605,9 +1761,6 @@ __execlists_context_pin(struct intel_context *ce,
void *vaddr;
int ret;
- ret = execlists_context_deferred_alloc(ce, engine);
- if (ret)
- goto err;
GEM_BUG_ON(!ce->state);
ret = intel_context_active_acquire(ce);
@@ -1646,6 +1799,11 @@ static int execlists_context_pin(struct intel_context *ce)
return __execlists_context_pin(ce, ce->engine);
}
+static int execlists_context_alloc(struct intel_context *ce)
+{
+ return __execlists_context_alloc(ce, ce->engine);
+}
+
static void execlists_context_reset(struct intel_context *ce)
{
/*
@@ -1669,6 +1827,8 @@ static void execlists_context_reset(struct intel_context *ce)
}
static const struct intel_context_ops execlists_context_ops = {
+ .alloc = execlists_context_alloc,
+
.pin = execlists_context_pin,
.unpin = execlists_context_unpin,
@@ -2065,6 +2225,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return 0;
switch (INTEL_GEN(engine->i915)) {
+ case 12:
case 11:
return 0;
case 10:
@@ -2238,15 +2399,15 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
static struct i915_request *active_request(struct i915_request *rq)
{
- const struct list_head * const list = &rq->engine->active.requests;
- const struct intel_context * const context = rq->hw_context;
+ const struct list_head * const list = &rq->timeline->requests;
+ const struct intel_context * const ce = rq->hw_context;
struct i915_request *active = NULL;
- list_for_each_entry_from_reverse(rq, list, sched.link) {
+ list_for_each_entry_from_reverse(rq, list, link) {
if (i915_request_completed(rq))
break;
- if (rq->hw_context != context)
+ if (rq->hw_context != ce)
break;
active = rq;
@@ -2280,18 +2441,6 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
GEM_BUG_ON(i915_active_is_idle(&ce->active));
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
rq = active_request(rq);
-
- /*
- * Catch up with any missed context-switch interrupts.
- *
- * Ideally we would just read the remaining CSB entries now that we
- * know the gpu is idle. However, the CSB registers are sometimes^W
- * often trashed across a GPU reset! Instead we have to rely on
- * guessing the missed context-switch events by looking at what
- * requests were completed.
- */
- execlists_cancel_port_requests(execlists);
-
if (!rq) {
ce->ring->head = ce->ring->tail;
goto out_replay;
@@ -2353,6 +2502,7 @@ out_replay:
unwind:
/* Push back any incomplete requests for replay after the reset. */
+ cancel_port_requests(execlists);
__unwind_incomplete_requests(engine);
}
@@ -2652,6 +2802,63 @@ static int gen8_emit_flush_render(struct i915_request *request,
return 0;
}
+static int gen11_emit_flush_render(struct i915_request *request,
+ u32 mode)
+{
+ struct intel_engine_cs *engine = request->engine;
+ const u32 scratch_addr =
+ intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
+
+ if (mode & EMIT_FLUSH) {
+ u32 *cs;
+ u32 flags = 0;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+
+ cs = intel_ring_begin(request, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags, scratch_addr);
+ intel_ring_advance(request, cs);
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ u32 *cs;
+ u32 flags = 0;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+
+ cs = intel_ring_begin(request, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags, scratch_addr);
+ intel_ring_advance(request, cs);
+ }
+
+ return 0;
+}
+
/*
* Reserve space for 2 NOOPs at the end of each request to be
* used as a workaround for not being allowed to do lite
@@ -2680,12 +2887,10 @@ static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs)
return cs;
}
-static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
+static __always_inline u32*
+gen8_emit_fini_breadcrumb_footer(struct i915_request *request,
+ u32 *cs)
{
- cs = gen8_emit_ggtt_write(cs,
- request->fence.seqno,
- request->timeline->hwsp_offset,
- 0);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -2698,35 +2903,53 @@ static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
return gen8_emit_wa_tail(request, cs);
}
+static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
+{
+ cs = gen8_emit_ggtt_write(cs,
+ request->fence.seqno,
+ request->timeline->hwsp_offset,
+ 0);
+
+ return gen8_emit_fini_breadcrumb_footer(request, cs);
+}
+
static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
- /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
cs = gen8_emit_ggtt_write_rcs(cs,
request->fence.seqno,
request->timeline->hwsp_offset,
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_DC_FLUSH_ENABLE);
+
+ /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
cs = gen8_emit_pipe_control(cs,
PIPE_CONTROL_FLUSH_ENABLE |
PIPE_CONTROL_CS_STALL,
0);
- *cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- if (intel_engine_has_semaphores(request->engine))
- cs = emit_preempt_busywait(request, cs);
+ return gen8_emit_fini_breadcrumb_footer(request, cs);
+}
- request->tail = intel_ring_offset(request, cs);
- assert_ring_tail_valid(request->ring, request->tail);
+static u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *request,
+ u32 *cs)
+{
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ request->fence.seqno,
+ request->timeline->hwsp_offset,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE);
- return gen8_emit_wa_tail(request, cs);
+ return gen8_emit_fini_breadcrumb_footer(request, cs);
}
static void execlists_park(struct intel_engine_cs *engine)
{
- del_timer_sync(&engine->execlists.timer);
- intel_engine_park(engine);
+ del_timer(&engine->execlists.timer);
}
void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
@@ -2817,11 +3040,23 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}
-int intel_execlists_submission_setup(struct intel_engine_cs *engine)
+static void rcs_submission_override(struct intel_engine_cs *engine)
{
- /* Intentionally left blank. */
- engine->buffer = NULL;
+ switch (INTEL_GEN(engine->i915)) {
+ case 12:
+ case 11:
+ engine->emit_flush = gen11_emit_flush_render;
+ engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
+ break;
+ default:
+ engine->emit_flush = gen8_emit_flush_render;
+ engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
+ break;
+ }
+}
+int intel_execlists_submission_setup(struct intel_engine_cs *engine)
+{
tasklet_init(&engine->execlists.tasklet,
execlists_submission_tasklet, (unsigned long)engine);
timer_setup(&engine->execlists.timer, execlists_submission_timer, 0);
@@ -2829,10 +3064,8 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
- if (engine->class == RENDER_CLASS) {
- engine->emit_flush = gen8_emit_flush_render;
- engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
- }
+ if (engine->class == RENDER_CLASS)
+ rcs_submission_override(engine);
return 0;
}
@@ -2891,6 +3124,10 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
default:
MISSING_CASE(INTEL_GEN(engine->i915));
/* fall through */
+ case 12:
+ indirect_ctx_offset =
+ GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
+ break;
case 11:
indirect_ctx_offset =
GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
@@ -3032,6 +3269,8 @@ populate_lr_context(struct intel_context *ce,
return ret;
}
+ set_redzone(vaddr, engine);
+
if (engine->default_state) {
/*
* We only want to copy over the template context state;
@@ -3069,28 +3308,16 @@ err_unpin_ctx:
return ret;
}
-static struct intel_timeline *
-get_timeline(struct i915_gem_context *ctx, struct intel_gt *gt)
-{
- if (ctx->timeline)
- return intel_timeline_get(ctx->timeline);
- else
- return intel_timeline_create(gt, NULL);
-}
-
-static int execlists_context_deferred_alloc(struct intel_context *ce,
- struct intel_engine_cs *engine)
+static int __execlists_context_alloc(struct intel_context *ce,
+ struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *ctx_obj;
+ struct intel_ring *ring;
struct i915_vma *vma;
u32 context_size;
- struct intel_ring *ring;
- struct intel_timeline *timeline;
int ret;
- if (ce->state)
- return 0;
-
+ GEM_BUG_ON(ce->state);
context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
/*
@@ -3098,6 +3325,8 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
* for our own use and for sharing with the GuC.
*/
context_size += LRC_HEADER_PAGES * PAGE_SIZE;
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ context_size += I915_GTT_PAGE_SIZE; /* for redzone */
ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size);
if (IS_ERR(ctx_obj))
@@ -3109,16 +3338,19 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
goto error_deref_obj;
}
- timeline = get_timeline(ce->gem_context, engine->gt);
- if (IS_ERR(timeline)) {
- ret = PTR_ERR(timeline);
- goto error_deref_obj;
+ if (!ce->timeline) {
+ struct intel_timeline *tl;
+
+ tl = intel_timeline_create(engine->gt, NULL);
+ if (IS_ERR(tl)) {
+ ret = PTR_ERR(tl);
+ goto error_deref_obj;
+ }
+
+ ce->timeline = tl;
}
- ring = intel_engine_create_ring(engine,
- timeline,
- ce->gem_context->ring_size);
- intel_timeline_put(timeline);
+ ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
goto error_deref_obj;
@@ -3229,6 +3461,8 @@ static void virtual_context_enter(struct intel_context *ce)
for (n = 0; n < ve->num_siblings; n++)
intel_engine_pm_get(ve->siblings[n]);
+
+ intel_timeline_enter(ce->timeline);
}
static void virtual_context_exit(struct intel_context *ce)
@@ -3236,6 +3470,8 @@ static void virtual_context_exit(struct intel_context *ce)
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
unsigned int n;
+ intel_timeline_exit(ce->timeline);
+
for (n = 0; n < ve->num_siblings; n++)
intel_engine_pm_put(ve->siblings[n]);
}
@@ -3536,6 +3772,12 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
+ err = __execlists_context_alloc(&ve->context, siblings[0]);
+ if (err)
+ goto err_put;
+
+ __set_bit(CONTEXT_ALLOC_BIT, &ve->context.flags);
+
return &ve->context;
err_put:
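
The Gen12 CSB layout documented above maps onto gen12_csb_parse() as promote/preempt/complete/nop. A hedged userspace decoder over the same fields, with the macros re-derived from the layout comment rather than taken from the driver headers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CSB_SW_CTX_ID(dw)       (((dw) >> 15) & 0x7ff) /* bits 15-25 */
#define CSB_IDLE_CTX_ID         0x7ff
#define CSB_NEW_QUEUE(dw)       ((dw) & 0x1)           /* lower dw, bit 0 */
#define CSB_SWITCH_DETAIL(dw)   ((dw) & 0xf)           /* upper dw, bits 0-3 */

static void decode_csb(uint32_t lower, uint32_t upper)
{
        bool to_valid   = CSB_SW_CTX_ID(lower) != CSB_IDLE_CTX_ID;
        bool away_valid = CSB_SW_CTX_ID(upper) != CSB_IDLE_CTX_ID;

        if (!away_valid && to_valid)
                printf("promote: ctx %u now running\n", CSB_SW_CTX_ID(lower));
        else if (CSB_NEW_QUEUE(lower) && away_valid)
                printf("preempt: ctx %u displaced\n", CSB_SW_CTX_ID(upper));
        else if (away_valid)
                printf("complete: ctx %u finished (detail %u)\n",
                       CSB_SW_CTX_ID(upper), CSB_SWITCH_DETAIL(upper));
        else
                printf("nop\n");
}

int main(void)
{
        /* away = idle (0x7ff in bits 15-25), to = ctx 3: a promotion */
        decode_csb(3u << 15 | 1u, 0x7ffu << 15);
        return 0;
}
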
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 6bf34738b4e5..b8f20ad71169 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -64,5 +64,6 @@
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
#define GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x19
#define GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x1A
+#define GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0xD
#endif /* _INTEL_LRC_REG_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index e082b25d2db1..728704bbbe18 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -62,6 +62,10 @@ struct drm_i915_mocs_table {
#define GEN11_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
/* (e)LLC caching options */
+/*
+ * Note: LE_0_PAGETABLE works only up to Gen11; for newer gens it means
+ * the same as LE_UC.
+ */
#define LE_0_PAGETABLE _LE_CACHEABILITY(0)
#define LE_1_UC _LE_CACHEABILITY(1)
#define LE_2_WT _LE_CACHEABILITY(2)
@@ -100,8 +104,9 @@ struct drm_i915_mocs_table {
* of bspec.
*
* Entries not part of the following tables are undefined as far as
- * userspace is concerned and shouldn't be relied upon. For the time
- * being they will be initialized to PTE.
+ * userspace is concerned and shouldn't be relied upon. For Gen < 12
+ * they will be initialized to PTE. Gen >= 12 has no setting for PTE,
+ * so those entries will be initialized to an invalid value.
*
* The last two entries are reserved by the hardware. For ICL+ they
* should be initialized according to bspec and never used, for older
@@ -137,14 +142,7 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
};
#define GEN11_MOCS_ENTRIES \
- /* Base - Uncached (Deprecated) */ \
- MOCS_ENTRY(I915_MOCS_UNCACHED, \
- LE_1_UC | LE_TC_1_LLC, \
- L3_1_UC), \
- /* Base - L3 + LeCC:PAT (Deprecated) */ \
- MOCS_ENTRY(I915_MOCS_PTE, \
- LE_0_PAGETABLE | LE_TC_1_LLC, \
- L3_3_WB), \
+ /* Entries 0 and 1 are defined per-platform */ \
/* Base - L3 + LLC */ \
MOCS_ENTRY(2, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
@@ -242,29 +240,65 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
L3_1_UC)
+static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = {
+ /* Base - Error (Reserved for Non-Use) */
+ MOCS_ENTRY(0, 0x0, 0x0),
+ /* Base - Reserved */
+ MOCS_ENTRY(1, 0x0, 0x0),
+
+ GEN11_MOCS_ENTRIES,
+
+ /* Implicitly enable L1 - HDC:L1 + L3 + LLC */
+ MOCS_ENTRY(48,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_3_WB),
+ /* Implicitly enable L1 - HDC:L1 + L3 */
+ MOCS_ENTRY(49,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_3_WB),
+ /* Implicitly enable L1 - HDC:L1 + LLC */
+ MOCS_ENTRY(50,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC),
+ /* Implicitly enable L1 - HDC:L1 */
+ MOCS_ENTRY(51,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_1_UC),
+ /* HW Special Case (CCS) */
+ MOCS_ENTRY(60,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC),
+ /* HW Special Case (Displayable) */
+ MOCS_ENTRY(61,
+ LE_1_UC | LE_TC_1_LLC | LE_SCF(1),
+ L3_3_WB),
+};
+
static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
+ /* Base - Uncached (Deprecated) */
+ MOCS_ENTRY(I915_MOCS_UNCACHED,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_1_UC),
+ /* Base - L3 + LeCC:PAT (Deprecated) */
+ MOCS_ENTRY(I915_MOCS_PTE,
+ LE_0_PAGETABLE | LE_TC_1_LLC,
+ L3_3_WB),
+
GEN11_MOCS_ENTRIES
};
-/**
- * get_mocs_settings()
- * @gt: gt device
- * @table: Output table that will be made to point at appropriate
- * MOCS values for the device.
- *
- * This function will return the values of the MOCS table that needs to
- * be programmed for the platform. It will return the values that need
- * to be programmed and if they need to be programmed.
- *
- * Return: true if there are applicable MOCS settings for the device.
- */
static bool get_mocs_settings(struct intel_gt *gt,
struct drm_i915_mocs_table *table)
{
struct drm_i915_private *i915 = gt->i915;
bool result = false;
- if (INTEL_GEN(i915) >= 11) {
+ if (INTEL_GEN(i915) >= 12) {
+ table->size = ARRAY_SIZE(tigerlake_mocs_table);
+ table->table = tigerlake_mocs_table;
+ table->n_entries = GEN11_NUM_MOCS_ENTRIES;
+ result = true;
+ } else if (IS_GEN(i915, 11)) {
table->size = ARRAY_SIZE(icelake_mocs_table);
table->table = icelake_mocs_table;
table->n_entries = GEN11_NUM_MOCS_ENTRIES;
@@ -346,6 +380,10 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine)
unsigned int index;
u32 unused_value;
+ /* Platforms with global MOCS do not need per-engine initialization. */
+ if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
+ return;
+
/* Called under a blanket forcewake */
assert_forcewakes_active(uncore, FORCEWAKE_ALL);
@@ -370,16 +408,36 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine)
unused_value);
}
-/**
- * emit_mocs_control_table() - emit the mocs control table
- * @rq: Request to set up the MOCS table for.
- * @table: The values to program into the control regs.
- *
- * This function simply emits a MI_LOAD_REGISTER_IMM command for the
- * given table starting at the given address.
- *
- * Return: 0 on success, otherwise the error status.
- */
+static void intel_mocs_init_global(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ struct drm_i915_mocs_table table;
+ unsigned int index;
+
+ GEM_BUG_ON(!HAS_GLOBAL_MOCS_REGISTERS(gt->i915));
+
+ if (!get_mocs_settings(gt, &table))
+ return;
+
+ if (GEM_DEBUG_WARN_ON(table.size > table.n_entries))
+ return;
+
+ for (index = 0; index < table.size; index++)
+ intel_uncore_write(uncore,
+ GEN12_GLOBAL_MOCS(index),
+ table.table[index].control_value);
+
+ /*
+ * Now set the unused entries to the invalid entry (index 0). These
+ * entries are officially undefined; no contract is given for their
+ * contents or settings.
+ */
+ for (; index < table.n_entries; index++)
+ intel_uncore_write(uncore,
+ GEN12_GLOBAL_MOCS(index),
+ table.table[0].control_value);
+}
+
static int emit_mocs_control_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table)
{
@@ -439,17 +497,6 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
return low | high << 16;
}
-/**
- * emit_mocs_l3cc_table() - emit the mocs control table
- * @rq: Request to set up the MOCS table for.
- * @table: The values to program into the control regs.
- *
- * This function simply emits a MI_LOAD_REGISTER_IMM command for the
- * given table starting at the given address. This register set is
- * programmed in pairs.
- *
- * Return: 0 on success, otherwise the error status.
- */
static int emit_mocs_l3cc_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table)
{
@@ -498,21 +545,7 @@ static int emit_mocs_l3cc_table(struct i915_request *rq,
return 0;
}
-/**
- * intel_mocs_init_l3cc_table() - program the mocs control table
- * @gt: the intel_gt container
- *
- * This function simply programs the mocs registers for the given table
- * starting at the given address. This register set is programmed in pairs.
- *
- * These registers may get programmed more than once, it is simpler to
- * re-program 32 registers than maintain the state of when they were programmed.
- * We are always reprogramming with the same values and this only on context
- * start.
- *
- * Return: Nothing.
- */
-void intel_mocs_init_l3cc_table(struct intel_gt *gt)
+static void intel_mocs_init_l3cc_table(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
struct drm_i915_mocs_table table;
@@ -553,8 +586,8 @@ void intel_mocs_init_l3cc_table(struct intel_gt *gt)
}
/**
- * intel_rcs_context_init_mocs() - program the MOCS register.
- * @rq: Request to set up the MOCS tables for.
+ * intel_mocs_emit() - program the MOCS register.
+ * @rq: Request to use to set up the MOCS tables.
*
* This function will emit a batch buffer with the values required for
* programming the MOCS register values for all the currently supported
@@ -573,7 +606,8 @@ int intel_mocs_emit(struct i915_request *rq)
struct drm_i915_mocs_table t;
int ret;
- if (rq->engine->class != RENDER_CLASS)
+ if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915) ||
+ rq->engine->class != RENDER_CLASS)
return 0;
if (get_mocs_settings(rq->engine->gt, &t)) {
@@ -590,3 +624,11 @@ int intel_mocs_emit(struct i915_request *rq)
return 0;
}
+
+void intel_mocs_init(struct intel_gt *gt)
+{
+ intel_mocs_init_l3cc_table(gt);
+
+ if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
+ intel_mocs_init_global(gt);
+}
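
emit_mocs_l3cc_table() and intel_mocs_init_l3cc_table() rely on l3cc_combine() packing two 16-bit entries per 32-bit register, even entries in the low half and odd entries in the high half. For illustration, the same packing with a plain array standing in for the MMIO/LRI writes:

#include <stdint.h>

/* Two 16-bit MOCS L3 control words are packed per 32-bit register:
 * entry 2*i in the low half, entry 2*i+1 in the high half. */
static uint32_t l3cc_combine(uint16_t low, uint16_t high)
{
        return (uint32_t)low | (uint32_t)high << 16;
}

/* Program n entries worth of l3cc values into n/2 registers. */
static void program_l3cc(uint32_t *regs, const uint16_t *table, unsigned int n)
{
        unsigned int i;

        for (i = 0; i < n / 2; i++)
                regs[i] = l3cc_combine(table[2 * i], table[2 * i + 1]);
}

int main(void)
{
        const uint16_t table[4] = { 0x0010, 0x0030, 0x0010, 0x0010 };
        uint32_t regs[2];

        program_l3cc(regs, table, 4);
        return regs[0] == 0x00300010 ? 0 : 1;
}
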
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h
index a334db2d6d6b..2ae816b7ca19 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.h
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.h
@@ -49,12 +49,11 @@
* context handling keep the MOCS in step.
*/
-struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;
struct intel_gt;
-void intel_mocs_init_l3cc_table(struct intel_gt *gt);
+void intel_mocs_init(struct intel_gt *gt);
void intel_mocs_init_engine(struct intel_engine_cs *engine);
int intel_mocs_emit(struct i915_request *rq);
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index be37d4501c67..6d05f9c64178 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -222,7 +222,9 @@ int intel_renderstate_emit(struct i915_request *rq)
}
i915_vma_lock(so.vma);
- err = i915_vma_move_to_active(so.vma, rq, 0);
+ err = i915_request_await_object(rq, so.vma->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(so.vma, rq, 0);
i915_vma_unlock(so.vma);
err_unpin:
i915_vma_unpin(so.vma);
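
The intel_renderstate_emit() fix above is the general "await before publish" rule for implicit fencing: a request must record its dependency on an object's current fences before installing itself as the object's new activity. A toy model, collapsing i915's read/write fence bookkeeping into a single last-write fence:

#include <stdio.h>

struct fence { int signaled; };
struct object { struct fence *last_write; };
struct request { struct fence fence; const char *name; };

/* Record a wait on whatever is already attached to the object ... */
static void request_await_object(struct request *rq, const struct object *obj)
{
        if (obj->last_write && !obj->last_write->signaled)
                printf("%s: ordered after previous writer\n", rq->name);
}

/* ... and only then publish the request as the object's new activity.
 * In the reverse order, a third party could order itself after rq while
 * rq itself had not yet ordered after the old writer. */
static void move_to_active(struct object *obj, struct request *rq)
{
        obj->last_write = &rq->fence;
}

int main(void)
{
        struct request old = { .name = "old" }, rq = { .name = "new" };
        struct object obj = { 0 };

        move_to_active(&obj, &old);
        request_await_object(&rq, &obj);        /* await first ... */
        move_to_active(&obj, &rq);              /* ... then publish */
        return 0;
}
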
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 98c071fe532b..077716442c90 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -7,6 +7,7 @@
#include <linux/sched/mm.h>
#include <linux/stop_machine.h>
+#include "display/intel_display_types.h"
#include "display/intel_overlay.h"
#include "gem/i915_gem_context.h"
@@ -757,11 +758,8 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
__intel_gt_reset(gt, ALL_ENGINES);
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt->i915, id)
engine->submit_request = nop_submit_request;
- engine->schedule = NULL;
- }
- gt->i915->caps.scheduler = 0;
/*
* Make sure no request can slip through without getting completed by
@@ -813,7 +811,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
*
* No more can be submitted until we reset the wedged bit.
*/
- mutex_lock(&timelines->mutex);
+ spin_lock(&timelines->lock);
list_for_each_entry(tl, &timelines->active_list, link) {
struct i915_request *rq;
@@ -821,6 +819,8 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
if (!rq)
continue;
+ spin_unlock(&timelines->lock);
+
/*
* All internal dependencies (i915_requests) will have
* been flushed by the set-wedge, but we may be stuck waiting
@@ -830,8 +830,12 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
*/
dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
i915_request_put(rq);
+
+ /* Restart iteration after dropping the lock */
+ spin_lock(&timelines->lock);
+ tl = list_entry(&timelines->active_list, typeof(*tl), link);
}
- mutex_unlock(&timelines->mutex);
+ spin_unlock(&timelines->lock);
intel_gt_sanitize(gt, false);
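
__intel_gt_unset_wedged() now drops the timelines spinlock around the (sleeping) fence wait and restarts the iteration afterwards, since the list may have changed while unlocked. The same pattern in self-contained form, with a pthread mutex standing in for the spinlock:

#include <pthread.h>

struct node { struct node *next; int done; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* May sleep, so it must not be called with 'lock' held; stands in for
 * dma_fence_default_wait() in the hunk above. Completion of the wait
 * marks the node done, guaranteeing forward progress on restart. */
static void wait_on(struct node *n)
{
        n->done = 1;
}

static void drain_all(void)
{
        struct node *n;

        pthread_mutex_lock(&lock);
restart:
        for (n = head; n; n = n->next) {
                if (n->done)
                        continue;

                /* Assumes n is pinned by a reference across the unlock,
                 * as the diff does with i915_request_get/put. */
                pthread_mutex_unlock(&lock);
                wait_on(n);
                pthread_mutex_lock(&lock);

                /* The list may have changed while unlocked: start over. */
                goto restart;
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        drain_all();
        return 0;
}
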
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index 8d24a49e5139..601c16239fdf 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -37,6 +37,8 @@
#include "i915_trace.h"
#include "intel_context.h"
#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_pm_irq.h"
#include "intel_reset.h"
#include "intel_workarounds.h"
@@ -636,7 +638,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
static int xcs_resume(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- struct intel_ring *ring = engine->buffer;
+ struct intel_ring *ring = engine->legacy.ring;
int ret = 0;
GEM_TRACE("%s: ring:{HEAD:%04x, TAIL:%04x}\n",
@@ -644,6 +646,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
+ /* WaClearRingBufHeadRegAtInit:ctg,elk */
if (!stop_ring(engine)) {
/* G45 ring initialization often fails to reset head to zero */
DRM_DEBUG_DRIVER("%s head not reset to zero "
@@ -675,19 +678,16 @@ static int xcs_resume(struct intel_engine_cs *engine)
intel_engine_reset_breadcrumbs(engine);
/* Enforce ordering by reading HEAD register back */
- ENGINE_READ(engine, RING_HEAD);
+ ENGINE_POSTING_READ(engine, RING_HEAD);
- /* Initialize the ring. This must happen _after_ we've cleared the ring
+ /*
+ * Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
- * register values. */
+ * register values.
+ */
ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
- /* WaClearRingBufHeadRegAtInit:ctg,elk */
- if (ENGINE_READ(engine, RING_HEAD))
- DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
- engine->name, ENGINE_READ(engine, RING_HEAD));
-
/* Check that the ring offsets point within the ring! */
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
@@ -834,12 +834,12 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled)
*/
__i915_request_reset(rq, stalled);
- GEM_BUG_ON(rq->ring != engine->buffer);
+ GEM_BUG_ON(rq->ring != engine->legacy.ring);
head = rq->head;
} else {
- head = engine->buffer->tail;
+ head = engine->legacy.ring->tail;
}
- engine->buffer->head = intel_ring_wrap(engine->buffer, head);
+ engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);
spin_unlock_irqrestore(&engine->active.lock, flags);
}
@@ -984,13 +984,13 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
static void
gen5_irq_enable(struct intel_engine_cs *engine)
{
- gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
+ gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
}
static void
gen5_irq_disable(struct intel_engine_cs *engine)
{
- gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
+ gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
}
static void
@@ -1051,14 +1051,14 @@ gen6_irq_enable(struct intel_engine_cs *engine)
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
ENGINE_POSTING_READ(engine, RING_IMR);
- gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
+ gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
}
static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
- gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
+ gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
}
static void
@@ -1069,14 +1069,14 @@ hsw_vebox_irq_enable(struct intel_engine_cs *engine)
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
ENGINE_POSTING_READ(engine, RING_IMR);
- gen6_unmask_pm_irq(engine->gt, engine->irq_enable_mask);
+ gen6_gt_pm_unmask_irq(engine->gt, engine->irq_enable_mask);
}
static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR, ~0);
- gen6_mask_pm_irq(engine->gt, engine->irq_enable_mask);
+ gen6_gt_pm_mask_irq(engine->gt, engine->irq_enable_mask);
}
static int
@@ -1138,7 +1138,7 @@ i830_emit_bb_start(struct i915_request *rq,
* stable batch scratch bo area (so that the CS never
* stumbles over its tlb invalidation bug) ...
*/
- *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
+ *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
*cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
*cs++ = cs_offset;
@@ -1194,10 +1194,6 @@ int intel_ring_pin(struct intel_ring *ring)
if (atomic_fetch_inc(&ring->pin_count))
return 0;
- ret = intel_timeline_pin(ring->timeline);
- if (ret)
- goto err_unpin;
-
flags = PIN_GLOBAL;
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
@@ -1210,7 +1206,7 @@ int intel_ring_pin(struct intel_ring *ring)
ret = i915_vma_pin(vma, 0, 0, flags);
if (unlikely(ret))
- goto err_timeline;
+ goto err_unpin;
if (i915_vma_is_map_and_fenceable(vma))
addr = (void __force *)i915_vma_pin_iomap(vma);
@@ -1222,18 +1218,15 @@ int intel_ring_pin(struct intel_ring *ring)
goto err_ring;
}
- vma->obj->pin_global++;
+ i915_vma_make_unshrinkable(vma);
GEM_BUG_ON(ring->vaddr);
ring->vaddr = addr;
- GEM_TRACE("ring:%llx pin\n", ring->timeline->fence_context);
return 0;
err_ring:
i915_vma_unpin(vma);
-err_timeline:
- intel_timeline_unpin(ring->timeline);
err_unpin:
atomic_dec(&ring->pin_count);
return ret;
@@ -1241,8 +1234,7 @@ err_unpin:
void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
- GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
-
+ tail = intel_ring_wrap(ring, tail);
ring->tail = tail;
ring->head = tail;
ring->emit = tail;
@@ -1251,28 +1243,25 @@ void intel_ring_reset(struct intel_ring *ring, u32 tail)
void intel_ring_unpin(struct intel_ring *ring)
{
+ struct i915_vma *vma = ring->vma;
+
if (!atomic_dec_and_test(&ring->pin_count))
return;
- GEM_TRACE("ring:%llx unpin\n", ring->timeline->fence_context);
-
/* Discard any unused bytes beyond that submitted to hw. */
- intel_ring_reset(ring, ring->tail);
+ intel_ring_reset(ring, ring->emit);
- GEM_BUG_ON(!ring->vma);
- i915_vma_unset_ggtt_write(ring->vma);
- if (i915_vma_is_map_and_fenceable(ring->vma))
- i915_vma_unpin_iomap(ring->vma);
+ i915_vma_unset_ggtt_write(vma);
+ if (i915_vma_is_map_and_fenceable(vma))
+ i915_vma_unpin_iomap(vma);
else
- i915_gem_object_unpin_map(ring->vma->obj);
+ i915_gem_object_unpin_map(vma->obj);
GEM_BUG_ON(!ring->vaddr);
ring->vaddr = NULL;
- ring->vma->obj->pin_global--;
- i915_vma_unpin(ring->vma);
-
- intel_timeline_unpin(ring->timeline);
+ i915_vma_unpin(vma);
+ i915_vma_make_purgeable(vma);
}
static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
@@ -1307,9 +1296,7 @@ err:
}
struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine,
- struct intel_timeline *timeline,
- int size)
+intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
struct drm_i915_private *i915 = engine->i915;
struct intel_ring *ring;
@@ -1323,8 +1310,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
return ERR_PTR(-ENOMEM);
kref_init(&ring->ref);
- INIT_LIST_HEAD(&ring->request_list);
- ring->timeline = intel_timeline_get(timeline);
ring->size = size;
/* Workaround an erratum on the i830 which causes a hang if
@@ -1354,7 +1339,6 @@ void intel_ring_free(struct kref *ref)
i915_vma_close(ring->vma);
i915_vma_put(ring->vma);
- intel_timeline_put(ring->timeline);
kfree(ring);
}
@@ -1481,16 +1465,17 @@ err_obj:
return ERR_PTR(err);
}
-static int ring_context_pin(struct intel_context *ce)
+static int ring_context_alloc(struct intel_context *ce)
{
struct intel_engine_cs *engine = ce->engine;
- int err;
/* One ringbuffer to rule them all */
- GEM_BUG_ON(!engine->buffer);
- ce->ring = engine->buffer;
+ GEM_BUG_ON(!engine->legacy.ring);
+ ce->ring = engine->legacy.ring;
+ ce->timeline = intel_timeline_get(engine->legacy.timeline);
- if (!ce->state && engine->context_size) {
+ GEM_BUG_ON(ce->state);
+ if (engine->context_size) {
struct i915_vma *vma;
vma = alloc_context_vma(engine);
@@ -1500,6 +1485,13 @@ static int ring_context_pin(struct intel_context *ce)
ce->state = vma;
}
+ return 0;
+}
+
+static int ring_context_pin(struct intel_context *ce)
+{
+ int err;
+
err = intel_context_active_acquire(ce);
if (err)
return err;
@@ -1521,6 +1513,8 @@ static void ring_context_reset(struct intel_context *ce)
}
static const struct intel_context_ops ring_context_ops = {
+ .alloc = ring_context_alloc,
+
.pin = ring_context_pin,
.unpin = ring_context_unpin,
@@ -1869,7 +1863,10 @@ static int ring_request_alloc(struct i915_request *request)
return 0;
}
-static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
+static noinline int
+wait_for_space(struct intel_ring *ring,
+ struct intel_timeline *tl,
+ unsigned int bytes)
{
struct i915_request *target;
long timeout;
@@ -1877,15 +1874,18 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
if (intel_ring_update_space(ring) >= bytes)
return 0;
- GEM_BUG_ON(list_empty(&ring->request_list));
- list_for_each_entry(target, &ring->request_list, ring_link) {
+ GEM_BUG_ON(list_empty(&tl->requests));
+ list_for_each_entry(target, &tl->requests, link) {
+ if (target->ring != ring)
+ continue;
+
/* Would completion of this request free enough space? */
if (bytes <= __intel_ring_space(target->postfix,
ring->emit, ring->size))
break;
}
- if (WARN_ON(&target->ring_link == &ring->request_list))
+ if (GEM_WARN_ON(&target->link == &tl->requests))
return -ENOSPC;
timeout = i915_request_wait(target,
@@ -1952,7 +1952,7 @@ u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
*/
GEM_BUG_ON(!rq->reserved_space);
- ret = wait_for_space(ring, total_bytes);
+ ret = wait_for_space(ring, rq->timeline, total_bytes);
if (unlikely(ret))
return ERR_PTR(ret);
}
@@ -2157,8 +2157,11 @@ static void ring_destroy(struct intel_engine_cs *engine)
intel_engine_cleanup_common(engine);
- intel_ring_unpin(engine->buffer);
- intel_ring_put(engine->buffer);
+ intel_ring_unpin(engine->legacy.ring);
+ intel_ring_put(engine->legacy.ring);
+
+ intel_timeline_unpin(engine->legacy.timeline);
+ intel_timeline_put(engine->legacy.timeline);
kfree(engine);
}
@@ -2342,32 +2345,40 @@ int intel_ring_submission_init(struct intel_engine_cs *engine)
}
GEM_BUG_ON(timeline->has_initial_breadcrumb);
- ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
- intel_timeline_put(timeline);
+ err = intel_timeline_pin(timeline);
+ if (err)
+ goto err_timeline;
+
+ ring = intel_engine_create_ring(engine, SZ_16K);
if (IS_ERR(ring)) {
err = PTR_ERR(ring);
- goto err;
+ goto err_timeline_unpin;
}
err = intel_ring_pin(ring);
if (err)
goto err_ring;
- GEM_BUG_ON(engine->buffer);
- engine->buffer = ring;
+ GEM_BUG_ON(engine->legacy.ring);
+ engine->legacy.ring = ring;
+ engine->legacy.timeline = timeline;
err = intel_engine_init_common(engine);
if (err)
- goto err_unpin;
+ goto err_ring_unpin;
- GEM_BUG_ON(ring->timeline->hwsp_ggtt != engine->status_page.vma);
+ GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
return 0;
-err_unpin:
+err_ring_unpin:
intel_ring_unpin(ring);
err_ring:
intel_ring_put(ring);
+err_timeline_unpin:
+ intel_timeline_unpin(timeline);
+err_timeline:
+ intel_timeline_put(timeline);
err:
intel_engine_cleanup_common(engine);
return err;
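
wait_for_space() now walks the timeline's request list for a request whose retirement frees enough ring room. The underlying free-space arithmetic is the usual power-of-two wrap; a self-checking sketch (the driver keeps a cacheline of slack rather than a single byte, elided here):

#include <assert.h>
#include <stdint.h>

/* Free bytes in a power-of-two ring once 'head' has advanced past
 * retired requests; one slot is kept open to tell full from empty. */
static uint32_t ring_space(uint32_t head, uint32_t tail, uint32_t size)
{
        return (head - tail - 1) & (size - 1);
}

int main(void)
{
        /* 4 KiB ring, writer at 0x0f00, oldest busy request ends at
         * 0x0100: the wrapping free space is (0x0100 - 0x0f00 - 1) mod
         * 0x1000. */
        assert(ring_space(0x0100, 0x0f00, 0x1000) == 0x01ff);
        assert(ring_space(0x0800, 0x0800, 0x1000) == 0x0fff); /* empty */
        return 0;
}
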
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index a0756f006f5f..6bf2d87da109 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -49,7 +49,7 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
* cases which disable slices for functional, apart for performance
* reasons. So in this case we select a known stable subset.
*/
- if (!i915->perf.oa.exclusive_stream) {
+ if (!i915->perf.exclusive_stream) {
ctx_sseu = *req_sseu;
} else {
ctx_sseu = intel_sseu_from_device_info(sseu);
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 6daa9eb59e19..02fbe11b671b 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -211,9 +211,9 @@ int intel_timeline_init(struct intel_timeline *timeline,
void *vaddr;
kref_init(&timeline->kref);
+ atomic_set(&timeline->pin_count, 0);
timeline->gt = gt;
- timeline->pin_count = 0;
timeline->has_initial_breadcrumb = !hwsp;
timeline->hwsp_cacheline = NULL;
@@ -254,7 +254,7 @@ int intel_timeline_init(struct intel_timeline *timeline,
mutex_init(&timeline->mutex);
- INIT_ACTIVE_REQUEST(&timeline->last_request);
+ INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
@@ -266,7 +266,7 @@ static void timelines_init(struct intel_gt *gt)
{
struct intel_gt_timelines *timelines = &gt->timelines;
- mutex_init(&timelines->mutex);
+ spin_lock_init(&timelines->lock);
INIT_LIST_HEAD(&timelines->active_list);
spin_lock_init(&timelines->hwsp_lock);
@@ -278,64 +278,11 @@ void intel_timelines_init(struct drm_i915_private *i915)
timelines_init(&i915->gt);
}
-static void timeline_add_to_active(struct intel_timeline *tl)
-{
- struct intel_gt_timelines *gt = &tl->gt->timelines;
-
- mutex_lock(&gt->mutex);
- list_add(&tl->link, &gt->active_list);
- mutex_unlock(&gt->mutex);
-}
-
-static void timeline_remove_from_active(struct intel_timeline *tl)
-{
- struct intel_gt_timelines *gt = &tl->gt->timelines;
-
- mutex_lock(&gt->mutex);
- list_del(&tl->link);
- mutex_unlock(&gt->mutex);
-}
-
-static void timelines_park(struct intel_gt *gt)
-{
- struct intel_gt_timelines *timelines = &gt->timelines;
- struct intel_timeline *timeline;
-
- mutex_lock(&timelines->mutex);
- list_for_each_entry(timeline, &timelines->active_list, link) {
- /*
- * All known fences are completed so we can scrap
- * the current sync point tracking and start afresh,
- * any attempt to wait upon a previous sync point
- * will be skipped as the fence was signaled.
- */
- i915_syncmap_free(&timeline->sync);
- }
- mutex_unlock(&timelines->mutex);
-}
-
-/**
- * intel_timelines_park - called when the driver idles
- * @i915: the drm_i915_private device
- *
- * When the driver is completely idle, we know that all of our sync points
- * have been signaled and our tracking is then entirely redundant. Any request
- * to wait upon an older sync point will be completed instantly as we know
- * the fence is signaled and therefore we will not even look them up in the
- * sync point map.
- */
-void intel_timelines_park(struct drm_i915_private *i915)
-{
- timelines_park(&i915->gt);
-}
-
void intel_timeline_fini(struct intel_timeline *timeline)
{
- GEM_BUG_ON(timeline->pin_count);
+ GEM_BUG_ON(atomic_read(&timeline->pin_count));
GEM_BUG_ON(!list_empty(&timeline->requests));
- i915_syncmap_free(&timeline->sync);
-
if (timeline->hwsp_cacheline)
cacheline_free(timeline->hwsp_cacheline);
else
@@ -367,31 +314,67 @@ int intel_timeline_pin(struct intel_timeline *tl)
{
int err;
- if (tl->pin_count++)
+ if (atomic_add_unless(&tl->pin_count, 1, 0))
return 0;
- GEM_BUG_ON(!tl->pin_count);
err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
- goto unpin;
+ return err;
tl->hwsp_offset =
i915_ggtt_offset(tl->hwsp_ggtt) +
offset_in_page(tl->hwsp_offset);
cacheline_acquire(tl->hwsp_cacheline);
- timeline_add_to_active(tl);
+ if (atomic_fetch_inc(&tl->pin_count)) {
+ cacheline_release(tl->hwsp_cacheline);
+ __i915_vma_unpin(tl->hwsp_ggtt);
+ }
return 0;
+}
-unpin:
- tl->pin_count = 0;
- return err;
+void intel_timeline_enter(struct intel_timeline *tl)
+{
+ struct intel_gt_timelines *timelines = &tl->gt->timelines;
+
+ lockdep_assert_held(&tl->mutex);
+
+ GEM_BUG_ON(!atomic_read(&tl->pin_count));
+ if (tl->active_count++)
+ return;
+ GEM_BUG_ON(!tl->active_count); /* overflow? */
+
+ spin_lock(&timelines->lock);
+ list_add(&tl->link, &timelines->active_list);
+ spin_unlock(&timelines->lock);
+}
+
+void intel_timeline_exit(struct intel_timeline *tl)
+{
+ struct intel_gt_timelines *timelines = &tl->gt->timelines;
+
+ lockdep_assert_held(&tl->mutex);
+
+ GEM_BUG_ON(!tl->active_count);
+ if (--tl->active_count)
+ return;
+
+ spin_lock(&timelines->lock);
+ list_del(&tl->link);
+ spin_unlock(&timelines->lock);
+
+ /*
+ * Since this timeline is idle, all barriers upon which we were waiting
+ * must also be complete and so we can discard the last used barriers
+ * without loss of information.
+ */
+ i915_syncmap_free(&tl->sync);
}
static u32 timeline_advance(struct intel_timeline *tl)
{
- GEM_BUG_ON(!tl->pin_count);
+ GEM_BUG_ON(!atomic_read(&tl->pin_count));
GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);
return tl->seqno += 1 + tl->has_initial_breadcrumb;
@@ -457,8 +440,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
* free it after the current request is retired, which ensures that
* all writes into the cacheline from previous requests are complete.
*/
- err = i915_active_ref(&tl->hwsp_cacheline->active,
- tl->fence_context, rq);
+ err = i915_active_ref(&tl->hwsp_cacheline->active, tl, rq);
if (err)
goto err_cacheline;
@@ -509,7 +491,7 @@ int intel_timeline_get_seqno(struct intel_timeline *tl,
static int cacheline_ref(struct intel_timeline_cacheline *cl,
struct i915_request *rq)
{
- return i915_active_ref(&cl->active, rq->fence.context, rq);
+ return i915_active_ref(&cl->active, rq->timeline, rq);
}
int intel_timeline_read_hwsp(struct i915_request *from,
@@ -542,20 +524,12 @@ int intel_timeline_read_hwsp(struct i915_request *from,
void intel_timeline_unpin(struct intel_timeline *tl)
{
- GEM_BUG_ON(!tl->pin_count);
- if (--tl->pin_count)
+ GEM_BUG_ON(!atomic_read(&tl->pin_count));
+ if (!atomic_dec_and_test(&tl->pin_count))
return;
- timeline_remove_from_active(tl);
cacheline_release(tl->hwsp_cacheline);
- /*
- * Since this timeline is idle, all bariers upon which we were waiting
- * must also be complete and so we can discard the last used barriers
- * without loss of information.
- */
- i915_syncmap_free(&tl->sync);
-
__i915_vma_unpin(tl->hwsp_ggtt);
}
@@ -574,8 +548,6 @@ static void timelines_fini(struct intel_gt *gt)
GEM_BUG_ON(!list_empty(&timelines->active_list));
GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
-
- mutex_destroy(&timelines->mutex);
}
void intel_timelines_fini(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
index e08cebf64833..f583af1ba18d 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -77,9 +77,11 @@ static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl,
}
int intel_timeline_pin(struct intel_timeline *tl);
+void intel_timeline_enter(struct intel_timeline *tl);
int intel_timeline_get_seqno(struct intel_timeline *tl,
struct i915_request *rq,
u32 *seqno);
+void intel_timeline_exit(struct intel_timeline *tl);
void intel_timeline_unpin(struct intel_timeline *tl);
int intel_timeline_read_hwsp(struct i915_request *from,
@@ -87,7 +89,6 @@ int intel_timeline_read_hwsp(struct i915_request *from,
u32 *hwsp_offset);
void intel_timelines_init(struct drm_i915_private *i915);
-void intel_timelines_park(struct drm_i915_private *i915);
void intel_timelines_fini(struct drm_i915_private *i915);
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index 9a71aea7a338..2b1baf2fcc8e 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -25,7 +25,25 @@ struct intel_timeline {
struct mutex mutex; /* protects the flow of requests */
- unsigned int pin_count;
+ /*
+ * pin_count and active_count track essentially the same thing:
+ * How many requests are in flight or may be under construction.
+ *
+ * We need two distinct counters so that we can assign different
+ * lifetimes to the events for different use-cases. For example,
+ * we want to permanently keep the timeline pinned for the kernel
+ * context so that we can issue requests at any time without having
+ * to acquire space in the GGTT. However, we want to keep tracking
+ * the activity (to be able to detect when we become idle) along that
+ * permanently pinned timeline and so end up requiring two counters.
+ *
+ * Note that the active_count is protected by the intel_timeline.mutex,
+ * but the pin_count is protected by a combination of serialisation
+ * from the intel_context caller plus internal atomicity.
+ */
+ atomic_t pin_count;
+ unsigned int active_count;
+
const u32 *hwsp_seqno;
struct i915_vma *hwsp_ggtt;
u32 hwsp_offset;
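
[Editor's note: a sketch of the lock-free pin idiom adopted above. The fast path, atomic_add_unless(&tl->pin_count, 1, 0), only succeeds when the timeline is already pinned; otherwise the caller performs the one-time setup and publishes it with atomic_fetch_inc(), backing out if a racer published first. The following is portable C11 modelling that pattern; the names obj/pin_fast/pin_slow are invented for the illustration, not taken from the patch.]

/*
 * Illustrative only: pin-if-already-pinned, in portable C11 atomics.
 * In the patch this is atomic_add_unless() (fast path) followed by
 * atomic_fetch_inc() (publish after the slow-path setup).
 */
#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int pin_count;
};

/* Take a reference only if one already exists: atomic_add_unless(v, 1, 0). */
static bool pin_fast(struct obj *o)
{
	int old = atomic_load(&o->pin_count);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&o->pin_count, &old, old + 1))
			return true;
	}
	return false;
}

static void setup(struct obj *o)    { (void)o; /* e.g. pin backing storage */ }
static void teardown(struct obj *o) { (void)o; /* drop a duplicate setup */ }

/* Slow path: both racers may run setup(); only the first publish keeps it. */
static void pin_slow(struct obj *o)
{
	setup(o);
	if (atomic_fetch_add(&o->pin_count, 1) != 0)
		teardown(o);
}

void obj_pin(struct obj *o)
{
	if (!pin_fast(o))
		pin_slow(o);
}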
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 704ace01e7f5..126ab3667919 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -569,6 +569,11 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
}
+static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+}
+
static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
struct i915_wa_list *wal,
@@ -581,7 +586,9 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
wa_init_start(wal, name, engine->name);
- if (IS_GEN(i915, 11))
+ if (IS_GEN(i915, 12))
+ tgl_ctx_workarounds_init(engine, wal);
+ else if (IS_GEN(i915, 11))
icl_ctx_workarounds_init(engine, wal);
else if (IS_CANNONLAKE(i915))
cnl_ctx_workarounds_init(engine, wal);
@@ -891,9 +898,16 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
static void
+tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+}
+
+static void
gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- if (IS_GEN(i915, 11))
+ if (IS_GEN(i915, 12))
+ tgl_gt_workarounds_init(i915, wal);
+ else if (IS_GEN(i915, 11))
icl_gt_workarounds_init(i915, wal);
else if (IS_CANNONLAKE(i915))
cnl_gt_workarounds_init(i915, wal);
@@ -1183,6 +1197,10 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
}
}
+static void tgl_whitelist_build(struct intel_engine_cs *engine)
+{
+}
+
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
@@ -1190,7 +1208,9 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
wa_init_start(w, "whitelist", engine->name);
- if (IS_GEN(i915, 11))
+ if (IS_GEN(i915, 12))
+ tgl_whitelist_build(engine);
+ else if (IS_GEN(i915, 11))
icl_whitelist_build(engine);
else if (IS_CANNONLAKE(i915))
cnl_whitelist_build(engine);
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 10cb312462e5..5d43cbc3f345 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -27,59 +27,40 @@
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
+#include "intel_engine_pool.h"
#include "mock_engine.h"
#include "selftests/mock_request.h"
-struct mock_ring {
- struct intel_ring base;
- struct intel_timeline timeline;
-};
-
static void mock_timeline_pin(struct intel_timeline *tl)
{
- tl->pin_count++;
+ atomic_inc(&tl->pin_count);
}
static void mock_timeline_unpin(struct intel_timeline *tl)
{
- GEM_BUG_ON(!tl->pin_count);
- tl->pin_count--;
+ GEM_BUG_ON(!atomic_read(&tl->pin_count));
+ atomic_dec(&tl->pin_count);
}
static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
{
const unsigned long sz = PAGE_SIZE / 2;
- struct mock_ring *ring;
+ struct intel_ring *ring;
ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
if (!ring)
return NULL;
- if (intel_timeline_init(&ring->timeline, engine->gt, NULL)) {
- kfree(ring);
- return NULL;
- }
-
- kref_init(&ring->base.ref);
- ring->base.size = sz;
- ring->base.effective_size = sz;
- ring->base.vaddr = (void *)(ring + 1);
- ring->base.timeline = &ring->timeline;
- atomic_set(&ring->base.pin_count, 1);
+ kref_init(&ring->ref);
+ ring->size = sz;
+ ring->effective_size = sz;
+ ring->vaddr = (void *)(ring + 1);
+ atomic_set(&ring->pin_count, 1);
- INIT_LIST_HEAD(&ring->base.request_list);
- intel_ring_update_space(&ring->base);
+ intel_ring_update_space(ring);
- return &ring->base;
-}
-
-static void mock_ring_free(struct intel_ring *base)
-{
- struct mock_ring *ring = container_of(base, typeof(*ring), base);
-
- intel_timeline_fini(&ring->timeline);
- kfree(ring);
+ return ring;
}
static struct i915_request *first_request(struct mock_engine *engine)
@@ -130,7 +111,6 @@ static void hw_delay_complete(struct timer_list *t)
static void mock_context_unpin(struct intel_context *ce)
{
- mock_timeline_unpin(ce->ring->timeline);
}
static void mock_context_destroy(struct kref *ref)
@@ -139,32 +119,41 @@ static void mock_context_destroy(struct kref *ref)
GEM_BUG_ON(intel_context_is_pinned(ce));
- if (ce->ring)
- mock_ring_free(ce->ring);
+ if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ kfree(ce->ring);
+ mock_timeline_unpin(ce->timeline);
+ }
intel_context_fini(ce);
intel_context_free(ce);
}
-static int mock_context_pin(struct intel_context *ce)
+static int mock_context_alloc(struct intel_context *ce)
{
- int ret;
-
- if (!ce->ring) {
- ce->ring = mock_ring(ce->engine);
- if (!ce->ring)
- return -ENOMEM;
+ ce->ring = mock_ring(ce->engine);
+ if (!ce->ring)
+ return -ENOMEM;
+
+ GEM_BUG_ON(ce->timeline);
+ ce->timeline = intel_timeline_create(ce->engine->gt, NULL);
+ if (IS_ERR(ce->timeline)) {
+ kfree(ce->ring);
+ return PTR_ERR(ce->timeline);
}
- ret = intel_context_active_acquire(ce);
- if (ret)
- return ret;
+ mock_timeline_pin(ce->timeline);
- mock_timeline_pin(ce->ring->timeline);
return 0;
}
+static int mock_context_pin(struct intel_context *ce)
+{
+ return intel_context_active_acquire(ce);
+}
+
static const struct intel_context_ops mock_context_ops = {
+ .alloc = mock_context_alloc,
+
.pin = mock_context_pin,
.unpin = mock_context_unpin,
@@ -262,6 +251,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
engine->base.id = id;
engine->base.mask = BIT(id);
+ engine->base.instance = id;
engine->base.status_page.addr = (void *)(engine + 1);
engine->base.cops = &mock_context_ops;
@@ -280,29 +270,26 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
timer_setup(&engine->hw_delay, hw_delay_complete, 0);
INIT_LIST_HEAD(&engine->hw_queue);
+ intel_engine_add_user(&engine->base);
+
return &engine->base;
}
int mock_engine_init(struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = engine->i915;
- int err;
+ struct intel_context *ce;
intel_engine_init_active(engine, ENGINE_MOCK);
intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlists(engine);
intel_engine_init__pm(engine);
+ intel_engine_pool_init(&engine->pool);
- engine->kernel_context =
- i915_gem_context_get_engine(i915->kernel_context, engine->id);
- if (IS_ERR(engine->kernel_context))
- goto err_breadcrumbs;
-
- err = intel_context_pin(engine->kernel_context);
- intel_context_put(engine->kernel_context);
- if (err)
+ ce = create_kernel_context(engine);
+ if (IS_ERR(ce))
goto err_breadcrumbs;
+ engine->kernel_context = ce;
return 0;
err_breadcrumbs:
@@ -336,6 +323,7 @@ void mock_engine_free(struct intel_engine_cs *engine)
GEM_BUG_ON(timer_pending(&mock->hw_delay));
intel_context_unpin(engine->kernel_context);
+ intel_context_put(engine->kernel_context);
intel_engine_fini_breadcrumbs(engine);
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
new file mode 100644
index 000000000000..9d1ea26c7a2d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -0,0 +1,456 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+#include "intel_engine_pm.h"
+#include "intel_gt.h"
+
+#include "gem/selftests/mock_context.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/mock_drm.h"
+
+static int request_sync(struct i915_request *rq)
+{
+ long timeout;
+ int err = 0;
+
+ i915_request_get(rq);
+
+ i915_request_add(rq);
+ timeout = i915_request_wait(rq, 0, HZ / 10);
+ if (timeout < 0) {
+ err = timeout;
+ } else {
+ mutex_lock(&rq->timeline->mutex);
+ i915_request_retire_upto(rq);
+ mutex_unlock(&rq->timeline->mutex);
+ }
+
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int context_sync(struct intel_context *ce)
+{
+ struct intel_timeline *tl = ce->timeline;
+ int err = 0;
+
+ mutex_lock(&tl->mutex);
+ do {
+ struct i915_request *rq;
+ long timeout;
+
+ rcu_read_lock();
+ rq = rcu_dereference(tl->last_request.request);
+ if (rq)
+ rq = i915_request_get_rcu(rq);
+ rcu_read_unlock();
+ if (!rq)
+ break;
+
+ timeout = i915_request_wait(rq, 0, HZ / 10);
+ if (timeout < 0)
+ err = timeout;
+ else
+ i915_request_retire_upto(rq);
+
+ i915_request_put(rq);
+ } while (!err);
+ mutex_unlock(&tl->mutex);
+
+ return err;
+}
+
+static int __live_context_size(struct intel_engine_cs *engine,
+ struct i915_gem_context *fixme)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ void *vaddr;
+ int err;
+
+ ce = intel_context_create(fixme, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_pin(ce);
+ if (err)
+ goto err;
+
+ vaddr = i915_gem_object_pin_map(ce->state->obj,
+ i915_coherent_map_type(engine->i915));
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ intel_context_unpin(ce);
+ goto err;
+ }
+
+ /*
+ * Note that execlists also applies a redzone which it checks on
+ * context unpin when debugging. We are using the same location
+ * and same poison value so that our checks overlap. Despite the
+ * redundancy, we want to keep this little selftest so that we
+ * get coverage of any and all submission backends, and we can
+ * always extend this test to ensure we trick the HW into a
+ * compromising position wrt the various sections that need
+ * to be written into the context state.
+ *
+ * TLDR; this overlaps with the execlists redzone.
+ */
+ if (HAS_EXECLISTS(engine->i915))
+ vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
+
+ vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
+ memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
+
+ rq = intel_context_create_request(ce);
+ intel_context_unpin(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ err = request_sync(rq);
+ if (err)
+ goto err_unpin;
+
+ /* Force the context switch */
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+ err = request_sync(rq);
+ if (err)
+ goto err_unpin;
+
+ if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) {
+ pr_err("%s context overwrote trailing red-zone!", engine->name);
+ err = -EINVAL;
+ }
+
+err_unpin:
+ i915_gem_object_unpin_map(ce->state->obj);
+err:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_context_size(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *fixme;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check that our context sizes are correct by seeing if the
+ * HW tries to write past the end of one.
+ */
+
+ mutex_lock(&gt->i915->drm.struct_mutex);
+
+ fixme = kernel_context(gt->i915);
+ if (IS_ERR(fixme)) {
+ err = PTR_ERR(fixme);
+ goto unlock;
+ }
+
+ for_each_engine(engine, gt->i915, id) {
+ struct {
+ struct drm_i915_gem_object *state;
+ void *pinned;
+ } saved;
+
+ if (!engine->context_size)
+ continue;
+
+ intel_engine_pm_get(engine);
+
+ /*
+ * Hide the old default state -- we lie about the context size
+ * and get confused when the default state is smaller than
+ * expected. For our do-nothing request, inheriting the
+ * active state is sufficient, we are only checking that we
+ * don't use more than we planned.
+ */
+ saved.state = fetch_and_zero(&engine->default_state);
+ saved.pinned = fetch_and_zero(&engine->pinned_default_state);
+
+ /* Overlaps with the execlists redzone */
+ engine->context_size += I915_GTT_PAGE_SIZE;
+
+ err = __live_context_size(engine, fixme);
+
+ engine->context_size -= I915_GTT_PAGE_SIZE;
+
+ engine->pinned_default_state = saved.pinned;
+ engine->default_state = saved.state;
+
+ intel_engine_pm_put(engine);
+
+ if (err)
+ break;
+ }
+
+ kernel_context_close(fixme);
+unlock:
+ mutex_unlock(&gt->i915->drm.struct_mutex);
+ return err;
+}
+
+static int __live_active_context(struct intel_engine_cs *engine,
+ struct i915_gem_context *fixme)
+{
+ struct intel_context *ce;
+ int pass;
+ int err;
+
+ /*
+ * We keep active contexts alive until after a subsequent context
+ * switch as the final write from the context-save will be after
+ * we retire the final request. We track when we unpin the context,
+ * under the presumption that the final pin is from the last request,
+ * and instead of immediately unpinning the context, we add a task
+ * to unpin the context from the next idle-barrier.
+ *
+ * This test makes sure that the context is kept alive until a
+ * subsequent idle-barrier (emitted when the engine wakeref hits 0
+ * with no more outstanding requests).
+ */
+
+ if (intel_engine_pm_is_awake(engine)) {
+ pr_err("%s is awake before starting %s!\n",
+ engine->name, __func__);
+ return -EINVAL;
+ }
+
+ ce = intel_context_create(fixme, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ for (pass = 0; pass <= 2; pass++) {
+ struct i915_request *rq;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err;
+ }
+
+ err = request_sync(rq);
+ if (err)
+ goto err;
+
+ /* Context will be kept active until after an idle-barrier. */
+ if (i915_active_is_idle(&ce->active)) {
+ pr_err("context is not active; expected idle-barrier (%s pass %d)\n",
+ engine->name, pass);
+ err = -EINVAL;
+ goto err;
+ }
+
+ if (!intel_engine_pm_is_awake(engine)) {
+ pr_err("%s is asleep before idle-barrier\n",
+ engine->name);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+ /* Now make sure our idle-barriers are flushed */
+ err = context_sync(engine->kernel_context);
+ if (err)
+ goto err;
+
+ if (!i915_active_is_idle(&ce->active)) {
+ pr_err("context is still active!");
+ err = -EINVAL;
+ }
+
+ if (intel_engine_pm_is_awake(engine)) {
+ struct drm_printer p = drm_debug_printer(__func__);
+
+ intel_engine_dump(engine, &p,
+ "%s is still awake after idle-barriers\n",
+ engine->name);
+ GEM_TRACE_DUMP();
+
+ err = -EINVAL;
+ goto err;
+ }
+
+err:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_active_context(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *fixme;
+ enum intel_engine_id id;
+ struct drm_file *file;
+ int err = 0;
+
+ file = mock_file(gt->i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&gt->i915->drm.struct_mutex);
+
+ fixme = live_context(gt->i915, file);
+ if (IS_ERR(fixme)) {
+ err = PTR_ERR(fixme);
+ goto unlock;
+ }
+
+ for_each_engine(engine, gt->i915, id) {
+ err = __live_active_context(engine, fixme);
+ if (err)
+ break;
+
+ err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ if (err)
+ break;
+ }
+
+unlock:
+ mutex_unlock(&gt->i915->drm.struct_mutex);
+ mock_file_free(gt->i915, file);
+ return err;
+}
+
+static int __remote_sync(struct intel_context *ce, struct intel_context *remote)
+{
+ struct i915_request *rq;
+ int err;
+
+ err = intel_context_pin(remote);
+ if (err)
+ return err;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto unpin;
+ }
+
+ err = intel_context_prepare_remote_request(remote, rq);
+ if (err) {
+ i915_request_add(rq);
+ goto unpin;
+ }
+
+ err = request_sync(rq);
+
+unpin:
+ intel_context_unpin(remote);
+ return err;
+}
+
+static int __live_remote_context(struct intel_engine_cs *engine,
+ struct i915_gem_context *fixme)
+{
+ struct intel_context *local, *remote;
+ int pass;
+ int err;
+
+ /*
+ * Check that our idle barriers do not interfere with normal
+ * activity tracking. In particular, check that operating
+ * on the context image remotely (intel_context_prepare_remote_request),
+ * which inserts foreign fences into intel_context.active, does not
+ * clobber the idle-barrier.
+ */
+
+ remote = intel_context_create(fixme, engine);
+ if (IS_ERR(remote))
+ return PTR_ERR(remote);
+
+ local = intel_context_create(fixme, engine);
+ if (IS_ERR(local)) {
+ err = PTR_ERR(local);
+ goto err_remote;
+ }
+
+ for (pass = 0; pass <= 2; pass++) {
+ err = __remote_sync(local, remote);
+ if (err)
+ break;
+
+ err = __remote_sync(engine->kernel_context, remote);
+ if (err)
+ break;
+
+ if (i915_active_is_idle(&remote->active)) {
+ pr_err("remote context is not active; expected idle-barrier (%s pass %d)\n",
+ engine->name, pass);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ intel_context_put(local);
+err_remote:
+ intel_context_put(remote);
+ return err;
+}
+
+static int live_remote_context(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *fixme;
+ enum intel_engine_id id;
+ struct drm_file *file;
+ int err = 0;
+
+ file = mock_file(gt->i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&gt->i915->drm.struct_mutex);
+
+ fixme = live_context(gt->i915, file);
+ if (IS_ERR(fixme)) {
+ err = PTR_ERR(fixme);
+ goto unlock;
+ }
+
+ for_each_engine(engine, gt->i915, id) {
+ err = __live_remote_context(engine, fixme);
+ if (err)
+ break;
+
+ err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ if (err)
+ break;
+ }
+
+unlock:
+ mutex_unlock(&gt->i915->drm.struct_mutex);
+ mock_file_free(gt->i915, file);
+ return err;
+}
+
+int intel_context_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_context_size),
+ SUBTEST(live_active_context),
+ SUBTEST(live_remote_context),
+ };
+ struct intel_gt *gt = &i915->gt;
+
+ if (intel_gt_is_wedged(gt))
+ return 0;
+
+ return intel_gt_live_subtests(tests, gt);
+}
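
[Editor's note: the red-zone trick exercised by live_context_size() above is general -- poison a guard region past the end of the buffer, run the workload, then verify the poison survived (the driver performs the check with memchr_inv()). A minimal userspace sketch of the same idea; the buffer sizes and names here are illustrative, not taken from the driver.]

/* Illustrative sketch of red-zone poisoning; not driver code. */
#include <stdio.h>
#include <string.h>

#define POISON_BYTE 0x5a
#define GUARD_SIZE  64

/* Fill the guard region past 'payload' bytes with a poison pattern. */
static void redzone_arm(unsigned char *buf, size_t payload)
{
	memset(buf + payload, POISON_BYTE, GUARD_SIZE);
}

/* Return 0 if the poison is intact, -1 if anything wrote past the end. */
static int redzone_check(const unsigned char *buf, size_t payload)
{
	size_t i;

	for (i = 0; i < GUARD_SIZE; i++) {
		if (buf[payload + i] != POISON_BYTE) {
			fprintf(stderr, "red-zone overwritten at +%zu\n", i);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	unsigned char buf[128 + GUARD_SIZE];

	redzone_arm(buf, 128);
	memset(buf, 0, 128);	/* stays in bounds, so the check passes */
	return redzone_check(buf, 128);
}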
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine.c b/drivers/gpu/drm/i915/gt/selftest_engine.c
new file mode 100644
index 000000000000..f65b118e261d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine.c
@@ -0,0 +1,28 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+#include "selftest_engine.h"
+
+int intel_engine_live_selftests(struct drm_i915_private *i915)
+{
+ static int (* const tests[])(struct intel_gt *) = {
+ live_engine_pm_selftests,
+ NULL,
+ };
+ struct intel_gt *gt = &i915->gt;
+ typeof(*tests) *fn;
+
+ for (fn = tests; *fn; fn++) {
+ int err;
+
+ err = (*fn)(gt);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine.h b/drivers/gpu/drm/i915/gt/selftest_engine.h
new file mode 100644
index 000000000000..ab32d09ec5a1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine.h
@@ -0,0 +1,14 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef SELFTEST_ENGINE_H
+#define SELFTEST_ENGINE_H
+
+struct intel_gt;
+
+int live_engine_pm_selftests(struct intel_gt *gt);
+
+#endif
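
[Editor's note: intel_engine_live_selftests() above walks a NULL-terminated table of test functions, so adding a suite is a one-line edit. A standalone sketch of the same dispatch idiom; the test names are invented.]

/* Illustrative sketch of the NULL-terminated test-table idiom. */
#include <stdio.h>

static int test_one(void) { return 0; }
static int test_two(void) { return 0; }

static int (* const tests[])(void) = {
	test_one,
	test_two,
	NULL,
};

int main(void)
{
	int (* const *fn)(void);

	for (fn = tests; *fn; fn++) {
		int err = (*fn)();

		if (err) {
			fprintf(stderr, "test %td failed: %d\n", fn - tests, err);
			return 1;
		}
	}
	return 0;
}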
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index cfaa6b296835..3880f07c29b8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -12,19 +12,18 @@ static int intel_mmio_bases_check(void *arg)
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
const struct engine_info *info = &intel_engines[i];
- char name[INTEL_ENGINE_CS_MAX_NAME];
u8 prev = U8_MAX;
- __sprint_engine_name(name, info);
-
for (j = 0; j < MAX_MMIO_BASES; j++) {
u8 gen = info->mmio_bases[j].gen;
u32 base = info->mmio_bases[j].base;
if (gen >= prev) {
- pr_err("%s: %s: mmio base for gen %x "
- "is before the one for gen %x\n",
- __func__, name, prev, gen);
+ pr_err("%s(%s, class:%d, instance:%d): mmio base for gen %x is before the one for gen %x\n",
+ __func__,
+ intel_engine_class_repr(info->class),
+ info->class, info->instance,
+ prev, gen);
return -EINVAL;
}
@@ -32,17 +31,22 @@ static int intel_mmio_bases_check(void *arg)
break;
if (!base) {
- pr_err("%s: %s: invalid mmio base (%x) "
- "for gen %x at entry %u\n",
- __func__, name, base, gen, j);
+ pr_err("%s(%s, class:%d, instance:%d): invalid mmio base (%x) for gen %x at entry %u\n",
+ __func__,
+ intel_engine_class_repr(info->class),
+ info->class, info->instance,
+ base, gen, j);
return -EINVAL;
}
prev = gen;
}
- pr_info("%s: min gen supported for %s = %d\n",
- __func__, name, prev);
+ pr_debug("%s: min gen supported for %s%d is %d\n",
+ __func__,
+ intel_engine_class_repr(info->class),
+ info->instance,
+ prev);
}
return 0;
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
new file mode 100644
index 000000000000..3a1419376912
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -0,0 +1,83 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+#include "selftest_engine.h"
+#include "selftests/igt_atomic.h"
+
+static int live_engine_pm(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Check we can call intel_engine_pm_put from any context. No
+ * failures are reported directly, but if we mess up lockdep should
+ * tell us.
+ */
+ if (intel_gt_pm_wait_for_idle(gt)) {
+ pr_err("Unable to flush GT pm before test\n");
+ return -EBUSY;
+ }
+
+ GEM_BUG_ON(intel_gt_pm_is_awake(gt));
+ for_each_engine(engine, gt->i915, id) {
+ const typeof(*igt_atomic_phases) *p;
+
+ for (p = igt_atomic_phases; p->name; p++) {
+ /*
+ * Acquisition is always synchronous, except if we
+ * know that the engine is already awake, in which
+ * case we should use intel_engine_pm_get_if_awake()
+ * to atomically grab the wakeref.
+ *
+ * In practice,
+ * intel_engine_pm_get();
+ * intel_engine_pm_put();
+ * occurs in one thread, while simultaneously
+ * intel_engine_pm_get_if_awake();
+ * intel_engine_pm_put();
+ * occurs from atomic context in another.
+ */
+ GEM_BUG_ON(intel_engine_pm_is_awake(engine));
+ intel_engine_pm_get(engine);
+
+ p->critical_section_begin();
+ if (!intel_engine_pm_get_if_awake(engine))
+ pr_err("intel_engine_pm_get_if_awake(%s) failed under %s\n",
+ engine->name, p->name);
+ else
+ intel_engine_pm_put(engine);
+ intel_engine_pm_put(engine);
+ p->critical_section_end();
+
+ /* engine wakeref is sync (instant) */
+ if (intel_engine_pm_is_awake(engine)) {
+ pr_err("%s is still awake after flushing pm\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ /* gt wakeref is async (deferred to workqueue) */
+ if (intel_gt_pm_wait_for_idle(gt)) {
+ pr_err("GT failed to idle\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int live_engine_pm_selftests(struct intel_gt *gt)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_engine_pm),
+ };
+
+ return intel_gt_live_subtests(tests, gt);
+}
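
[Editor's note: the comment in live_engine_pm() spells out the contract -- a plain get may sleep to power the engine up, so atomic context must use get_if_awake(), which only takes a reference when one is already held. Below is the inc-not-zero idiom behind that, sketched in portable C11; the kernel expresses it with atomic helpers on the wakeref count, and the names here are invented.]

/* Illustrative sketch of the get_if_awake / inc-not-zero idiom. */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int wakeref;	/* 0 == asleep */

/*
 * Succeeds only if the count is already non-zero, i.e. someone else is
 * holding the device awake; it can never trigger the sleeping power-up
 * path, which makes it safe to call from atomic context.
 */
static bool pm_get_if_awake(void)
{
	int old = atomic_load(&wakeref);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&wakeref, &old, old + 1))
			return true;
	}
	return false;
}

static void pm_put(void)
{
	if (atomic_fetch_sub(&wakeref, 1) == 1) {
		/* last reference: schedule or perform the power-down */
	}
}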
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 4484b4447db1..a0098fc35921 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -118,7 +118,10 @@ static int move_to_active(struct i915_vma *vma,
int err;
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, flags);
+ err = i915_request_await_object(rq, vma->obj,
+ flags & EXEC_OBJECT_WRITE);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, flags);
i915_vma_unlock(vma);
return err;
@@ -1154,7 +1157,14 @@ static int evict_fence(void *data)
goto out_unlock;
}
+ err = i915_vma_pin(arg->vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
+ if (err) {
+ pr_err("Unable to pin vma for Y-tiled fence; err:%d\n", err);
+ goto out_unlock;
+ }
+
err = i915_vma_pin_fence(arg->vma);
+ i915_vma_unpin(arg->vma);
if (err) {
pr_err("Unable to pin Y-tiled fence; err:%d\n", err);
goto out_unlock;
@@ -1237,7 +1247,10 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
}
i915_vma_lock(arg.vma);
- err = i915_vma_move_to_active(arg.vma, rq, flags);
+ err = i915_request_await_object(rq, arg.vma->obj,
+ flags & EXEC_OBJECT_WRITE);
+ if (err == 0)
+ err = i915_vma_move_to_active(arg.vma, rq, flags);
i915_vma_unlock(arg.vma);
if (flags & EXEC_OBJECT_NEEDS_FENCE)
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 60f27e52d267..d791158988d6 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -22,9 +22,9 @@
static int live_sanitycheck(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine;
+ struct i915_gem_engines_iter it;
struct i915_gem_context *ctx;
- enum intel_engine_id id;
+ struct intel_context *ce;
struct igt_spinner spin;
intel_wakeref_t wakeref;
int err = -ENOMEM;
@@ -35,17 +35,17 @@ static int live_sanitycheck(void *arg)
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- if (igt_spinner_init(&spin, i915))
+ if (igt_spinner_init(&spin, &i915->gt))
goto err_unlock;
ctx = kernel_context(i915);
if (!ctx)
goto err_spin;
- for_each_engine(engine, i915, id) {
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
struct i915_request *rq;
- rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx;
@@ -69,6 +69,7 @@ static int live_sanitycheck(void *arg)
err = 0;
err_ctx:
+ i915_gem_context_unlock_engines(ctx);
kernel_context_close(ctx);
err_spin:
igt_spinner_fini(&spin);
@@ -480,6 +481,24 @@ err_unlock:
return err;
}
+static struct i915_request *
+spinner_create_request(struct igt_spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arb)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ rq = igt_spinner_create_request(spin, ce, arb);
+ intel_context_put(ce);
+ return rq;
+}
+
static int live_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -499,10 +518,10 @@ static int live_preempt(void *arg)
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- if (igt_spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, &i915->gt))
goto err_unlock;
- if (igt_spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, &i915->gt))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
@@ -529,8 +548,8 @@ static int live_preempt(void *arg)
goto err_ctx_lo;
}
- rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
@@ -545,8 +564,8 @@ static int live_preempt(void *arg)
goto err_ctx_lo;
}
- rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
@@ -603,10 +622,10 @@ static int live_late_preempt(void *arg)
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- if (igt_spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, &i915->gt))
goto err_unlock;
- if (igt_spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, &i915->gt))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
@@ -632,8 +651,8 @@ static int live_late_preempt(void *arg)
goto err_ctx_lo;
}
- rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
@@ -645,8 +664,8 @@ static int live_late_preempt(void *arg)
goto err_wedged;
}
- rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_NOOP);
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_NOOP);
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
@@ -711,7 +730,7 @@ static int preempt_client_init(struct drm_i915_private *i915,
if (!c->ctx)
return -ENOMEM;
- if (igt_spinner_init(&c->spin, i915))
+ if (igt_spinner_init(&c->spin, &i915->gt))
goto err_ctx;
return 0;
@@ -761,9 +780,9 @@ static int live_nopreempt(void *arg)
engine->execlists.preempt_hang.count = 0;
- rq_a = igt_spinner_create_request(&a.spin,
- a.ctx, engine,
- MI_ARB_CHECK);
+ rq_a = spinner_create_request(&a.spin,
+ a.ctx, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq_a)) {
err = PTR_ERR(rq_a);
goto err_client_b;
@@ -778,9 +797,9 @@ static int live_nopreempt(void *arg)
goto err_wedged;
}
- rq_b = igt_spinner_create_request(&b.spin,
- b.ctx, engine,
- MI_ARB_CHECK);
+ rq_b = spinner_create_request(&b.spin,
+ b.ctx, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq_b)) {
err = PTR_ERR(rq_b);
goto err_client_b;
@@ -880,9 +899,9 @@ static int live_suppress_self_preempt(void *arg)
engine->execlists.preempt_hang.count = 0;
- rq_a = igt_spinner_create_request(&a.spin,
- a.ctx, engine,
- MI_NOOP);
+ rq_a = spinner_create_request(&a.spin,
+ a.ctx, engine,
+ MI_NOOP);
if (IS_ERR(rq_a)) {
err = PTR_ERR(rq_a);
goto err_client_b;
@@ -894,10 +913,12 @@ static int live_suppress_self_preempt(void *arg)
goto err_wedged;
}
+ /* Keep postponing the timer to avoid premature slicing */
+ mod_timer(&engine->execlists.timer, jiffies + HZ);
for (depth = 0; depth < 8; depth++) {
- rq_b = igt_spinner_create_request(&b.spin,
- b.ctx, engine,
- MI_NOOP);
+ rq_b = spinner_create_request(&b.spin,
+ b.ctx, engine,
+ MI_NOOP);
if (IS_ERR(rq_b)) {
err = PTR_ERR(rq_b);
goto err_client_b;
@@ -919,7 +940,8 @@ static int live_suppress_self_preempt(void *arg)
igt_spinner_end(&a.spin);
if (engine->execlists.preempt_hang.count) {
- pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
+ pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n",
+ engine->name,
engine->execlists.preempt_hang.count,
depth);
err = -EINVAL;
@@ -1048,9 +1070,9 @@ static int live_suppress_wait_preempt(void *arg)
goto err_client_3;
for (i = 0; i < ARRAY_SIZE(client); i++) {
- rq[i] = igt_spinner_create_request(&client[i].spin,
- client[i].ctx, engine,
- MI_NOOP);
+ rq[i] = spinner_create_request(&client[i].spin,
+ client[i].ctx, engine,
+ MI_NOOP);
if (IS_ERR(rq[i])) {
err = PTR_ERR(rq[i]);
goto err_wedged;
@@ -1157,9 +1179,9 @@ static int live_chain_preempt(void *arg)
if (!intel_engine_has_preemption(engine))
continue;
- rq = igt_spinner_create_request(&lo.spin,
- lo.ctx, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
@@ -1183,18 +1205,18 @@ static int live_chain_preempt(void *arg)
}
for_each_prime_number_from(count, 1, ring_size) {
- rq = igt_spinner_create_request(&hi.spin,
- hi.ctx, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&hi.spin,
+ hi.ctx, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
if (!igt_wait_for_spinner(&hi.spin, rq))
goto err_wedged;
- rq = igt_spinner_create_request(&lo.spin,
- lo.ctx, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
@@ -1284,10 +1306,10 @@ static int live_preempt_hang(void *arg)
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- if (igt_spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, &i915->gt))
goto err_unlock;
- if (igt_spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, &i915->gt))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
@@ -1308,8 +1330,8 @@ static int live_preempt_hang(void *arg)
if (!intel_engine_has_preemption(engine))
continue;
- rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
@@ -1324,8 +1346,8 @@ static int live_preempt_hang(void *arg)
goto err_ctx_lo;
}
- rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
+ rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
@@ -1437,11 +1459,13 @@ static int smoke_submit(struct preempt_smoke *smoke,
if (vma) {
i915_vma_lock(vma);
- err = rq->engine->emit_bb_start(rq,
- vma->node.start,
- PAGE_SIZE, 0);
+ err = i915_request_await_object(rq, vma->obj, false);
if (!err)
err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
i915_vma_unlock(vma);
}
@@ -1773,6 +1797,7 @@ static int live_virtual_engine(void *arg)
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
struct intel_engine_cs *engine;
+ struct intel_gt *gt = &i915->gt;
enum intel_engine_id id;
unsigned int class, inst;
int err = -ENODEV;
@@ -1796,10 +1821,10 @@ static int live_virtual_engine(void *arg)
nsibling = 0;
for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!i915->engine_class[class][inst])
+ if (!gt->engine_class[class][inst])
continue;
- siblings[nsibling++] = i915->engine_class[class][inst];
+ siblings[nsibling++] = gt->engine_class[class][inst];
}
if (nsibling < 2)
continue;
@@ -1920,6 +1945,7 @@ static int live_virtual_mask(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ struct intel_gt *gt = &i915->gt;
unsigned int class, inst;
int err = 0;
@@ -1933,10 +1959,10 @@ static int live_virtual_mask(void *arg)
nsibling = 0;
for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!i915->engine_class[class][inst])
+ if (!gt->engine_class[class][inst])
break;
- siblings[nsibling++] = i915->engine_class[class][inst];
+ siblings[nsibling++] = gt->engine_class[class][inst];
}
if (nsibling < 2)
continue;
@@ -2097,6 +2123,7 @@ static int live_virtual_bond(void *arg)
};
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ struct intel_gt *gt = &i915->gt;
unsigned int class, inst;
int err = 0;
@@ -2111,11 +2138,11 @@ static int live_virtual_bond(void *arg)
nsibling = 0;
for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
- if (!i915->engine_class[class][inst])
+ if (!gt->engine_class[class][inst])
break;
GEM_BUG_ON(nsibling == ARRAY_SIZE(siblings));
- siblings[nsibling++] = i915->engine_class[class][inst];
+ siblings[nsibling++] = gt->engine_class[class][inst];
}
if (nsibling < 2)
continue;
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index f0a840030382..321481403165 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -689,7 +689,9 @@ static int live_hwsp_wrap(void *arg)
tl->seqno = -4u;
+ mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
err = intel_timeline_get_seqno(tl, rq, &seqno[0]);
+ mutex_unlock(&tl->mutex);
if (err) {
i915_request_add(rq);
goto out;
@@ -704,7 +706,9 @@ static int live_hwsp_wrap(void *arg)
}
hwsp_seqno[0] = tl->hwsp_seqno;
+ mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
err = intel_timeline_get_seqno(tl, rq, &seqno[1]);
+ mutex_unlock(&tl->mutex);
if (err) {
i915_request_add(rq);
goto out;
@@ -816,8 +820,6 @@ static int live_hwsp_recycle(void *arg)
if (err)
goto out;
-
- intel_timelines_park(i915); /* Encourage recycling! */
} while (!__igt_timeout(end_time, NULL));
}
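
[Editor's note: live_hwsp_wrap() above seeds tl->seqno at -4u so that a couple of requests step the counter across the 32-bit boundary. Ordering stays correct across the wrap because seqnos are compared through a signed difference; i915's seqno helpers take this shape. A self-contained sketch of the technique:]

/* Illustrative: wrap-safe 32-bit seqno ordering via signed difference. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * True if seqno a is at-or-after b; valid while the two are within 2^31
 * of each other, even when the u32 counter has wrapped between them.
 */
static bool seqno_passed(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

int main(void)
{
	uint32_t before = (uint32_t)-4;	/* just below the wrap point */
	uint32_t after = before + 8;	/* wraps around to 4 */

	printf("after passed before: %d\n", seqno_passed(after, before)); /* 1 */
	printf("before passed after: %d\n", seqno_passed(before, after)); /* 0 */
	return 0;
}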
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index ab147985fa74..d06d68ac2a3b 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -5,6 +5,7 @@
*/
#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"
@@ -112,7 +113,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
}
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (err)
goto err_req;
@@ -238,6 +241,7 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
struct igt_spinner *spin)
{
struct i915_gem_context *ctx;
+ struct intel_context *ce;
struct i915_request *rq;
intel_wakeref_t wakeref;
int err = 0;
@@ -248,10 +252,14 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
GEM_BUG_ON(i915_gem_context_is_bannable(ctx));
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+ GEM_BUG_ON(IS_ERR(ce));
+
rq = ERR_PTR(-ENODEV);
with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref)
- rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
+ rq = igt_spinner_create_request(spin, ce, MI_NOOP);
+ intel_context_put(ce);
kernel_context_close(ctx);
if (IS_ERR(rq)) {
@@ -291,7 +299,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- err = igt_spinner_init(&spin, i915);
+ err = igt_spinner_init(&spin, engine->gt);
if (err)
goto out_ctx;
@@ -1083,7 +1091,7 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
enum intel_engine_id id = ce->engine->id;
ok &= engine_wa_list_verify(ce,
@@ -1094,7 +1102,6 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
&lists->engine[id].ctx_wa_list,
str) == 0;
}
- i915_gem_context_unlock_engines(ctx);
return ok;
}
@@ -1115,6 +1122,8 @@ live_gpu_reset_workarounds(void *arg)
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ i915_gem_context_lock_engines(ctx);
+
pr_info("Verifying after GPU reset...\n");
igt_global_reset_lock(&i915->gt);
@@ -1131,6 +1140,7 @@ live_gpu_reset_workarounds(void *arg)
ok = verify_wa_lists(ctx, &lists, "after reset");
out:
+ i915_gem_context_unlock_engines(ctx);
kernel_context_close(ctx);
reference_lists_fini(i915, &lists);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -1143,10 +1153,10 @@ static int
live_engine_reset_workarounds(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine;
+ struct i915_gem_engines_iter it;
struct i915_gem_context *ctx;
+ struct intel_context *ce;
struct igt_spinner spin;
- enum intel_engine_id id;
struct i915_request *rq;
intel_wakeref_t wakeref;
struct wa_lists lists;
@@ -1164,7 +1174,8 @@ live_engine_reset_workarounds(void *arg)
reference_lists_init(i915, &lists);
- for_each_engine(engine, i915, id) {
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ struct intel_engine_cs *engine = ce->engine;
bool ok;
pr_info("Verifying after %s reset...\n", engine->name);
@@ -1183,11 +1194,11 @@ live_engine_reset_workarounds(void *arg)
goto err;
}
- ret = igt_spinner_init(&spin, i915);
+ ret = igt_spinner_init(&spin, engine->gt);
if (ret)
goto err;
- rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
igt_spinner_fini(&spin);
@@ -1214,8 +1225,8 @@ live_engine_reset_workarounds(void *arg)
goto err;
}
}
-
err:
+ i915_gem_context_unlock_engines(ctx);
reference_lists_fini(i915, &lists);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
igt_global_reset_unlock(&i915->gt);
diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
index 5c549205828a..598170efcaf6 100644
--- a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
@@ -15,7 +15,7 @@ void mock_timeline_init(struct intel_timeline *timeline, u64 context)
mutex_init(&timeline->mutex);
- INIT_ACTIVE_REQUEST(&timeline->last_request);
+ INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 13fbbffd05c7..249c747e9756 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -1,25 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
#include "gt/intel_gt.h"
@@ -82,6 +63,7 @@ void intel_guc_init_early(struct intel_guc *guc)
intel_guc_fw_init_early(guc);
intel_guc_ct_init_early(&guc->ct);
intel_guc_log_init_early(&guc->log);
+ intel_guc_submission_init_early(guc);
mutex_init(&guc->send_mutex);
spin_lock_init(&guc->irq_lock);
@@ -144,7 +126,7 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
u32 flags = 0;
- if (!intel_uc_is_using_guc_submission(&guc_to_gt(guc)->uc))
+ if (!intel_guc_is_submission_supported(guc))
flags |= GUC_CTL_DISABLE_SCHEDULER;
return flags;
@@ -154,7 +136,7 @@ static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
u32 flags = 0;
- if (intel_uc_is_using_guc_submission(&guc_to_gt(guc)->uc)) {
+ if (intel_guc_is_submission_supported(guc)) {
u32 ctxnum, base;
base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
@@ -290,7 +272,7 @@ int intel_guc_init(struct intel_guc *guc)
if (ret)
goto err_ads;
- if (intel_uc_is_using_guc_submission(&gt->uc)) {
+ if (intel_guc_is_submission_supported(guc)) {
/*
* This is stuff we need to have available at fw load time
* if we are planning to enable submission later
@@ -320,6 +302,7 @@ err_fw:
intel_uc_fw_fini(&guc->fw);
err_fetch:
intel_uc_fw_cleanup_fetch(&guc->fw);
+ DRM_DEV_DEBUG_DRIVER(gt->i915->drm.dev, "failed with %d\n", ret);
return ret;
}
@@ -327,9 +310,12 @@ void intel_guc_fini(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
+ if (!intel_uc_fw_is_available(&guc->fw))
+ return;
+
i915_ggtt_disable_guc(gt->ggtt);
- if (intel_uc_is_using_guc_submission(&gt->uc))
+ if (intel_guc_is_submission_supported(guc))
intel_guc_submission_fini(guc);
intel_guc_ct_fini(&guc->ct);
@@ -625,7 +611,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
goto err;
}
- return vma;
+ return i915_vma_make_unshrinkable(vma);
err:
i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 714e9892aaff..2b2f046d3cc3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -1,25 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
#ifndef _INTEL_GUC_H_
@@ -47,9 +28,6 @@ struct intel_guc {
struct intel_guc_log log;
struct intel_guc_ct ct;
- /* Log snapshot if GuC errors during load */
- struct drm_i915_gem_object *load_err_log;
-
/* intel_guc_recv interrupt related state */
spinlock_t irq_lock;
unsigned int msg_enabled_mask;
@@ -61,6 +39,8 @@ struct intel_guc {
void (*disable)(struct intel_guc *guc);
} interrupts;
+ bool submission_supported;
+
struct i915_vma *ads_vma;
struct __guc_ads_blob *ads_blob;
@@ -172,6 +152,16 @@ int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
+static inline bool intel_guc_is_supported(struct intel_guc *guc)
+{
+ return intel_uc_fw_is_supported(&guc->fw);
+}
+
+static inline bool intel_guc_is_enabled(struct intel_guc *guc)
+{
+ return intel_uc_fw_is_enabled(&guc->fw);
+}
+
static inline bool intel_guc_is_running(struct intel_guc *guc)
{
return intel_uc_fw_is_running(&guc->fw);
@@ -185,6 +175,11 @@ static inline int intel_guc_sanitize(struct intel_guc *guc)
return 0;
}
+static inline bool intel_guc_is_submission_supported(struct intel_guc *guc)
+{
+ return guc->submission_supported;
+}
+
static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
spin_lock_irq(&guc->irq_lock);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index a0da80241f22..ca6674b8e00c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -1,25 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
#include "gt/intel_gt.h"
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
index 7f40f9cd5fb9..b00d3ae1113a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
@@ -1,25 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
#ifndef _INTEL_GUC_ADS_H_
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 9e383a47609f..b49115517510 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -1,24 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2016-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2016-2019 Intel Corporation
*/
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index 8c1f6d133168..7c24d83f5c24 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -1,24 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2016-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2016-2019 Intel Corporation
*/
#ifndef _INTEL_GUC_CT_H_
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 28735c14b9a0..5528224448f6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -1,24 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2014-2019 Intel Corporation
*
* Authors:
* Vinit Azad <[email protected]>
@@ -39,7 +21,10 @@
*/
void intel_guc_fw_init_early(struct intel_guc *guc)
{
- intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, guc_to_gt(guc)->i915);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
+ intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, HAS_GT_UC(i915),
+ INTEL_INFO(i915)->platform, INTEL_REVID(i915));
}
static void guc_prepare_xfer(struct intel_uncore *uncore)
@@ -172,10 +157,10 @@ int intel_guc_fw_upload(struct intel_guc *guc)
if (ret)
goto out;
- guc->fw.status = INTEL_UC_FIRMWARE_RUNNING;
+ intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_RUNNING);
return 0;
out:
- guc->fw.status = INTEL_UC_FIRMWARE_FAIL;
+ intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_FAIL);
return ret;
}
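
The two status hunks above route every state write through intel_uc_fw_change_status() instead of assigning guc->fw.status directly; the helper itself is added further down in intel_uc_fw.c. A minimal, self-contained C sketch of the same single-choke-point pattern (all names here are illustrative, not the driver's):

    #include <stdio.h>

    enum fw_status { FW_UNINITIALIZED, FW_SELECTED, FW_RUNNING, FW_FAIL };

    struct fw {
        enum fw_status status; /* written only via fw_change_status() */
    };

    static const char *fw_status_repr(enum fw_status s)
    {
        switch (s) {
        case FW_UNINITIALIZED: return "UNINITIALIZED";
        case FW_SELECTED:      return "SELECTED";
        case FW_RUNNING:       return "RUNNING";
        case FW_FAIL:          return "FAIL";
        }
        return "<unknown>";
    }

    /* Single choke point: one debug print covers every transition. */
    static void fw_change_status(struct fw *fw, enum fw_status status)
    {
        enum fw_status old = fw->status;

        fw->status = status;
        printf("firmware %s -> %s\n",
               fw_status_repr(old), fw_status_repr(status));
    }

    int main(void)
    {
        struct fw fw = { FW_UNINITIALIZED };

        fw_change_status(&fw, FW_SELECTED);
        fw_change_status(&fw, FW_RUNNING);
        return 0;
    }

Funnelling all transitions through one setter means a single debug statement observes every edge of the state machine, instead of scattering prints next to each assignment.
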
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
index 4ec5d3d9e2b0..b5ab639d7259 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
@@ -1,25 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2017-2019 Intel Corporation
*/
#ifndef _INTEL_GUC_FW_H_
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 06a9bdfb0faf..1d3cdd67ca2f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -1,25 +1,8 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2014-2019 Intel Corporation
*/
+
#ifndef _INTEL_GUC_FWIF_H
#define _INTEL_GUC_FWIF_H
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 3460deca12c8..36332064de9c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -1,32 +1,14 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
#include <linux/debugfs.h>
#include "gt/intel_gt.h"
-#include "intel_guc_log.h"
#include "i915_drv.h"
+#include "i915_memcpy.h"
+#include "intel_guc_log.h"
static void guc_log_capture_logs(struct intel_guc_log *log);
@@ -390,6 +372,7 @@ static int guc_log_relay_create(struct intel_guc_log *log)
int ret;
lockdep_assert_held(&log->relay.lock);
+ GEM_BUG_ON(!log->vma);
/* Keep the size of sub buffers same as shared log buffer */
subbuf_size = log->vma->size;
@@ -572,6 +555,9 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
{
int ret;
+ if (!log->vma)
+ return -ENODEV;
+
mutex_lock(&log->relay.lock);
if (intel_guc_log_relay_enabled(log)) {
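
The intel_guc_log.c hunks above turn a latent crash (dereferencing log->vma with no backing buffer) into an explicit -ENODEV at the relay-open boundary, with a GEM_BUG_ON guarding the internal create path. A toy stand-alone model of that guard, using illustrative names:

    #include <errno.h>
    #include <stdio.h>

    struct log_relay {
        void *vma;  /* backing buffer; may legitimately be absent */
        int open;
    };

    /* Mirrors the guard added above: opening a relay with no backing
     * buffer fails cleanly with -ENODEV instead of crashing deeper in. */
    static int log_relay_open(struct log_relay *log)
    {
        if (!log->vma)
            return -ENODEV;
        if (log->open)
            return -EEXIST;
        log->open = 1;
        return 0;
    }

    int main(void)
    {
        struct log_relay log = { 0 };

        printf("open without buffer: %d\n", log_relay_open(&log));
        return 0;
    }
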
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index 1969572f1f79..6f764879acb1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -1,25 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
#ifndef _INTEL_GUC_LOG_H_
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
index e3cbb23299ce..edf194d23c6b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -1,26 +1,8 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
+
#ifndef _INTEL_GUC_REG_H_
#define _INTEL_GUC_REG_H_
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index b4238fe16a03..f325d3dd564f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1,25 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
*/
#include <linux/circ_buf.h>
@@ -29,10 +10,12 @@
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
#include "gt/intel_lrc_reg.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
+#include "i915_trace.h"
enum {
GUC_PREEMPT_NONE = 0,
@@ -487,8 +470,6 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
guc_wq_item_append(client, engine->guc_id, ctx_desc,
ring_tail, rq->fence.seqno);
guc_ring_doorbell(client);
-
- client->submissions[engine->id] += 1;
}
/*
@@ -534,10 +515,14 @@ static struct i915_request *schedule_in(struct i915_request *rq, int idx)
{
trace_i915_request_in(rq, idx);
- if (!rq->hw_context->inflight)
- rq->hw_context->inflight = rq->engine;
- intel_context_inflight_inc(rq->hw_context);
+ /*
+ * Currently we are not tracking the rq->context being inflight
+ * (ce->inflight = rq->engine). It is only used by the execlists
+ * backend at the moment, a similar counting strategy would be
+ * required if we generalise the inflight tracking.
+ */
+ intel_gt_pm_get(rq->engine->gt);
return i915_request_get(rq);
}
@@ -545,10 +530,7 @@ static void schedule_out(struct i915_request *rq)
{
trace_i915_request_out(rq);
- intel_context_inflight_dec(rq->hw_context);
- if (!intel_context_inflight_count(rq->hw_context))
- rq->hw_context->inflight = NULL;
-
+ intel_gt_pm_put(rq->engine->gt);
i915_request_put(rq);
}
@@ -571,6 +553,11 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
last = NULL;
}
+ /*
+ * We write directly into the execlists->inflight queue and don't use
+ * the execlists->pending queue, as we don't have a distinct switch
+ * event.
+ */
port = first;
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
@@ -651,6 +638,19 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
__tasklet_disable_sync_once(&execlists->tasklet);
}
+static void
+cancel_port_requests(struct intel_engine_execlists * const execlists)
+{
+ struct i915_request * const *port, *rq;
+
+ /* Note we are only using the inflight and not the pending queue */
+
+ for (port = execlists->active; (rq = *port); port++)
+ schedule_out(rq);
+ execlists->active =
+ memset(execlists->inflight, 0, sizeof(execlists->inflight));
+}
+
static void guc_reset(struct intel_engine_cs *engine, bool stalled)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -659,7 +659,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
spin_lock_irqsave(&engine->active.lock, flags);
- execlists_cancel_port_requests(execlists);
+ cancel_port_requests(execlists);
/* Push back any incomplete requests for replay after the reset. */
rq = execlists_unwind_incomplete_requests(execlists);
@@ -702,7 +702,7 @@ static void guc_cancel_requests(struct intel_engine_cs *engine)
spin_lock_irqsave(&engine->active.lock, flags);
/* Cancel the requests on the HW and clear the ELSP tracker. */
- execlists_cancel_port_requests(execlists);
+ cancel_port_requests(execlists);
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link) {
@@ -1074,19 +1074,6 @@ static void guc_interrupts_release(struct intel_gt *gt)
rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
}
-static void guc_submission_park(struct intel_engine_cs *engine)
-{
- intel_engine_park(engine);
- intel_engine_unpin_breadcrumbs_irq(engine);
- engine->flags &= ~I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
-}
-
-static void guc_submission_unpark(struct intel_engine_cs *engine)
-{
- engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
- intel_engine_pin_breadcrumbs_irq(engine);
-}
-
static void guc_set_default_submission(struct intel_engine_cs *engine)
{
/*
@@ -1104,8 +1091,8 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
engine->execlists.tasklet.func = guc_submission_tasklet;
- engine->park = guc_submission_park;
- engine->unpark = guc_submission_unpark;
+ /* do not use execlists park/unpark */
+ engine->park = engine->unpark = NULL;
engine->reset.prepare = guc_reset_prepare;
engine->reset.reset = guc_reset;
@@ -1114,6 +1101,15 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
engine->cancel_requests = guc_cancel_requests;
engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
+ engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
+
+ /*
+ * For the breadcrumb irq to work we need the interrupts to stay
+ * enabled. However, on all platforms on which we'll have support for
+ * GuC submission we don't allow disabling the interrupts at runtime, so
+ * we're always safe with the current flow.
+ */
+ GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
}
int intel_guc_submission_enable(struct intel_guc *guc)
@@ -1123,6 +1119,10 @@ int intel_guc_submission_enable(struct intel_guc *guc)
enum intel_engine_id id;
int err;
+ err = i915_inject_load_error(gt->i915, -ENXIO);
+ if (err)
+ return err;
+
/*
* We're using GuC work items for submitting work through GuC. Since
* we're coalescing multiple requests from a single context into a
@@ -1163,6 +1163,22 @@ void intel_guc_submission_disable(struct intel_guc *guc)
guc_clients_disable(guc);
}
+static bool __guc_submission_support(struct intel_guc *guc)
+{
+ /* XXX: GuC submission is unavailable for now */
+ return false;
+
+ if (!intel_guc_is_supported(guc))
+ return false;
+
+ return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
+}
+
+void intel_guc_submission_init_early(struct intel_guc *guc)
+{
+ guc->submission_supported = __guc_submission_support(guc);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_guc.c"
#endif
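
The schedule_in()/schedule_out() rework above drops per-context inflight counting in favour of taking a GT power reference for every request in flight, so the device can only idle once the last request is scheduled out. A stand-alone toy model of that pairing (the names and the plain integer refcount are illustrative; the real wakeref machinery is more involved):

    #include <assert.h>
    #include <stdio.h>

    struct gt_pm {
        int wakeref; /* device may power down only when this hits zero */
    };

    static void gt_pm_get(struct gt_pm *pm) { pm->wakeref++; }

    static void gt_pm_put(struct gt_pm *pm)
    {
        assert(pm->wakeref > 0); /* every put must pair with a get */
        pm->wakeref--;
    }

    struct request { struct gt_pm *gt; };

    /* Each request in flight pins the GT; no per-context counting. */
    static void schedule_in(struct request *rq)  { gt_pm_get(rq->gt); }
    static void schedule_out(struct request *rq) { gt_pm_put(rq->gt); }

    int main(void)
    {
        struct gt_pm pm = { 0 };
        struct request a = { &pm }, b = { &pm };

        schedule_in(&a);
        schedule_in(&b);
        schedule_out(&a);
        printf("wakerefs held: %d\n", pm.wakeref); /* 1: GT stays awake */
        schedule_out(&b);
        printf("wakerefs held: %d\n", pm.wakeref); /* 0: GT may idle */
        return 0;
    }
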
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index 87a38cb6faf3..54d716828352 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -1,25 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
#ifndef _INTEL_GUC_SUBMISSION_H_
@@ -70,13 +51,12 @@ struct intel_guc_client {
/* Protects GuC client's WQ access */
spinlock_t wq_lock;
- /* Per-engine counts of GuC submissions */
- u64 submissions[I915_NUM_ENGINES];
/* For testing purposes, use nop WQ items instead of real ones */
I915_SELFTEST_DECLARE(bool use_nop_wqi);
};
+void intel_guc_submission_init_early(struct intel_guc *guc);
int intel_guc_submission_init(struct intel_guc *guc);
int intel_guc_submission_enable(struct intel_guc *guc);
void intel_guc_submission_disable(struct intel_guc *guc);
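
The header now exposes intel_guc_submission_init_early(), which caches the result of the support check computed in intel_guc_submission.c at probe time. A small sketch of that resolve-once-read-many pattern, with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    struct guc {
        bool submission_supported; /* resolved once, read everywhere */
    };

    static bool guc_submission_support(void)
    {
        /* mirrors the XXX in the diff: force-disabled for now */
        return false;
    }

    static void guc_submission_init_early(struct guc *guc)
    {
        guc->submission_supported = guc_submission_support();
    }

    int main(void)
    {
        struct guc guc;

        guc_submission_init_early(&guc);
        printf("submission supported: %s\n",
               guc.submission_supported ? "yes" : "no");
        return 0;
    }
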
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index c9535caba844..d4625c97b4f9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -1,25 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2016-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2016-2019 Intel Corporation
*/
#include <linux/types.h>
@@ -52,6 +33,11 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc)
struct i915_vma *vma;
size_t copied;
void *vaddr;
+ int err;
+
+ err = i915_inject_load_error(gt->i915, -ENXIO);
+ if (err)
+ return err;
/*
* HuC firmware will sit above GUC_GGTT_TOP and will not map
@@ -91,11 +77,12 @@ static void intel_huc_rsa_data_destroy(struct intel_huc *huc)
int intel_huc_init(struct intel_huc *huc)
{
+ struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
int err;
err = intel_uc_fw_init(&huc->fw);
if (err)
- return err;
+ goto out;
/*
* HuC firmware image is outside GuC accessible range.
@@ -110,13 +97,19 @@ int intel_huc_init(struct intel_huc *huc)
out_fini:
intel_uc_fw_fini(&huc->fw);
+out:
+ intel_uc_fw_cleanup_fetch(&huc->fw);
+ DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "failed with %d\n", err);
return err;
}
void intel_huc_fini(struct intel_huc *huc)
{
- intel_uc_fw_fini(&huc->fw);
+ if (!intel_uc_fw_is_available(&huc->fw))
+ return;
+
intel_huc_rsa_data_destroy(huc);
+ intel_uc_fw_fini(&huc->fw);
}
/**
@@ -136,9 +129,15 @@ int intel_huc_auth(struct intel_huc *huc)
struct intel_guc *guc = &gt->uc.guc;
int ret;
- GEM_BUG_ON(!intel_uc_fw_is_loaded(&huc->fw));
GEM_BUG_ON(intel_huc_is_authenticated(huc));
+ if (!intel_uc_fw_is_loaded(&huc->fw))
+ return -ENOEXEC;
+
+ ret = i915_inject_load_error(gt->i915, -ENXIO);
+ if (ret)
+ goto fail;
+
ret = intel_guc_auth_huc(guc,
intel_guc_ggtt_offset(guc, huc->rsa_data));
if (ret) {
@@ -157,14 +156,12 @@ int intel_huc_auth(struct intel_huc *huc)
goto fail;
}
- huc->fw.status = INTEL_UC_FIRMWARE_RUNNING;
-
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
return 0;
fail:
- huc->fw.status = INTEL_UC_FIRMWARE_FAIL;
-
- DRM_ERROR("HuC: Authentication failed %d\n", ret);
+ i915_probe_error(gt->i915, "HuC: Authentication failed %d\n", ret);
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_FAIL);
return ret;
}
@@ -185,7 +182,7 @@ int intel_huc_check_status(struct intel_huc *huc)
intel_wakeref_t wakeref;
u32 status = 0;
- if (!intel_uc_is_using_huc(&gt->uc))
+ if (!intel_huc_is_supported(huc))
return -ENODEV;
with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
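
Several of the HuC paths above gain i915_inject_load_error() calls, the driver's probe-time fault-injection hook. A rough stand-alone model of the idea — a countdown selects which injection point fires, so a test loop can walk every error path. The counter and the errno literal below are placeholders; the real helper is modparam-driven and compiled out of production builds:

    #include <stdio.h>

    static int fail_at = 2; /* placeholder: fail the 2nd injection point */

    /* Countdown-style injection: returns err once, at the chosen point. */
    static int inject_load_error(int err)
    {
        if (fail_at > 0 && --fail_at == 0)
            return err;
        return 0;
    }

    static int rsa_data_create(void)
    {
        int ret = inject_load_error(-6); /* -ENXIO, as a literal */
        if (ret)
            return ret;
        /* ... allocate and copy the RSA blob ... */
        return 0;
    }

    static int huc_auth(void)
    {
        int ret = inject_load_error(-6);
        if (ret)
            return ret;
        /* ... ask GuC to authenticate HuC ... */
        return 0;
    }

    int main(void)
    {
        printf("rsa_data_create: %d\n", rsa_data_create()); /* 0 */
        printf("huc_auth: %d\n", huc_auth());               /* -6 */
        return 0;
    }
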
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index 4465209ce233..644c059fe01d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -1,25 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
#ifndef _INTEL_HUC_H_
@@ -55,6 +36,16 @@ static inline int intel_huc_sanitize(struct intel_huc *huc)
return 0;
}
+static inline bool intel_huc_is_supported(struct intel_huc *huc)
+{
+ return intel_uc_fw_is_supported(&huc->fw);
+}
+
+static inline bool intel_huc_is_enabled(struct intel_huc *huc)
+{
+ return intel_uc_fw_is_enabled(&huc->fw);
+}
+
static inline bool intel_huc_is_authenticated(struct intel_huc *huc)
{
return intel_uc_fw_is_running(&huc->fw);
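
The new intel_huc_is_supported()/intel_huc_is_enabled() pair lets HuC answer the same two distinct questions the intel_uc helpers below ask: could this platform ever run HuC, and did this particular configuration turn it on. A compact model of the distinction (field names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct huc {
        bool fw_supported; /* platform + firmware combination can work */
        bool fw_wanted;    /* configuration actually asked for it */
    };

    static bool huc_is_supported(const struct huc *h)
    {
        return h->fw_supported;
    }

    static bool huc_is_enabled(const struct huc *h)
    {
        return h->fw_supported && h->fw_wanted;
    }

    int main(void)
    {
        struct huc h = { .fw_supported = true, .fw_wanted = false };

        printf("supported=%d enabled=%d\n",
               huc_is_supported(&h), huc_is_enabled(&h));
        return 0;
    }
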
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index 0e885859c828..74602487ed67 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
+ * Copyright © 2014-2019 Intel Corporation
*/
#include "gt/intel_gt.h"
@@ -31,7 +30,13 @@
*/
void intel_huc_fw_init_early(struct intel_huc *huc)
{
- intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC, huc_to_gt(huc)->i915);
+ struct intel_gt *gt = huc_to_gt(huc);
+ struct intel_uc *uc = &gt->uc;
+ struct drm_i915_private *i915 = gt->i915;
+
+ intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC,
+ intel_uc_uses_guc(uc),
+ INTEL_INFO(i915)->platform, INTEL_REVID(i915));
}
/**
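
As with the GuC variant earlier, intel_huc_fw_init_early() now receives the derived facts (whether uC is in use, platform, stepping) instead of the whole device pointer, which narrows the function's dependencies. A stand-alone sketch of that narrowing, under illustrative names and types:

    #include <stdbool.h>
    #include <stdio.h>

    enum fw_type { FW_GUC, FW_HUC };

    struct uc_fw {
        enum fw_type type;
        bool supported;
        int platform;
        int rev;
    };

    /* The caller passes derived facts, not the device they came from,
     * so the function can be exercised with no device object at all. */
    static void uc_fw_init_early(struct uc_fw *fw, enum fw_type type,
                                 bool supported, int platform, int rev)
    {
        fw->type = type;
        fw->supported = supported;
        fw->platform = platform;
        fw->rev = rev;
    }

    int main(void)
    {
        struct uc_fw fw;

        uc_fw_init_early(&fw, FW_HUC, /*uses_guc=*/true,
                         /*platform=*/11, /*rev=*/0);
        printf("huc fw supported=%d platform=%d\n",
               fw.supported, fw.platform);
        return 0;
    }
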
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
index 8a00a0ebddc5..b791269ce923 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
+ * Copyright © 2014-2019 Intel Corporation
*/
#ifndef _INTEL_HUC_FW_H_
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 6eb8bb3fa252..71ee7ab035cc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -1,25 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2016-2019 Intel Corporation
*/
#include "gt/intel_gt.h"
@@ -31,8 +12,6 @@
#include "i915_drv.h"
-static void guc_free_load_err_log(struct intel_guc *guc);
-
/* Reset GuC providing us with fresh state for both GuC and HuC.
*/
static int __intel_uc_reset_hw(struct intel_uc *uc)
@@ -41,6 +20,10 @@ static int __intel_uc_reset_hw(struct intel_uc *uc)
int ret;
u32 guc_status;
+ ret = i915_inject_load_error(gt->i915, -ENXIO);
+ if (ret)
+ return ret;
+
ret = intel_reset_guc(gt);
if (ret) {
DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
@@ -55,78 +38,49 @@ static int __intel_uc_reset_hw(struct intel_uc *uc)
return ret;
}
-static int __get_platform_enable_guc(struct intel_uc *uc)
+static void __confirm_options(struct intel_uc *uc)
{
- struct intel_uc_fw *guc_fw = &uc->guc.fw;
- struct intel_uc_fw *huc_fw = &uc->huc.fw;
- int enable_guc = 0;
-
- if (!HAS_GT_UC(uc_to_gt(uc)->i915))
- return 0;
-
- /* We don't want to enable GuC/HuC on pre-Gen11 by default */
- if (INTEL_GEN(uc_to_gt(uc)->i915) < 11)
- return 0;
-
- if (intel_uc_fw_supported(guc_fw) && intel_uc_fw_supported(huc_fw))
- enable_guc |= ENABLE_GUC_LOAD_HUC;
-
- return enable_guc;
-}
+ struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
-/**
- * sanitize_options_early - sanitize uC related modparam options
- * @uc: the intel_uc structure
- *
- * In case of "enable_guc" option this function will attempt to modify
- * it only if it was initially set to "auto(-1)". Default value for this
- * modparam varies between platforms and it is hardcoded in driver code.
- * Any other modparam value is only monitored against availability of the
- * related hardware or firmware definitions.
- */
-static void sanitize_options_early(struct intel_uc *uc)
-{
- struct intel_uc_fw *guc_fw = &uc->guc.fw;
- struct intel_uc_fw *huc_fw = &uc->huc.fw;
-
- /* A negative value means "use platform default" */
- if (i915_modparams.enable_guc < 0)
- i915_modparams.enable_guc = __get_platform_enable_guc(uc);
-
- DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
- i915_modparams.enable_guc,
- yesno(intel_uc_is_using_guc_submission(uc)),
- yesno(intel_uc_is_using_huc(uc)));
-
- /* Verify GuC firmware availability */
- if (intel_uc_is_using_guc(uc) && !intel_uc_fw_supported(guc_fw)) {
- DRM_WARN("Incompatible option detected: enable_guc=%d, "
- "but GuC is not supported!\n",
- i915_modparams.enable_guc);
- DRM_INFO("Disabling GuC/HuC loading!\n");
- i915_modparams.enable_guc = 0;
- }
+ DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
+ "enable_guc=%d (guc:%s submission:%s huc:%s)\n",
+ i915_modparams.enable_guc,
+ yesno(intel_uc_uses_guc(uc)),
+ yesno(intel_uc_uses_guc_submission(uc)),
+ yesno(intel_uc_uses_huc(uc)));
- /* Verify HuC firmware availability */
- if (intel_uc_is_using_huc(uc) && !intel_uc_fw_supported(huc_fw)) {
- DRM_WARN("Incompatible option detected: enable_guc=%d, "
- "but HuC is not supported!\n",
- i915_modparams.enable_guc);
- DRM_INFO("Disabling HuC loading!\n");
- i915_modparams.enable_guc &= ~ENABLE_GUC_LOAD_HUC;
- }
+ if (i915_modparams.enable_guc == -1)
+ return;
- /* XXX: GuC submission is unavailable for now */
- if (intel_uc_is_using_guc_submission(uc)) {
- DRM_INFO("Incompatible option detected: enable_guc=%d, "
- "but GuC submission is not supported!\n",
- i915_modparams.enable_guc);
- DRM_INFO("Switching to non-GuC submission mode!\n");
- i915_modparams.enable_guc &= ~ENABLE_GUC_SUBMISSION;
+ if (i915_modparams.enable_guc == 0) {
+ GEM_BUG_ON(intel_uc_uses_guc(uc));
+ GEM_BUG_ON(intel_uc_uses_guc_submission(uc));
+ GEM_BUG_ON(intel_uc_uses_huc(uc));
+ return;
}
- /* Make sure that sanitization was done */
- GEM_BUG_ON(i915_modparams.enable_guc < 0);
+ if (!intel_uc_supports_guc(uc))
+ dev_info(i915->drm.dev,
+ "Incompatible option enable_guc=%d - %s\n",
+ i915_modparams.enable_guc, "GuC is not supported!");
+
+ if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC &&
+ !intel_uc_supports_huc(uc))
+ dev_info(i915->drm.dev,
+ "Incompatible option enable_guc=%d - %s\n",
+ i915_modparams.enable_guc, "HuC is not supported!");
+
+ if (i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION &&
+ !intel_uc_supports_guc_submission(uc))
+ dev_info(i915->drm.dev,
+ "Incompatible option enable_guc=%d - %s\n",
+ i915_modparams.enable_guc, "GuC submission is N/A");
+
+ if (i915_modparams.enable_guc & ~(ENABLE_GUC_SUBMISSION |
+ ENABLE_GUC_LOAD_HUC))
+ dev_info(i915->drm.dev,
+ "Incompatible option enable_guc=%d - %s\n",
+ i915_modparams.enable_guc, "undocumented flag");
}
void intel_uc_init_early(struct intel_uc *uc)
@@ -134,12 +88,11 @@ void intel_uc_init_early(struct intel_uc *uc)
intel_guc_init_early(&uc->guc);
intel_huc_init_early(&uc->huc);
- sanitize_options_early(uc);
+ __confirm_options(uc);
}
-void intel_uc_cleanup_early(struct intel_uc *uc)
+void intel_uc_driver_late_release(struct intel_uc *uc)
{
- guc_free_load_err_log(&uc->guc);
}
/**
@@ -154,21 +107,20 @@ void intel_uc_init_mmio(struct intel_uc *uc)
intel_guc_init_send_regs(&uc->guc);
}
-static void guc_capture_load_err_log(struct intel_guc *guc)
+static void __uc_capture_load_err_log(struct intel_uc *uc)
{
- if (!guc->log.vma || !intel_guc_log_get_level(&guc->log))
- return;
-
- if (!guc->load_err_log)
- guc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
+ struct intel_guc *guc = &uc->guc;
- return;
+ if (guc->log.vma && !uc->load_err_log)
+ uc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
}
-static void guc_free_load_err_log(struct intel_guc *guc)
+static void __uc_free_load_err_log(struct intel_uc *uc)
{
- if (guc->load_err_log)
- i915_gem_object_put(guc->load_err_log);
+ struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log);
+
+ if (log)
+ i915_gem_object_put(log);
}
/*
@@ -233,11 +185,22 @@ static void guc_disable_interrupts(struct intel_guc *guc)
guc->interrupts.disable(guc);
}
+static inline bool guc_communication_enabled(struct intel_guc *guc)
+{
+ return guc->send != intel_guc_send_nop;
+}
+
static int guc_enable_communication(struct intel_guc *guc)
{
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
int ret;
+ GEM_BUG_ON(guc_communication_enabled(guc));
+
+ ret = i915_inject_load_error(i915, -ENXIO);
+ if (ret)
+ return ret;
+
ret = intel_guc_ct_enable(&guc->ct);
if (ret)
return ret;
@@ -301,95 +264,151 @@ static void guc_disable_communication(struct intel_guc *guc)
void intel_uc_fetch_firmwares(struct intel_uc *uc)
{
struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+ int err;
- if (!intel_uc_is_using_guc(uc))
+ if (!intel_uc_uses_guc(uc))
return;
- intel_uc_fw_fetch(&uc->guc.fw, i915);
+ err = intel_uc_fw_fetch(&uc->guc.fw, i915);
+ if (err)
+ return;
- if (intel_uc_is_using_huc(uc))
+ if (intel_uc_uses_huc(uc))
intel_uc_fw_fetch(&uc->huc.fw, i915);
}
void intel_uc_cleanup_firmwares(struct intel_uc *uc)
{
- if (!intel_uc_is_using_guc(uc))
+ if (!intel_uc_uses_guc(uc))
return;
- if (intel_uc_is_using_huc(uc))
+ if (intel_uc_uses_huc(uc))
intel_uc_fw_cleanup_fetch(&uc->huc.fw);
intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}
-int intel_uc_init(struct intel_uc *uc)
+void intel_uc_init(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
struct intel_huc *huc = &uc->huc;
int ret;
- if (!intel_uc_is_using_guc(uc))
- return 0;
-
- if (!intel_uc_fw_supported(&guc->fw))
- return -ENODEV;
+ if (!intel_uc_uses_guc(uc))
+ return;
/* XXX: GuC submission is unavailable for now */
- GEM_BUG_ON(intel_uc_is_using_guc_submission(uc));
+ GEM_BUG_ON(intel_uc_supports_guc_submission(uc));
ret = intel_guc_init(guc);
- if (ret)
- return ret;
-
- if (intel_uc_is_using_huc(uc)) {
- ret = intel_huc_init(huc);
- if (ret)
- goto err_guc;
+ if (ret) {
+ intel_uc_fw_cleanup_fetch(&huc->fw);
+ return;
}
- return 0;
-
-err_guc:
- intel_guc_fini(guc);
- return ret;
+ if (intel_uc_uses_huc(uc))
+ intel_huc_init(huc);
}
void intel_uc_fini(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
- if (!intel_uc_is_using_guc(uc))
+ if (!intel_uc_uses_guc(uc))
return;
- GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));
-
- if (intel_uc_is_using_huc(uc))
+ if (intel_uc_uses_huc(uc))
intel_huc_fini(&uc->huc);
intel_guc_fini(guc);
+
+ __uc_free_load_err_log(uc);
}
-static void __uc_sanitize(struct intel_uc *uc)
+static int __uc_sanitize(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
struct intel_huc *huc = &uc->huc;
- GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));
+ GEM_BUG_ON(!intel_uc_supports_guc(uc));
intel_huc_sanitize(huc);
intel_guc_sanitize(guc);
- __intel_uc_reset_hw(uc);
+ return __intel_uc_reset_hw(uc);
}
void intel_uc_sanitize(struct intel_uc *uc)
{
- if (!intel_uc_is_using_guc(uc))
+ if (!intel_uc_supports_guc(uc))
return;
__uc_sanitize(uc);
}
+/* Initialize and verify the uC regs related to uC positioning in WOPCM */
+static int uc_init_wopcm(struct intel_uc *uc)
+{
+ struct intel_gt *gt = uc_to_gt(uc);
+ struct intel_uncore *uncore = gt->uncore;
+ u32 base = intel_wopcm_guc_base(&gt->i915->wopcm);
+ u32 size = intel_wopcm_guc_size(&gt->i915->wopcm);
+ u32 huc_agent = intel_uc_uses_huc(uc) ? HUC_LOADING_AGENT_GUC : 0;
+ u32 mask;
+ int err;
+
+ if (unlikely(!base || !size)) {
+ i915_probe_error(gt->i915, "Unsuccessful WOPCM partitioning\n");
+ return -E2BIG;
+ }
+
+ GEM_BUG_ON(!intel_uc_supports_guc(uc));
+ GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK));
+ GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK);
+ GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
+ GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);
+
+ err = i915_inject_load_error(gt->i915, -ENXIO);
+ if (err)
+ return err;
+
+ mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
+ err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask,
+ size | GUC_WOPCM_SIZE_LOCKED);
+ if (err)
+ goto err_out;
+
+ mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
+ err = intel_uncore_write_and_verify(uncore, DMA_GUC_WOPCM_OFFSET,
+ base | huc_agent, mask,
+ base | huc_agent |
+ GUC_WOPCM_OFFSET_VALID);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ i915_probe_error(gt->i915, "Failed to init uC WOPCM registers!\n");
+ i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
+ i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
+ intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
+ i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
+ i915_mmio_reg_offset(GUC_WOPCM_SIZE),
+ intel_uncore_read(uncore, GUC_WOPCM_SIZE));
+
+ return err;
+}
+
+static bool uc_is_wopcm_locked(struct intel_uc *uc)
+{
+ struct intel_gt *gt = uc_to_gt(uc);
+ struct intel_uncore *uncore = gt->uncore;
+
+ return (intel_uncore_read(uncore, GUC_WOPCM_SIZE) & GUC_WOPCM_SIZE_LOCKED) ||
+ (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
+}
+
int intel_uc_init_hw(struct intel_uc *uc)
{
struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
@@ -397,10 +416,28 @@ int intel_uc_init_hw(struct intel_uc *uc)
struct intel_huc *huc = &uc->huc;
int ret, attempts;
- if (!intel_uc_is_using_guc(uc))
+ if (!intel_uc_supports_guc(uc))
+ return 0;
+
+ /*
+ * We can silently continue without GuC only if it was never enabled
+ * before on this system after reboot, otherwise we risk GPU hangs.
+ * To check if GuC was loaded before we look at WOPCM registers.
+ */
+ if (!intel_uc_uses_guc(uc) && !uc_is_wopcm_locked(uc))
return 0;
- GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));
+ if (!intel_uc_fw_is_available(&guc->fw)) {
+ ret = uc_is_wopcm_locked(uc) ||
+ intel_uc_fw_is_overridden(&guc->fw) ||
+ intel_uc_supports_guc_submission(uc) ?
+ intel_uc_fw_status_to_error(guc->fw.status) : 0;
+ goto err_out;
+ }
+
+ ret = uc_init_wopcm(uc);
+ if (ret)
+ goto err_out;
guc_reset_interrupts(guc);
@@ -416,16 +453,11 @@ int intel_uc_init_hw(struct intel_uc *uc)
* Always reset the GuC just before (re)loading, so
* that the state and timing are fairly predictable
*/
- ret = __intel_uc_reset_hw(uc);
+ ret = __uc_sanitize(uc);
if (ret)
goto err_out;
- if (intel_uc_is_using_huc(uc)) {
- ret = intel_huc_fw_upload(huc);
- if (ret && intel_uc_fw_is_overridden(&huc->fw))
- goto err_out;
- }
-
+ intel_huc_fw_upload(huc);
intel_guc_ads_reset(guc);
intel_guc_write_params(guc);
ret = intel_guc_fw_upload(guc);
@@ -444,28 +476,32 @@ int intel_uc_init_hw(struct intel_uc *uc)
if (ret)
goto err_log_capture;
- if (intel_uc_fw_is_loaded(&huc->fw)) {
- ret = intel_huc_auth(huc);
- if (ret && intel_uc_fw_is_overridden(&huc->fw))
- goto err_communication;
- }
+ intel_huc_auth(huc);
ret = intel_guc_sample_forcewake(guc);
if (ret)
goto err_communication;
- if (intel_uc_is_using_guc_submission(uc)) {
+ if (intel_uc_supports_guc_submission(uc)) {
ret = intel_guc_submission_enable(guc);
if (ret)
goto err_communication;
}
- dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",
- guc->fw.major_ver_found, guc->fw.minor_ver_found);
- dev_info(i915->drm.dev, "GuC submission %s\n",
- enableddisabled(intel_uc_is_using_guc_submission(uc)));
- dev_info(i915->drm.dev, "HuC %s\n",
- enableddisabled(intel_huc_is_authenticated(huc)));
+ dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
+ intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
+ guc->fw.major_ver_found, guc->fw.minor_ver_found,
+ "submission",
+ enableddisabled(intel_uc_supports_guc_submission(uc)));
+
+ if (intel_uc_uses_huc(uc)) {
+ dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
+ intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
+ huc->fw.path,
+ huc->fw.major_ver_found, huc->fw.minor_ver_found,
+ "authenticated",
+ yesno(intel_huc_is_authenticated(huc)));
+ }
return 0;
@@ -475,19 +511,20 @@ int intel_uc_init_hw(struct intel_uc *uc)
err_communication:
guc_disable_communication(guc);
err_log_capture:
- guc_capture_load_err_log(guc);
+ __uc_capture_load_err_log(uc);
err_out:
__uc_sanitize(uc);
- /*
- * Note that there is no fallback as either user explicitly asked for
- * the GuC or driver default option was to run with the GuC enabled.
- */
- if (GEM_WARN_ON(ret == -EIO))
- ret = -EINVAL;
+ if (!ret) {
+ dev_notice(i915->drm.dev, "GuC is uninitialized\n");
+ /* We want to run without GuC submission */
+ return 0;
+ }
- dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret);
- return ret;
+ i915_probe_error(i915, "GuC initialization failed %d\n", ret);
+
+ /* We want to keep KMS alive */
+ return -EIO;
}
void intel_uc_fini_hw(struct intel_uc *uc)
@@ -497,9 +534,7 @@ void intel_uc_fini_hw(struct intel_uc *uc)
if (!intel_guc_is_running(guc))
return;
- GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));
-
- if (intel_uc_is_using_guc_submission(uc))
+ if (intel_uc_supports_guc_submission(uc))
intel_guc_submission_disable(guc);
guc_disable_communication(guc);
@@ -550,7 +585,7 @@ void intel_uc_suspend(struct intel_uc *uc)
intel_uc_runtime_suspend(uc);
}
-int intel_uc_resume(struct intel_uc *uc)
+static int __uc_resume(struct intel_uc *uc, bool enable_communication)
{
struct intel_guc *guc = &uc->guc;
int err;
@@ -558,7 +593,11 @@ int intel_uc_resume(struct intel_uc *uc)
if (!intel_guc_is_running(guc))
return 0;
- guc_enable_communication(guc);
+ /* Make sure we enable communication if and only if it's disabled */
+ GEM_BUG_ON(enable_communication == guc_communication_enabled(guc));
+
+ if (enable_communication)
+ guc_enable_communication(guc);
err = intel_guc_resume(guc);
if (err) {
@@ -568,3 +607,21 @@ int intel_uc_resume(struct intel_uc *uc)
return 0;
}
+
+int intel_uc_resume(struct intel_uc *uc)
+{
+ /*
+ * When coming out of S3/S4 we sanitize and re-init the HW, so
+ * communication is already re-enabled at this point.
+ */
+ return __uc_resume(uc, false);
+}
+
+int intel_uc_runtime_resume(struct intel_uc *uc)
+{
+ /*
+ * During runtime resume we don't sanitize, so we need to re-init
+ * communication as well.
+ */
+ return __uc_resume(uc, true);
+}
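
The resume rework above is worth restating: system resume re-initializes the hardware first, so GuC communication is already up when intel_uc_resume() runs, while runtime resume skips sanitization and must bring communication back itself. A toy model of the one-worker/two-wrappers shape, where the early return stands in for the GEM_BUG_ON invariant (names illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct guc { bool comm_enabled; };

    /* One worker: the flag encodes whether this resume path must bring
     * GuC communication back up itself. */
    static int uc_resume_common(struct guc *guc, bool enable_communication)
    {
        if (enable_communication == guc->comm_enabled)
            return -1; /* invariant: enable iff currently disabled */
        if (enable_communication)
            guc->comm_enabled = true;
        /* ... send the resume message to GuC ... */
        return 0;
    }

    /* S3/S4 resume: HW was re-inited, communication is already up. */
    static int uc_resume(struct guc *guc)
    {
        return uc_resume_common(guc, false);
    }

    /* Runtime resume: no sanitize happened, re-enable communication. */
    static int uc_runtime_resume(struct guc *guc)
    {
        return uc_resume_common(guc, true);
    }

    int main(void)
    {
        struct guc guc = { .comm_enabled = false };

        printf("runtime resume: %d\n", uc_runtime_resume(&guc)); /* 0 */
        printf("system resume:  %d\n", uc_resume(&guc));         /* 0 */
        return 0;
    }
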
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
index fe3362fd7706..527995c21196 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -1,26 +1,8 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
+
#ifndef _INTEL_UC_H_
#define _INTEL_UC_H_
@@ -31,39 +13,55 @@
struct intel_uc {
struct intel_guc guc;
struct intel_huc huc;
+
+ /* Snapshot of GuC log from last failed load */
+ struct drm_i915_gem_object *load_err_log;
};
void intel_uc_init_early(struct intel_uc *uc);
-void intel_uc_cleanup_early(struct intel_uc *uc);
+void intel_uc_driver_late_release(struct intel_uc *uc);
void intel_uc_init_mmio(struct intel_uc *uc);
void intel_uc_fetch_firmwares(struct intel_uc *uc);
void intel_uc_cleanup_firmwares(struct intel_uc *uc);
void intel_uc_sanitize(struct intel_uc *uc);
+void intel_uc_init(struct intel_uc *uc);
int intel_uc_init_hw(struct intel_uc *uc);
void intel_uc_fini_hw(struct intel_uc *uc);
-int intel_uc_init(struct intel_uc *uc);
void intel_uc_fini(struct intel_uc *uc);
void intel_uc_reset_prepare(struct intel_uc *uc);
void intel_uc_suspend(struct intel_uc *uc);
void intel_uc_runtime_suspend(struct intel_uc *uc);
int intel_uc_resume(struct intel_uc *uc);
+int intel_uc_runtime_resume(struct intel_uc *uc);
+
+static inline bool intel_uc_supports_guc(struct intel_uc *uc)
+{
+ return intel_guc_is_supported(&uc->guc);
+}
+
+static inline bool intel_uc_uses_guc(struct intel_uc *uc)
+{
+ return intel_guc_is_enabled(&uc->guc);
+}
+
+static inline bool intel_uc_supports_guc_submission(struct intel_uc *uc)
+{
+ return intel_guc_is_submission_supported(&uc->guc);
+}
-static inline bool intel_uc_is_using_guc(struct intel_uc *uc)
+static inline bool intel_uc_uses_guc_submission(struct intel_uc *uc)
{
- GEM_BUG_ON(i915_modparams.enable_guc < 0);
- return i915_modparams.enable_guc > 0;
+ return intel_guc_is_submission_supported(&uc->guc);
}
-static inline bool intel_uc_is_using_guc_submission(struct intel_uc *uc)
+static inline bool intel_uc_supports_huc(struct intel_uc *uc)
{
- GEM_BUG_ON(i915_modparams.enable_guc < 0);
- return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
+ return intel_uc_supports_guc(uc);
}
-static inline bool intel_uc_is_using_huc(struct intel_uc *uc)
+static inline bool intel_uc_uses_huc(struct intel_uc *uc)
{
- GEM_BUG_ON(i915_modparams.enable_guc < 0);
- return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC;
+ return intel_huc_is_enabled(&uc->huc);
}
#endif
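
Tying the new header predicates back to __confirm_options() in intel_uc.c: rather than rewriting an impossible enable_guc value as the old sanitize_options_early() did, the driver now only reports each incompatibility and proceeds with what the hardware supports. A stand-alone approximation, with illustrative bit values for the two enable_guc flags:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative bit values; the real flags live in the i915 code. */
    #define ENABLE_GUC_SUBMISSION (1 << 0)
    #define ENABLE_GUC_LOAD_HUC   (1 << 1)

    static void confirm_options(int enable_guc, bool supports_guc,
                                bool supports_huc)
    {
        if (enable_guc == -1 || enable_guc == 0)
            return; /* auto, or explicitly off: nothing to confirm */

        if (!supports_guc)
            printf("Incompatible option enable_guc=%d - GuC is not supported!\n",
                   enable_guc);
        if ((enable_guc & ENABLE_GUC_LOAD_HUC) && !supports_huc)
            printf("Incompatible option enable_guc=%d - HuC is not supported!\n",
                   enable_guc);
        if (enable_guc & ~(ENABLE_GUC_SUBMISSION | ENABLE_GUC_LOAD_HUC))
            printf("Incompatible option enable_guc=%d - undocumented flag\n",
                   enable_guc);
    }

    int main(void)
    {
        /* 6 = LOAD_HUC plus an undocumented bit, with HuC unsupported */
        confirm_options(6, true, false);
        return 0;
    }
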
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index ac91e3efd02b..bd22bf11adad 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -1,25 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * Copyright © 2016-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2016-2019 Intel Corporation
*/
#include <linux/bitfield.h>
@@ -30,6 +11,29 @@
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
+{
+ GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
+ if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
+ return container_of(uc_fw, struct intel_gt, uc.guc.fw);
+
+ GEM_BUG_ON(uc_fw->type != INTEL_UC_FW_TYPE_HUC);
+ return container_of(uc_fw, struct intel_gt, uc.huc.fw);
+}
+
+void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_status status)
+{
+ uc_fw->__status = status;
+ DRM_DEV_DEBUG_DRIVER(__uc_fw_to_gt(uc_fw)->i915->drm.dev,
+ "%s firmware -> %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ status == INTEL_UC_FIRMWARE_SELECTED ?
+ uc_fw->path : intel_uc_fw_status_repr(status));
+}
+#endif
+
/*
* List of required GuC and HuC binaries per-platform.
* Must be ordered based on platform + revid, from newer to older.
@@ -132,36 +136,60 @@ __uc_fw_auto_select(struct intel_uc_fw *uc_fw, enum intel_platform p, u8 rev)
uc_fw->path = NULL;
}
}
+
+ /* We don't want to enable GuC/HuC on pre-Gen11 by default */
+ if (i915_modparams.enable_guc == -1 && p < INTEL_ICELAKE)
+ uc_fw->path = NULL;
+}
+
+static const char *__override_guc_firmware_path(void)
+{
+ if (i915_modparams.enable_guc & (ENABLE_GUC_SUBMISSION |
+ ENABLE_GUC_LOAD_HUC))
+ return i915_modparams.guc_firmware_path;
+ return "";
}
-static bool
-__uc_fw_override(struct intel_uc_fw *uc_fw)
+static const char *__override_huc_firmware_path(void)
{
+ if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC)
+ return i915_modparams.huc_firmware_path;
+ return "";
+}
+
+static void __uc_fw_user_override(struct intel_uc_fw *uc_fw)
+{
+ const char *path = NULL;
+
switch (uc_fw->type) {
case INTEL_UC_FW_TYPE_GUC:
- uc_fw->path = i915_modparams.guc_firmware_path;
+ path = __override_guc_firmware_path();
break;
case INTEL_UC_FW_TYPE_HUC:
- uc_fw->path = i915_modparams.huc_firmware_path;
+ path = __override_huc_firmware_path();
break;
}
- uc_fw->user_overridden = uc_fw->path;
- return uc_fw->user_overridden;
+ if (unlikely(path)) {
+ uc_fw->path = path;
+ uc_fw->user_overridden = true;
+ }
}
/**
* intel_uc_fw_init_early - initialize the uC object and select the firmware
- * @i915: device private
* @uc_fw: uC firmware
* @type: type of uC
+ * @supported: is uC support possible
+ * @platform: platform identifier
+ * @rev: hardware revision
*
* Initialize the state of our uC object and relevant tracking and select the
* firmware to fetch and load.
*/
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
- enum intel_uc_fw_type type,
- struct drm_i915_private *i915)
+ enum intel_uc_fw_type type, bool supported,
+ enum intel_platform platform, u8 rev)
{
/*
* we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
@@ -173,45 +201,89 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
uc_fw->type = type;
- if (HAS_GT_UC(i915) && likely(!__uc_fw_override(uc_fw)))
- __uc_fw_auto_select(uc_fw, INTEL_INFO(i915)->platform,
- INTEL_REVID(i915));
+ if (supported) {
+ __uc_fw_auto_select(uc_fw, platform, rev);
+ __uc_fw_user_override(uc_fw);
+ }
- if (uc_fw->path && *uc_fw->path)
- uc_fw->status = INTEL_UC_FIRMWARE_SELECTED;
- else
- uc_fw->status = INTEL_UC_FIRMWARE_NOT_SUPPORTED;
+ intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
+ INTEL_UC_FIRMWARE_SELECTED :
+ INTEL_UC_FIRMWARE_DISABLED :
+ INTEL_UC_FIRMWARE_NOT_SUPPORTED);
+}
+
+static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw,
+ struct drm_i915_private *i915,
+ int e)
+{
+ bool user = e == -EINVAL;
+
+ if (i915_inject_load_error(i915, e)) {
+ /* non-existing blob */
+ uc_fw->path = "<invalid>";
+ uc_fw->user_overridden = user;
+ } else if (i915_inject_load_error(i915, e)) {
+ /* require next major version */
+ uc_fw->major_ver_wanted += 1;
+ uc_fw->minor_ver_wanted = 0;
+ uc_fw->user_overridden = user;
+ } else if (i915_inject_load_error(i915, e)) {
+ /* require next minor version */
+ uc_fw->minor_ver_wanted += 1;
+ uc_fw->user_overridden = user;
+ } else if (uc_fw->major_ver_wanted && i915_inject_load_error(i915, e)) {
+ /* require prev major version */
+ uc_fw->major_ver_wanted -= 1;
+ uc_fw->minor_ver_wanted = 0;
+ uc_fw->user_overridden = user;
+ } else if (uc_fw->minor_ver_wanted && i915_inject_load_error(i915, e)) {
+ /* require prev minor version - hey, this should work! */
+ uc_fw->minor_ver_wanted -= 1;
+ uc_fw->user_overridden = user;
+ } else if (user && i915_inject_load_error(i915, e)) {
+ /* officially unsupported platform */
+ uc_fw->major_ver_wanted = 0;
+ uc_fw->minor_ver_wanted = 0;
+ uc_fw->user_overridden = true;
+ }
}
/**
* intel_uc_fw_fetch - fetch uC firmware
- *
* @uc_fw: uC firmware
* @i915: device private
*
* Fetch uC firmware into GEM obj.
+ *
+ * Return: 0 on success, a negative errno code on failure.
*/
-void intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915)
+int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915)
{
+ struct device *dev = i915->drm.dev;
struct drm_i915_gem_object *obj;
const struct firmware *fw = NULL;
struct uc_css_header *css;
size_t size;
int err;
- GEM_BUG_ON(!intel_uc_fw_supported(uc_fw));
+ GEM_BUG_ON(!i915->wopcm.size);
+ GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));
- err = request_firmware(&fw, uc_fw->path, i915->drm.dev);
+ err = i915_inject_load_error(i915, -ENXIO);
if (err)
- goto fail;
+ return err;
+
+ __force_fw_fetch_failures(uc_fw, i915, -EINVAL);
+ __force_fw_fetch_failures(uc_fw, i915, -ESTALE);
- DRM_DEBUG_DRIVER("%s fw size %zu ptr %p\n",
- intel_uc_fw_type_repr(uc_fw->type), fw->size, fw);
+ err = request_firmware(&fw, uc_fw->path, dev);
+ if (err)
+ goto fail;
/* Check the size of the blob before examining buffer contents */
- if (fw->size < sizeof(struct uc_css_header)) {
- DRM_WARN("%s: Unexpected firmware size (%zu, min %zu)\n",
- intel_uc_fw_type_repr(uc_fw->type),
+ if (unlikely(fw->size < sizeof(struct uc_css_header))) {
+ dev_warn(dev, "%s firmware %s: invalid size: %zu < %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
fw->size, sizeof(struct uc_css_header));
err = -ENODATA;
goto fail;
@@ -222,10 +294,12 @@ void intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915)
/* Check integrity of size values inside CSS header */
size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
css->exponent_size_dw) * sizeof(u32);
- if (size != sizeof(struct uc_css_header)) {
- DRM_WARN("%s: Mismatched firmware header definition\n",
- intel_uc_fw_type_repr(uc_fw->type));
- err = -ENOEXEC;
+ if (unlikely(size != sizeof(struct uc_css_header))) {
+ dev_warn(dev,
+ "%s firmware %s: unexpected header size: %zu != %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ fw->size, sizeof(struct uc_css_header));
+ err = -EPROTO;
goto fail;
}
@@ -233,23 +307,35 @@ void intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915)
uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
/* now RSA */
- if (css->key_size_dw != UOS_RSA_SCRATCH_COUNT) {
- DRM_WARN("%s: Mismatched firmware RSA key size (%u)\n",
- intel_uc_fw_type_repr(uc_fw->type), css->key_size_dw);
- err = -ENOEXEC;
+ if (unlikely(css->key_size_dw != UOS_RSA_SCRATCH_COUNT)) {
+ dev_warn(dev, "%s firmware %s: unexpected key size: %u != %u\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ css->key_size_dw, UOS_RSA_SCRATCH_COUNT);
+ err = -EPROTO;
goto fail;
}
uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
/* At least, it should have header, uCode and RSA. Size of all three. */
size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
- if (fw->size < size) {
- DRM_WARN("%s: Truncated firmware (%zu, expected %zu)\n",
- intel_uc_fw_type_repr(uc_fw->type), fw->size, size);
+ if (unlikely(fw->size < size)) {
+ dev_warn(dev, "%s firmware %s: invalid size: %zu < %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ fw->size, size);
err = -ENOEXEC;
goto fail;
}
+ /* Sanity check whether this fw is not larger than whole WOPCM memory */
+ size = __intel_uc_fw_get_upload_size(uc_fw);
+ if (unlikely(size >= i915->wopcm.size)) {
+ dev_warn(dev, "%s firmware %s: invalid size: %zu > %zu\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ size, (size_t)i915->wopcm.size);
+ err = -E2BIG;
+ goto fail;
+ }
+
/* Get version numbers from the CSS header */
switch (uc_fw->type) {
case INTEL_UC_FW_TYPE_GUC:
@@ -271,48 +357,43 @@ void intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915)
break;
}
- DRM_DEBUG_DRIVER("%s fw version %u.%u (wanted %u.%u)\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
-
- if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
- DRM_NOTE("%s: Skipping firmware version check\n",
- intel_uc_fw_type_repr(uc_fw->type));
- } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
- uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
- DRM_NOTE("%s: Wrong firmware version (%u.%u, required %u.%u)\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
- err = -ENOEXEC;
- goto fail;
+ if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
+ uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
+ dev_notice(dev, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+ if (!intel_uc_fw_is_overridden(uc_fw)) {
+ err = -ENOEXEC;
+ goto fail;
+ }
}
obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
- DRM_DEBUG_DRIVER("%s fw object_create err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
goto fail;
}
uc_fw->obj = obj;
uc_fw->size = fw->size;
- uc_fw->status = INTEL_UC_FIRMWARE_AVAILABLE;
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
release_firmware(fw);
- return;
+ return 0;
fail:
- uc_fw->status = INTEL_UC_FIRMWARE_MISSING;
+ intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
+ INTEL_UC_FIRMWARE_MISSING :
+ INTEL_UC_FIRMWARE_ERROR);
- DRM_WARN("%s: Failed to fetch firmware %s (error %d)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
- DRM_INFO("%s: Firmware can be downloaded from %s\n",
+ dev_notice(dev, "%s firmware %s: fetch failed with error %d\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+ dev_info(dev, "%s firmware(s) can be downloaded from %s\n",
intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
release_firmware(fw); /* OK even if fw is NULL */
+ return err;
}
static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
@@ -364,6 +445,10 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
u64 offset;
int ret;
+ ret = i915_inject_load_error(gt->i915, -ETIMEDOUT);
+ if (ret)
+ return ret;
+
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
/* Set the source address for the uCode */
@@ -418,14 +503,16 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
{
int err;
- DRM_DEBUG_DRIVER("%s fw load %s\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
-
/* make sure the status was cleared the last time we reset the uc */
GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
+ err = i915_inject_load_error(gt->i915, -ENOEXEC);
+ if (err)
+ return err;
+
if (!intel_uc_fw_is_available(uc_fw))
return -ENOEXEC;
+
/* Call custom loader */
intel_uc_fw_ggtt_bind(uc_fw, gt);
err = uc_fw_xfer(uc_fw, gt, wopcm_offset, dma_flags);
@@ -433,25 +520,14 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
if (err)
goto fail;
- uc_fw->status = INTEL_UC_FIRMWARE_TRANSFERRED;
- DRM_DEBUG_DRIVER("%s fw xfer completed\n",
- intel_uc_fw_type_repr(uc_fw->type));
-
- DRM_INFO("%s: Loaded firmware %s (version %u.%u)\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->path,
- uc_fw->major_ver_found, uc_fw->minor_ver_found);
-
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
return 0;
fail:
- uc_fw->status = INTEL_UC_FIRMWARE_FAIL;
- DRM_DEBUG_DRIVER("%s fw load failed\n",
- intel_uc_fw_type_repr(uc_fw->type));
-
- DRM_WARN("%s: Failed to load firmware %s (error %d)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
-
+ i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ err);
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL);
return err;
}
@@ -466,9 +542,11 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
return -ENOEXEC;
err = i915_gem_object_pin_pages(uc_fw->obj);
- if (err)
+ if (err) {
DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
intel_uc_fw_type_repr(uc_fw->type), err);
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_FAIL);
+ }
return err;
}
@@ -483,20 +561,18 @@ void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
/**
* intel_uc_fw_cleanup_fetch - cleanup uC firmware
- *
* @uc_fw: uC firmware
*
* Cleans up uC firmware by releasing the firmware GEM obj.
*/
void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
{
- struct drm_i915_gem_object *obj;
+ if (!intel_uc_fw_is_available(uc_fw))
+ return;
- obj = fetch_and_zero(&uc_fw->obj);
- if (obj)
- i915_gem_object_put(obj);
+ i915_gem_object_put(fetch_and_zero(&uc_fw->obj));
- uc_fw->status = INTEL_UC_FIRMWARE_SELECTED;
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
}
/**
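
A note on the failure handling above: fetch now reports why it failed, recording INTEL_UC_FIRMWARE_MISSING only for -ENOENT (the blob was never found) and INTEL_UC_FIRMWARE_ERROR for a blob that was found but rejected. A minimal userspace sketch of that classification — the enum and helper names here are illustrative, not from the driver:

/* Userspace model of the fetch failure classification: -ENOENT means the
 * blob was never found (MISSING); anything else means it was found but
 * rejected (ERROR). Names are illustrative only. */
#include <errno.h>
#include <stdio.h>

enum fw_status { FW_MISSING, FW_ERROR, FW_AVAILABLE };

static enum fw_status classify_fetch_error(int err)
{
	return err == -ENOENT ? FW_MISSING : FW_ERROR;
}

int main(void)
{
	printf("-ENOENT -> %d (MISSING)\n", classify_fetch_error(-ENOENT));
	printf("-EPROTO -> %d (ERROR)\n", classify_fetch_error(-EPROTO));
	return 0;
}

Keeping the two states separate lets later init code distinguish a user who simply lacks the firmware package from a corrupted or mismatched blob.
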
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 6b64b8073703..7a0a5989afc9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -1,25 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2014-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
+ * Copyright © 2014-2019 Intel Corporation
*/
#ifndef _INTEL_UC_FW_H_
@@ -27,6 +8,7 @@
#include <linux/types.h>
#include "intel_uc_fw_abi.h"
+#include "intel_device_info.h"
#include "i915_gem.h"
struct drm_printer;
@@ -36,13 +18,35 @@ struct intel_gt;
/* Home of GuC, HuC and DMC firmwares */
#define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915"
+/*
+ * +------------+---------------------------------------------------+
+ * | PHASE | FIRMWARE STATUS TRANSITIONS |
+ * +============+===================================================+
+ * | | UNINITIALIZED |
+ * +------------+- / | \ -+
+ * | | DISABLED <--/ | \--> NOT_SUPPORTED |
+ * | init_early | V |
+ * | | SELECTED |
+ * +------------+- / | \ -+
+ * | | MISSING <--/ | \--> ERROR |
+ * | fetch | | |
+ * | | /------> AVAILABLE <---<-----------\ |
+ * +------------+- \ / \ \ \ -+
+ * | | FAIL <--< \--> TRANSFERRED \ |
+ * | upload | \ / \ / |
+ * | | \---------/ \--> RUNNING |
+ * +------------+---------------------------------------------------+
+ */
+
enum intel_uc_fw_status {
- INTEL_UC_FIRMWARE_FAIL = -3, /* failed to xfer or init/auth the fw */
- INTEL_UC_FIRMWARE_MISSING = -2, /* blob not found on the system */
INTEL_UC_FIRMWARE_NOT_SUPPORTED = -1, /* no uc HW */
INTEL_UC_FIRMWARE_UNINITIALIZED = 0, /* used to catch checks done too early */
+ INTEL_UC_FIRMWARE_DISABLED, /* disabled */
INTEL_UC_FIRMWARE_SELECTED, /* selected the blob we want to load */
+ INTEL_UC_FIRMWARE_MISSING, /* blob not found on the system */
+ INTEL_UC_FIRMWARE_ERROR, /* invalid format or version */
INTEL_UC_FIRMWARE_AVAILABLE, /* blob found and copied in mem */
+ INTEL_UC_FIRMWARE_FAIL, /* failed to xfer or init/auth the fw */
INTEL_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */
INTEL_UC_FIRMWARE_RUNNING /* init/auth done */
};
@@ -59,7 +63,10 @@ enum intel_uc_fw_type {
*/
struct intel_uc_fw {
enum intel_uc_fw_type type;
- enum intel_uc_fw_status status;
+ union {
+ const enum intel_uc_fw_status status;
+ enum intel_uc_fw_status __status; /* no accidental overwrites */
+ };
const char *path;
bool user_overridden;
size_t size;
@@ -79,22 +86,37 @@ struct intel_uc_fw {
u32 ucode_size;
};
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_status status);
+#else
+static inline void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_status status)
+{
+ uc_fw->__status = status;
+}
+#endif
+
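
The union above pairs a const status for readers with a hidden __status that only intel_uc_fw_change_status() writes, so direct assignments fail at compile time. A self-contained userspace sketch of the same trick (names are illustrative):

/* Userspace model of the const/union write-protection trick. */
#include <stdio.h>

enum fw_status { FW_SELECTED, FW_AVAILABLE };

struct fw {
	union {
		const enum fw_status status;  /* readers use this */
		enum fw_status __status;      /* only the setter below writes it */
	};
};

static void fw_change_status(struct fw *fw, enum fw_status status)
{
	fw->__status = status;  /* the single sanctioned writer */
}

int main(void)
{
	struct fw fw = { .__status = FW_SELECTED };

	/* fw.status = FW_AVAILABLE; -- would not compile: status is const */
	fw_change_status(&fw, FW_AVAILABLE);
	printf("status = %d\n", fw.status);
	return 0;
}

Uncommenting the direct assignment fails to compile, which is exactly the protection the driver wants for its status state machine.
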
static inline
const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
{
switch (status) {
- case INTEL_UC_FIRMWARE_FAIL:
- return "FAIL";
- case INTEL_UC_FIRMWARE_MISSING:
- return "MISSING";
case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
return "N/A";
case INTEL_UC_FIRMWARE_UNINITIALIZED:
return "UNINITIALIZED";
+ case INTEL_UC_FIRMWARE_DISABLED:
+ return "DISABLED";
case INTEL_UC_FIRMWARE_SELECTED:
return "SELECTED";
+ case INTEL_UC_FIRMWARE_MISSING:
+ return "MISSING";
+ case INTEL_UC_FIRMWARE_ERROR:
+ return "ERROR";
case INTEL_UC_FIRMWARE_AVAILABLE:
return "AVAILABLE";
+ case INTEL_UC_FIRMWARE_FAIL:
+ return "FAIL";
case INTEL_UC_FIRMWARE_TRANSFERRED:
return "TRANSFERRED";
case INTEL_UC_FIRMWARE_RUNNING:
@@ -103,6 +125,31 @@ const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
return "<invalid>";
}
+static inline int intel_uc_fw_status_to_error(enum intel_uc_fw_status status)
+{
+ switch (status) {
+ case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
+ return -ENODEV;
+ case INTEL_UC_FIRMWARE_UNINITIALIZED:
+ return -EACCES;
+ case INTEL_UC_FIRMWARE_DISABLED:
+ return -EPERM;
+ case INTEL_UC_FIRMWARE_MISSING:
+ return -ENOENT;
+ case INTEL_UC_FIRMWARE_ERROR:
+ return -ENOEXEC;
+ case INTEL_UC_FIRMWARE_FAIL:
+ return -EIO;
+ case INTEL_UC_FIRMWARE_SELECTED:
+ return -ESTALE;
+ case INTEL_UC_FIRMWARE_AVAILABLE:
+ case INTEL_UC_FIRMWARE_TRANSFERRED:
+ case INTEL_UC_FIRMWARE_RUNNING:
+ return 0;
+ }
+ return -EINVAL;
+}
+
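
One intended use of this mapping is to turn a stuck firmware state into a meaningful errno at a higher level. A hedged sketch of such a caller — the function name and flow are hypothetical, not part of this patch:

/* Hypothetical caller: surface a stuck firmware state as an errno
 * instead of a bare -EIO. */
static int uc_sanity_check(struct intel_uc_fw *uc_fw)
{
	if (!intel_uc_fw_is_available(uc_fw))
		return intel_uc_fw_status_to_error(__intel_uc_fw_status(uc_fw));

	return 0;
}
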
static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
{
switch (type) {
@@ -122,6 +169,16 @@ __intel_uc_fw_status(struct intel_uc_fw *uc_fw)
return uc_fw->status;
}
+static inline bool intel_uc_fw_is_supported(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) != INTEL_UC_FIRMWARE_NOT_SUPPORTED;
+}
+
+static inline bool intel_uc_fw_is_enabled(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) > INTEL_UC_FIRMWARE_DISABLED;
+}
+
static inline bool intel_uc_fw_is_available(struct intel_uc_fw *uc_fw)
{
return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_AVAILABLE;
@@ -137,11 +194,6 @@ static inline bool intel_uc_fw_is_running(struct intel_uc_fw *uc_fw)
return __intel_uc_fw_status(uc_fw) == INTEL_UC_FIRMWARE_RUNNING;
}
-static inline bool intel_uc_fw_supported(struct intel_uc_fw *uc_fw)
-{
- return __intel_uc_fw_status(uc_fw) != INTEL_UC_FIRMWARE_NOT_SUPPORTED;
-}
-
static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw)
{
return uc_fw->user_overridden;
@@ -150,7 +202,12 @@ static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw)
static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
{
if (intel_uc_fw_is_loaded(uc_fw))
- uc_fw->status = INTEL_UC_FIRMWARE_AVAILABLE;
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
+}
+
+static inline u32 __intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
+{
+ return sizeof(struct uc_css_header) + uc_fw->ucode_size;
}
/**
@@ -166,14 +223,13 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
if (!intel_uc_fw_is_available(uc_fw))
return 0;
- return sizeof(struct uc_css_header) + uc_fw->ucode_size;
+ return __intel_uc_fw_get_upload_size(uc_fw);
}
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
- enum intel_uc_fw_type type,
- struct drm_i915_private *i915);
-void intel_uc_fw_fetch(struct intel_uc_fw *uc_fw,
- struct drm_i915_private *i915);
+ enum intel_uc_fw_type type, bool supported,
+ enum intel_platform platform, u8 rev);
+int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915);
void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw);
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
u32 wopcm_offset, u32 dma_flags);
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index 371f7a60c987..bba0eafe1cdb 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -1,25 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
*/
#include "i915_selftest.h"
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index c3d19d88da40..5ff2437b2998 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -172,14 +172,14 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
intel_runtime_pm_get(&dev_priv->runtime_pm);
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->ggtt.vm.mutex);
_clear_vgpu_fence(vgpu);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
@@ -195,7 +195,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
intel_runtime_pm_get(rpm);
/* Request fences from host */
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->ggtt.vm.mutex);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = i915_reserve_fence(dev_priv);
@@ -207,7 +207,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
intel_runtime_pm_put_unchecked(rpm);
return 0;
out_free_fence:
@@ -220,7 +220,7 @@ out_free_fence:
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
intel_runtime_pm_put_unchecked(rpm);
return -ENOSPC;
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index b09dc315e2da..e753b1e706e2 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -374,21 +374,37 @@ typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
#define ADDR_FIX_4(x1, x2, x3, x4) (ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
#define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
+#define DWORD_FIELD(dword, end, start) \
+ FIELD_GET(GENMASK(end, start), cmd_val(s, dword))
+
+#define OP_LENGTH_BIAS 2
+#define CMD_LEN(value) ((value) + OP_LENGTH_BIAS)
+
+static int gvt_check_valid_cmd_length(int len, int valid_len)
+{
+ if (valid_len != len) {
+ gvt_err("len is not valid: len=%u valid_len=%u\n",
+ len, valid_len);
+ return -EFAULT;
+ }
+ return 0;
+}
+
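
MI command length fields are biased: the hardware encodes the total DWord count minus two, which is what OP_LENGTH_BIAS undoes. A small userspace model of the bias and the check above:

/* Userspace model of the CMD_LEN() bias: an encoded length field of 1
 * denotes a 3-DWord command. */
#include <stdio.h>

#define OP_LENGTH_BIAS 2
#define CMD_LEN(value) ((value) + OP_LENGTH_BIAS)

static int check_valid_cmd_length(int len, int valid_len)
{
	return len == valid_len ? 0 : -1;
}

int main(void)
{
	int decoded = 3;  /* DWords actually decoded from the ring */

	/* e.g. MI_LOAD_REGISTER_REG encodes a length field of 1 */
	printf("valid_len = %d DWords, match = %s\n", CMD_LEN(1),
	       check_valid_cmd_length(decoded, CMD_LEN(1)) ? "no" : "yes");
	return 0;
}
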
struct cmd_info {
const char *name;
u32 opcode;
-#define F_LEN_MASK (1U<<0)
+#define F_LEN_MASK 3U
#define F_LEN_CONST 1U
#define F_LEN_VAR 0U
+/* value is const although LEN may be variable */
+#define F_LEN_VAR_FIXED (1<<1)
/*
* command has its own ip advance logic
* e.g. MI_BATCH_START, MI_BATCH_END
*/
-#define F_IP_ADVANCE_CUSTOM (1<<1)
-
-#define F_POST_HANDLE (1<<2)
+#define F_IP_ADVANCE_CUSTOM (1<<2)
u32 flag;
#define R_RCS BIT(RCS0)
@@ -418,9 +434,12 @@ struct cmd_info {
* flag == F_LEN_VAR : length bias bits
* Note: length is in DWord
*/
- u8 len;
+ u32 len;
parser_cmd_handler handler;
+
+ /* valid length in DWord */
+ u32 valid_len;
};
struct cmd_entry {
@@ -944,6 +963,18 @@ static int cmd_handler_lri(struct parser_exec_state *s)
int i, ret = 0;
int cmd_len = cmd_length(s);
struct intel_gvt *gvt = s->vgpu->gvt;
+ u32 valid_len = CMD_LEN(1);
+
+ /*
+ * Official Intel docs are somewhat sloppy; check the definition of
+ * MI_LOAD_REGISTER_IMM.
+ */
+ #define MAX_VALID_LEN 127
+ if ((cmd_len < valid_len) || (cmd_len > MAX_VALID_LEN)) {
+ gvt_err("len is not valid: len=%u valid_len=%u\n",
+ cmd_len, valid_len);
+ return -EFAULT;
+ }
for (i = 1; i < cmd_len; i += 2) {
if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
@@ -1375,6 +1406,15 @@ static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
int ret;
int i;
int len = cmd_length(s);
+ u32 valid_len = CMD_LEN(1);
+
+ /* Flip Type == Stereo 3D Flip */
+ if (DWORD_FIELD(2, 1, 0) == 2)
+ valid_len++;
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ valid_len);
+ if (ret)
+ return ret;
ret = decode_mi_display_flip(s, &info);
if (ret) {
@@ -1494,12 +1534,21 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
int op_size = (cmd_length(s) - 3) * sizeof(u32);
int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
unsigned long gma, gma_low, gma_high;
+ u32 valid_len = CMD_LEN(2);
int ret = 0;
/* check ppggt */
if (!(cmd_val(s, 0) & (1 << 22)))
return 0;
+ /* check if QWORD */
+ if (DWORD_FIELD(0, 21, 21))
+ valid_len++;
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ valid_len);
+ if (ret)
+ return ret;
+
gma = cmd_val(s, 2) & GENMASK(31, 2);
if (gmadr_bytes == 8) {
@@ -1542,11 +1591,20 @@ static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
sizeof(u32);
unsigned long gma, gma_high;
+ u32 valid_len = CMD_LEN(1);
int ret = 0;
if (!(cmd_val(s, 0) & (1 << 22)))
return ret;
+ /* check if QWORD */
+ if (DWORD_FIELD(0, 20, 19) == 1)
+ valid_len += 8;
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ valid_len);
+ if (ret)
+ return ret;
+
gma = cmd_val(s, 1) & GENMASK(31, 2);
if (gmadr_bytes == 8) {
gma_high = cmd_val(s, 2) & GENMASK(15, 0);
@@ -1584,6 +1642,16 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
bool index_mode = false;
int ret = 0;
u32 hws_pga, val;
+ u32 valid_len = CMD_LEN(2);
+
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ valid_len);
+ if (ret) {
+ /* Check again for Qword */
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ ++valid_len);
+ if (ret)
+ return ret;
+ }
/* Check post-sync and ppgtt bit */
if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
@@ -1661,7 +1729,9 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
return 1;
}
-static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
+static int find_bb_size(struct parser_exec_state *s,
+ unsigned long *bb_size,
+ unsigned long *bb_end_cmd_offset)
{
unsigned long gma = 0;
const struct cmd_info *info;
@@ -1673,6 +1743,7 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
*bb_size = 0;
+ *bb_end_cmd_offset = 0;
/* get the start gm address of the batch buffer */
gma = get_gma_bb_from_cmd(s, 1);
@@ -1708,6 +1779,10 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
/* chained batch buffer */
bb_end = true;
}
+
+ if (bb_end)
+ *bb_end_cmd_offset = *bb_size;
+
cmd_len = get_cmd_length(info, cmd) << 2;
*bb_size += cmd_len;
gma += cmd_len;
@@ -1716,12 +1791,36 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
return 0;
}
+static int audit_bb_end(struct parser_exec_state *s, void *va)
+{
+ struct intel_vgpu *vgpu = s->vgpu;
+ u32 cmd = *(u32 *)va;
+ const struct cmd_info *info;
+
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ if (info == NULL) {
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
+ cmd, get_opcode(cmd, s->ring_id),
+ (s->buf_addr_type == PPGTT_BUFFER) ?
+ "ppgtt" : "ggtt", s->ring_id, s->workload);
+ return -EBADRQC;
+ }
+
+ if ((info->opcode == OP_MI_BATCH_BUFFER_END) ||
+ ((info->opcode == OP_MI_BATCH_BUFFER_START) &&
+ (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)))
+ return 0;
+
+ return -EBADRQC;
+}
+
static int perform_bb_shadow(struct parser_exec_state *s)
{
struct intel_vgpu *vgpu = s->vgpu;
struct intel_vgpu_shadow_bb *bb;
unsigned long gma = 0;
unsigned long bb_size;
+ unsigned long bb_end_cmd_offset;
int ret = 0;
struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
@@ -1732,7 +1831,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
if (gma == INTEL_GVT_INVALID_ADDR)
return -EFAULT;
- ret = find_bb_size(s, &bb_size);
+ ret = find_bb_size(s, &bb_size, &bb_end_cmd_offset);
if (ret)
return ret;
@@ -1788,6 +1887,10 @@ static int perform_bb_shadow(struct parser_exec_state *s)
goto err_unmap;
}
+ ret = audit_bb_end(s, bb->va + start_offset + bb_end_cmd_offset);
+ if (ret)
+ goto err_unmap;
+
INIT_LIST_HEAD(&bb->list);
list_add(&bb->list, &s->workload->shadow_bb);
@@ -1912,21 +2015,24 @@ static const struct cmd_info cmd_info[] = {
{"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
NULL},
- {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE,
+ {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR,
R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
- {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL,
- 0, 8, NULL},
+ {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR | F_LEN_VAR_FIXED,
+ R_ALL, D_ALL, 0, 8, NULL, CMD_LEN(1)},
{"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
- {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+ {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS,
+ D_ALL, 0, 8, NULL, CMD_LEN(0)},
- {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
- D_BDW_PLUS, 0, 8, NULL},
+ {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, 0, 8,
+ NULL, CMD_LEN(0)},
- {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL,
- D_BDW_PLUS, ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
+ {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, ADDR_FIX_1(2),
+ 8, cmd_handler_mi_semaphore_wait, CMD_LEN(2)},
{"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
@@ -1940,8 +2046,9 @@ static const struct cmd_info cmd_info[] = {
{"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
cmd_handler_mi_update_gtt},
- {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL,
- D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm},
+ {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+ cmd_handler_srm, CMD_LEN(2)},
{"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
cmd_handler_mi_flush_dw},
@@ -1949,26 +2056,30 @@ static const struct cmd_info cmd_info[] = {
{"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
10, cmd_handler_mi_clflush},
- {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL,
- D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count},
+ {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(1), 6,
+ cmd_handler_mi_report_perf_count, CMD_LEN(2)},
- {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL,
- D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm},
+ {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+ cmd_handler_lrm, CMD_LEN(2)},
- {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL,
- D_ALL, 0, 8, cmd_handler_lrr},
+ {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, 0, 8,
+ cmd_handler_lrr, CMD_LEN(1)},
- {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS,
- D_ALL, 0, 8, NULL},
+ {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, D_ALL, 0,
+ 8, NULL, CMD_LEN(2)},
- {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL,
- ADDR_FIX_1(2), 8, NULL},
+ {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR | F_LEN_VAR_FIXED,
+ R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL, CMD_LEN(2)},
{"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
ADDR_FIX_1(2), 8, NULL},
- {"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2),
- 8, cmd_handler_mi_op_2e},
+ {"MI_OP_2E", OP_MI_2E, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS,
+ ADDR_FIX_2(1, 2), 8, cmd_handler_mi_op_2e, CMD_LEN(3)},
{"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
8, cmd_handler_mi_op_2f},
@@ -1978,8 +2089,8 @@ static const struct cmd_info cmd_info[] = {
cmd_handler_mi_batch_buffer_start},
{"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
- F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
- cmd_handler_mi_conditional_batch_buffer_end},
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+ cmd_handler_mi_conditional_batch_buffer_end, CMD_LEN(2)},
{"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
R_RCS | R_BCS, D_ALL, 0, 2, NULL},
@@ -2569,6 +2680,13 @@ static int cmd_parser_exec(struct parser_exec_state *s)
cmd_length(s), s->buf_type, s->buf_addr_type,
s->workload, info->name);
+ if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) {
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ info->valid_len);
+ if (ret)
+ return ret;
+ }
+
if (info->handler) {
ret = info->handler(s);
if (ret < 0) {
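
The flag layout change above widens F_LEN_MASK to two bits so that a command can stay F_LEN_VAR while also advertising a single fixed valid length via F_LEN_VAR_FIXED, which cmd_parser_exec() then enforces. A standalone model of that flag test, with the values copied from the definitions above:

/* Userspace model of the two-bit length-flag field. */
#include <stdio.h>

#define F_LEN_MASK       3U
#define F_LEN_CONST      1U
#define F_LEN_VAR        0U
#define F_LEN_VAR_FIXED  (1 << 1)

int main(void)
{
	unsigned int flag = F_LEN_VAR | F_LEN_VAR_FIXED;

	/* variable-length opcode that nevertheless has one valid length */
	if ((flag & F_LEN_MASK) == F_LEN_VAR_FIXED)
		printf("enforce info->valid_len for this opcode\n");
	return 0;
}
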
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 2fb7b73b260d..285f6011a537 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -189,36 +189,19 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
/**
* intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
* @vgpu: a vGPU
- *
- * Returns:
- * Zero on success, negative error code if failed.
*/
-int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
+void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
{
- struct dentry *ent;
char name[16] = "";
snprintf(name, 16, "vgpu%d", vgpu->id);
vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
- if (!vgpu->debugfs)
- return -ENOMEM;
-
- ent = debugfs_create_bool("active", 0444, vgpu->debugfs,
- &vgpu->active);
- if (!ent)
- return -ENOMEM;
-
- ent = debugfs_create_file("mmio_diff", 0444, vgpu->debugfs,
- vgpu, &vgpu_mmio_diff_fops);
- if (!ent)
- return -ENOMEM;
- ent = debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs,
- vgpu, &vgpu_scan_nonprivbb_fops);
- if (!ent)
- return -ENOMEM;
-
- return 0;
+ debugfs_create_bool("active", 0444, vgpu->debugfs, &vgpu->active);
+ debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, vgpu,
+ &vgpu_mmio_diff_fops);
+ debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs, vgpu,
+ &vgpu_scan_nonprivbb_fops);
}
/**
@@ -234,27 +217,15 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
/**
* intel_gvt_debugfs_init - register gvt debugfs root entry
* @gvt: GVT device
- *
- * Returns:
- * zero on success, negative if failed.
*/
-int intel_gvt_debugfs_init(struct intel_gvt *gvt)
+void intel_gvt_debugfs_init(struct intel_gvt *gvt)
{
struct drm_minor *minor = gvt->dev_priv->drm.primary;
- struct dentry *ent;
gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root);
- if (!gvt->debugfs_root) {
- gvt_err("Cannot create debugfs dir\n");
- return -ENOMEM;
- }
- ent = debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root,
- &gvt->mmio.num_tracked_mmio);
- if (!ent)
- return -ENOMEM;
-
- return 0;
+ debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root,
+ &gvt->mmio.num_tracked_mmio);
}
/**
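
The conversions above follow the current debugfs convention: the creation helpers tolerate an error-pointer parent and failures are deliberately ignored, so registration functions can be void. A hedged sketch of the resulting caller shape, with hypothetical names:

/* Sketch of the new-style debugfs registration: no return-value checks.
 * Directory and file names here are hypothetical. */
static void demo_debugfs_add(struct dentry *parent, bool *active)
{
	struct dentry *dir = debugfs_create_dir("demo", parent);

	/* debugfs_create_* cope with an error-pointer dir, so a failed
	 * parent simply disables the files below it. */
	debugfs_create_bool("active", 0444, dir, active);
}
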
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 43f4242062dd..8f37eefa0a02 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -375,9 +375,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
}
gvt->idle_vgpu = vgpu;
- ret = intel_gvt_debugfs_init(gvt);
- if (ret)
- gvt_err("debugfs registration failed, go on.\n");
+ intel_gvt_debugfs_init(gvt);
gvt_dbg_core("gvt device initialization is done\n");
dev_priv->gvt = gvt;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 7a1fe44d45af..b47c6acaf9c0 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -334,6 +334,10 @@ struct intel_gvt {
struct {
struct engine_mmio *mmio;
int ctx_mmio_count[I915_NUM_ENGINES];
+ u32 *tlb_mmio_offset_list;
+ u32 tlb_mmio_offset_list_cnt;
+ u32 *mocs_mmio_offset_list;
+ u32 mocs_mmio_offset_list_cnt;
} engine_mmio_list;
struct dentry *debugfs_root;
@@ -682,9 +686,9 @@ static inline void intel_gvt_mmio_set_in_ctx(
gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
}
-int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
-int intel_gvt_debugfs_init(struct intel_gvt *gvt);
+void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 951681813230..11accd3e1023 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -672,7 +672,7 @@ void intel_gvt_clean_irq(struct intel_gvt *gvt)
hrtimer_cancel(&irq->vblank_timer.timer);
}
-#define VBLNAK_TIMER_PERIOD 16000000
+#define VBLANK_TIMER_PERIOD 16000000
/**
* intel_gvt_init_irq - initialize GVT-g IRQ emulation subsystem
@@ -704,7 +704,7 @@ int intel_gvt_init_irq(struct intel_gvt *gvt)
hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
vblank_timer->timer.function = vblank_timer_fn;
- vblank_timer->period = VBLNAK_TIMER_PERIOD;
+ vblank_timer->period = VBLANK_TIMER_PERIOD;
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 23aa3e50cbf8..343d79c1cb7e 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1306,7 +1306,6 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
unsigned int i;
int ret;
struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
- size_t size;
int nr_areas = 1;
int cap_type_id;
@@ -1349,9 +1348,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
VFIO_REGION_INFO_FLAG_WRITE;
info.size = gvt_aperture_sz(vgpu->gvt);
- size = sizeof(*sparse) +
- (nr_areas * sizeof(*sparse->areas));
- sparse = kzalloc(size, GFP_KERNEL);
+ sparse = kzalloc(struct_size(sparse, areas, nr_areas),
+ GFP_KERNEL);
if (!sparse)
return -ENOMEM;
@@ -1416,9 +1414,9 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
switch (cap_type_id) {
case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
ret = vfio_info_add_capability(&caps,
- &sparse->header, sizeof(*sparse) +
- (sparse->nr_areas *
- sizeof(*sparse->areas)));
+ &sparse->header,
+ struct_size(sparse, areas,
+ sparse->nr_areas));
if (ret) {
kfree(sparse);
return ret;
@@ -1798,9 +1796,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
"kvmgt_nr_cache_entries",
0444, vgpu->debugfs,
&vgpu->vdev.nr_cache_entries);
- if (!info->debugfs_cache_entries)
- gvt_vgpu_err("Cannot create kvmgt debugfs entry\n");
-
return 0;
}
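
struct_size() computes the size of a structure with a trailing flexible array, saturating on overflow in the kernel implementation. A userspace model of the arithmetic it replaces here, using an illustrative stand-in for the VFIO sparse-mmap structure:

/* Userspace model of struct_size(): header plus n array elements.
 * The kernel macro additionally saturates on multiply overflow. */
#include <stdio.h>
#include <stddef.h>

struct sparse_mmap_area { unsigned long offset, size; };

struct sparse_mmap {
	unsigned int nr_areas;
	struct sparse_mmap_area areas[];  /* flexible array member */
};

int main(void)
{
	int nr_areas = 1;
	size_t sz = sizeof(struct sparse_mmap) +
		    nr_areas * sizeof(struct sparse_mmap_area);

	printf("allocation size = %zu bytes\n", sz);
	return 0;
}
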
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 2998999e8568..4208e40445b1 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -148,19 +148,27 @@ static struct {
u32 l3cc_table[GEN9_MOCS_SIZE / 2];
} gen9_render_mocs;
+static u32 gen9_mocs_mmio_offset_list[] = {
+ [RCS0] = 0xc800,
+ [VCS0] = 0xc900,
+ [VCS1] = 0xca00,
+ [BCS0] = 0xcc00,
+ [VECS0] = 0xcb00,
+};
+
static void load_render_mocs(struct drm_i915_private *dev_priv)
{
+ struct intel_gvt *gvt = dev_priv->gvt;
i915_reg_t offset;
- u32 regs[] = {
- [RCS0] = 0xc800,
- [VCS0] = 0xc900,
- [VCS1] = 0xca00,
- [BCS0] = 0xcc00,
- [VECS0] = 0xcb00,
- };
+ u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt;
+ u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list;
int ring_id, i;
- for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
+ /* Platform doesn't have MOCS MMIOs. */
+ if (!regs)
+ return;
+
+ for (ring_id = 0; ring_id < cnt; ring_id++) {
if (!HAS_ENGINE(dev_priv, ring_id))
continue;
offset.reg = regs[ring_id];
@@ -327,22 +335,28 @@ out:
return ret;
}
+static u32 gen8_tlb_mmio_offset_list[] = {
+ [RCS0] = 0x4260,
+ [VCS0] = 0x4264,
+ [VCS1] = 0x4268,
+ [BCS0] = 0x426c,
+ [VECS0] = 0x4270,
+};
+
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_vgpu_submission *s = &vgpu->submission;
+ u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list;
+ u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt;
enum forcewake_domains fw;
i915_reg_t reg;
- u32 regs[] = {
- [RCS0] = 0x4260,
- [VCS0] = 0x4264,
- [VCS1] = 0x4268,
- [BCS0] = 0x426c,
- [VECS0] = 0x4270,
- };
- if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ if (!regs)
+ return;
+
+ if (WARN_ON(ring_id >= cnt))
return;
if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
@@ -565,10 +579,17 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
struct engine_mmio *mmio;
- if (INTEL_GEN(gvt->dev_priv) >= 9)
+ if (INTEL_GEN(gvt->dev_priv) >= 9) {
gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
- else
+ gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
+ gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
+ gvt->engine_mmio_list.mocs_mmio_offset_list = gen9_mocs_mmio_offset_list;
+ gvt->engine_mmio_list.mocs_mmio_offset_list_cnt = ARRAY_SIZE(gen9_mocs_mmio_offset_list);
+ } else {
gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
+ gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
+ gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
+ }
for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
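
Moving the MMIO offset tables out of the functions and behind per-generation pointers lets platforms without such registers simply leave the list NULL. A standalone sketch of the lookup-with-guard pattern (structure and values are illustrative, modeled on the tables above):

/* Userspace model of the per-generation offset-list indirection. */
#include <stdio.h>
#include <stddef.h>

struct mmio_lists {
	const unsigned int *tlb_offsets;
	unsigned int tlb_cnt;
};

static void handle_tlb(const struct mmio_lists *l, unsigned int ring_id)
{
	if (!l->tlb_offsets || ring_id >= l->tlb_cnt)
		return;  /* platform has no such MMIO, or bogus ring id */

	printf("write TLB invalidate at 0x%x\n", l->tlb_offsets[ring_id]);
}

int main(void)
{
	static const unsigned int gen8_tlb[] = { 0x4260, 0x4264, 0x4268 };
	struct mmio_lists gen8 = { gen8_tlb, 3 };
	struct mmio_lists none = { NULL, 0 };

	handle_tlb(&gen8, 1);   /* prints 0x4264 */
	handle_tlb(&none, 1);   /* silently skipped */
	return 0;
}
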
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index f40524b0e300..1a28e3666951 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -84,8 +84,8 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload,
u32 *reg_state, bool save)
{
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
- u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
- u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+ u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
+ u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
int i = 0;
u32 flex_mmio[] = {
i915_mmio_reg_offset(EU_PERF_CNTL0),
@@ -291,9 +291,6 @@ shadow_context_descriptor_update(struct intel_context *ce,
* Update bits 0-11 of the context descriptor which includes flags
* like GEN8_CTX_* cached in desc_template
*/
- desc &= U64_MAX << 12;
- desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);
-
desc &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
desc |= workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
@@ -1215,30 +1212,43 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
*/
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
+ struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id i;
int ret;
- ctx = i915_gem_context_create_gvt(&vgpu->gvt->dev_priv->drm);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ mutex_lock(&i915->drm.struct_mutex);
+
+ ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX);
+ if (IS_ERR(ctx)) {
+ ret = PTR_ERR(ctx);
+ goto out_unlock;
+ }
+
+ i915_gem_context_set_force_single_submission(ctx);
i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm));
- for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ for_each_engine(engine, i915, i) {
struct intel_context *ce;
INIT_LIST_HEAD(&s->workload_q_head[i]);
s->shadow[i] = ERR_PTR(-EINVAL);
- ce = i915_gem_context_get_engine(ctx, i);
+ ce = intel_context_create(ctx, engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
goto out_shadow_ctx;
}
+ if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
+ const unsigned int ring_size = 512 * SZ_4K;
+
+ ce->ring = __intel_context_ring_size(ring_size);
+ }
+
ret = intel_context_pin(ce);
intel_context_put(ce);
if (ret)
@@ -1265,17 +1275,21 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
i915_gem_context_put(ctx);
+ mutex_unlock(&i915->drm.struct_mutex);
return 0;
out_shadow_ctx:
i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm));
- for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ for_each_engine(engine, i915, i) {
if (IS_ERR(s->shadow[i]))
break;
intel_context_unpin(s->shadow[i]);
+ intel_context_put(s->shadow[i]);
}
i915_gem_context_put(ctx);
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 44ce3c2b9ac1..d5a6e4e3d0fd 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -420,9 +420,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_submission;
- ret = intel_gvt_debugfs_add_vgpu(vgpu);
- if (ret)
- goto out_clean_sched_policy;
+ intel_gvt_debugfs_add_vgpu(vgpu);
ret = intel_gvt_hypervisor_set_opregion(vgpu);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index d32db8a4db5c..48e16ad93bbd 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -33,6 +33,44 @@ struct active_node {
u64 timeline;
};
+static inline struct active_node *
+node_from_active(struct i915_active_request *active)
+{
+ return container_of(active, struct active_node, base);
+}
+
+#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)
+
+static inline bool is_barrier(const struct i915_active_request *active)
+{
+ return IS_ERR(rcu_access_pointer(active->request));
+}
+
+static inline struct llist_node *barrier_to_ll(struct active_node *node)
+{
+ GEM_BUG_ON(!is_barrier(&node->base));
+ return (struct llist_node *)&node->base.link;
+}
+
+static inline struct intel_engine_cs *
+__barrier_to_engine(struct active_node *node)
+{
+ return (struct intel_engine_cs *)READ_ONCE(node->base.link.prev);
+}
+
+static inline struct intel_engine_cs *
+barrier_to_engine(struct active_node *node)
+{
+ GEM_BUG_ON(!is_barrier(&node->base));
+ return __barrier_to_engine(node);
+}
+
+static inline struct active_node *barrier_from_ll(struct llist_node *x)
+{
+ return container_of((struct list_head *)x,
+ struct active_node, base.link);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)
static void *active_debug_hint(void *addr)
@@ -104,12 +142,14 @@ __active_retire(struct i915_active *ref)
if (!retire)
return;
- ref->retire(ref);
-
rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
GEM_BUG_ON(i915_active_request_isset(&it->base));
kmem_cache_free(global.slab_cache, it);
}
+
+ /* After the final retire, the entire struct may be freed */
+ if (ref->retire)
+ ref->retire(ref);
}
static void
@@ -127,14 +167,15 @@ active_retire(struct i915_active *ref)
static void
node_retire(struct i915_active_request *base, struct i915_request *rq)
{
- active_retire(container_of(base, struct active_node, base)->ref);
+ active_retire(node_from_active(base)->ref);
}
static struct i915_active_request *
-active_instance(struct i915_active *ref, u64 idx)
+active_instance(struct i915_active *ref, struct intel_timeline *tl)
{
struct active_node *node, *prealloc;
struct rb_node **p, *parent;
+ u64 idx = tl->fence_context;
/*
* We track the most recently used timeline to skip a rbtree search
@@ -173,7 +214,7 @@ active_instance(struct i915_active *ref, u64 idx)
}
node = prealloc;
- i915_active_request_init(&node->base, NULL, node_retire);
+ i915_active_request_init(&node->base, &tl->mutex, NULL, node_retire);
node->ref = ref;
node->timeline = idx;
@@ -184,6 +225,7 @@ out:
ref->cache = node;
mutex_unlock(&ref->mutex);
+ BUILD_BUG_ON(offsetof(typeof(*node), base));
return &node->base;
}
@@ -201,31 +243,93 @@ void __i915_active_init(struct drm_i915_private *i915,
ref->retire = retire;
ref->tree = RB_ROOT;
ref->cache = NULL;
- init_llist_head(&ref->barriers);
+ init_llist_head(&ref->preallocated_barriers);
atomic_set(&ref->count, 0);
__mutex_init(&ref->mutex, "i915_active", key);
}
+static bool ____active_del_barrier(struct i915_active *ref,
+ struct active_node *node,
+ struct intel_engine_cs *engine)
+{
+ struct llist_node *head = NULL, *tail = NULL;
+ struct llist_node *pos, *next;
+
+ GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);
+
+ /*
+ * Rebuild the llist excluding our node. We may perform this
+ * outside of the kernel_context timeline mutex and so someone
+ * else may be manipulating the engine->barrier_tasks, in
+ * which case either we or they will be upset :)
+ *
+ * A second __active_del_barrier() will report failure to claim
+ * the active_node and the caller will just shrug and know not to
+ * claim ownership of its node.
+ *
+ * A concurrent i915_request_add_active_barriers() will miss adding
+ * any of the tasks, but we will try again on the next -- and since
+ * we are actively using the barrier, we know that there will be
+ * at least another opportunity when we idle.
+ */
+ llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
+ if (node == barrier_from_ll(pos)) {
+ node = NULL;
+ continue;
+ }
+
+ pos->next = head;
+ head = pos;
+ if (!tail)
+ tail = pos;
+ }
+ if (head)
+ llist_add_batch(head, tail, &engine->barrier_tasks);
+
+ return !node;
+}
+
+static bool
+__active_del_barrier(struct i915_active *ref, struct active_node *node)
+{
+ return ____active_del_barrier(ref, node, barrier_to_engine(node));
+}
+
int i915_active_ref(struct i915_active *ref,
- u64 timeline,
+ struct intel_timeline *tl,
struct i915_request *rq)
{
struct i915_active_request *active;
int err;
+ lockdep_assert_held(&tl->mutex);
+
/* Prevent reaping in case we malloc/wait while building the tree */
err = i915_active_acquire(ref);
if (err)
return err;
- active = active_instance(ref, timeline);
+ active = active_instance(ref, tl);
if (!active) {
err = -ENOMEM;
goto out;
}
- if (!i915_active_request_isset(active))
- atomic_inc(&ref->count);
+ if (is_barrier(active)) { /* proto-node used by our idle barrier */
+ /*
+ * This request is on the kernel_context timeline, and so
+ * we can use it to substitute for the pending idle-barrier
+ * request that we want to emit on the kernel_context.
+ */
+ __active_del_barrier(ref, node_from_active(active));
+ RCU_INIT_POINTER(active->request, NULL);
+ INIT_LIST_HEAD(&active->link);
+ } else {
+ if (!i915_active_request_isset(active))
+ atomic_inc(&ref->count);
+ }
+ GEM_BUG_ON(!atomic_read(&ref->count));
__i915_active_request_set(active, rq);
out:
@@ -312,6 +416,11 @@ int i915_active_wait(struct i915_active *ref)
}
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+ if (is_barrier(&it->base)) { /* unconnected idle-barrier */
+ err = -EBUSY;
+ break;
+ }
+
err = i915_active_request_retire(&it->base, BKL(ref));
if (err)
break;
@@ -374,6 +483,96 @@ void i915_active_fini(struct i915_active *ref)
}
#endif
+static inline bool is_idle_barrier(struct active_node *node, u64 idx)
+{
+ return node->timeline == idx && !i915_active_request_isset(&node->base);
+}
+
+static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
+{
+ struct rb_node *prev, *p;
+
+ if (RB_EMPTY_ROOT(&ref->tree))
+ return NULL;
+
+ mutex_lock(&ref->mutex);
+ GEM_BUG_ON(i915_active_is_idle(ref));
+
+ /*
+ * Try to reuse any existing barrier nodes already allocated for this
+ * i915_active, due to overlapping active phases there is likely a
+ * node kept alive (as we reuse before parking). We prefer to reuse
+ * completely idle barriers (less hassle in manipulating the llists),
+ * but otherwise any will do.
+ */
+ if (ref->cache && is_idle_barrier(ref->cache, idx)) {
+ p = &ref->cache->node;
+ goto match;
+ }
+
+ prev = NULL;
+ p = ref->tree.rb_node;
+ while (p) {
+ struct active_node *node =
+ rb_entry(p, struct active_node, node);
+
+ if (is_idle_barrier(node, idx))
+ goto match;
+
+ prev = p;
+ if (node->timeline < idx)
+ p = p->rb_right;
+ else
+ p = p->rb_left;
+ }
+
+ /*
+ * No quick match, but we did find the leftmost rb_node for the
+ * kernel_context. Walk the rb_tree in-order to see if there were
+ * any idle-barriers on this timeline that we missed, or just use
+ * the first pending barrier.
+ */
+ for (p = prev; p; p = rb_next(p)) {
+ struct active_node *node =
+ rb_entry(p, struct active_node, node);
+ struct intel_engine_cs *engine;
+
+ if (node->timeline > idx)
+ break;
+
+ if (node->timeline < idx)
+ continue;
+
+ if (is_idle_barrier(node, idx))
+ goto match;
+
+ /*
+ * The list of pending barriers is protected by the
+ * kernel_context timeline, which notably we do not hold
+ * here. i915_request_add_active_barriers() may consume
+ * the barrier before we claim it, so we have to check
+ * for success.
+ */
+ engine = __barrier_to_engine(node);
+ smp_rmb(); /* serialise with add_active_barriers */
+ if (is_barrier(&node->base) &&
+ ____active_del_barrier(ref, node, engine))
+ goto match;
+ }
+
+ mutex_unlock(&ref->mutex);
+
+ return NULL;
+
+match:
+ rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
+ if (p == &ref->cache->node)
+ ref->cache = NULL;
+ mutex_unlock(&ref->mutex);
+
+ return rb_entry(p, struct active_node, node);
+}
+
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
struct intel_engine_cs *engine)
{
@@ -382,39 +581,65 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
struct llist_node *pos, *next;
int err;
- GEM_BUG_ON(!mask);
+ GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
+
+ /*
+ * Preallocate a node for each physical engine supporting the target
+ * engine (remember virtual engines have more than one sibling).
+ * We can then use the preallocated nodes in
+ * i915_active_acquire_barrier()
+ */
for_each_engine_masked(engine, i915, mask, tmp) {
- struct intel_context *kctx = engine->kernel_context;
+ u64 idx = engine->kernel_context->timeline->fence_context;
struct active_node *node;
- node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
- if (unlikely(!node)) {
- err = -ENOMEM;
- goto unwind;
+ node = reuse_idle_barrier(ref, idx);
+ if (!node) {
+ node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
+ if (!node) {
+ err = -ENOMEM;
+ goto unwind;
+ }
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+ node->base.lock =
+ &engine->kernel_context->timeline->mutex;
+#endif
+ RCU_INIT_POINTER(node->base.request, NULL);
+ node->base.retire = node_retire;
+ node->timeline = idx;
+ node->ref = ref;
}
- i915_active_request_init(&node->base,
- (void *)engine, node_retire);
- node->timeline = kctx->ring->timeline->fence_context;
- node->ref = ref;
- atomic_inc(&ref->count);
+ if (!i915_active_request_isset(&node->base)) {
+ /*
+ * Mark this as being *our* unconnected proto-node.
+ *
+ * Since this node is not in any list, and we have
+ * decoupled it from the rbtree, we can reuse the
+ * request to indicate this is an idle-barrier node
+ * and then we can use the rb_node and list pointers
+ * for our tracking of the pending barrier.
+ */
+ RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
+ node->base.link.prev = (void *)engine;
+ atomic_inc(&ref->count);
+ }
+ GEM_BUG_ON(barrier_to_engine(node) != engine);
+ llist_add(barrier_to_ll(node), &ref->preallocated_barriers);
intel_engine_pm_get(engine);
- llist_add((struct llist_node *)&node->base.link,
- &ref->barriers);
}
return 0;
unwind:
- llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
- struct active_node *node;
+ llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
+ struct active_node *node = barrier_from_ll(pos);
- node = container_of((struct list_head *)pos,
- typeof(*node), base.link);
- engine = (void *)rcu_access_pointer(node->base.request);
+ atomic_dec(&ref->count);
+ intel_engine_pm_put(barrier_to_engine(node));
- intel_engine_pm_put(engine);
kmem_cache_free(global.slab_cache, node);
}
return err;
@@ -426,25 +651,27 @@ void i915_active_acquire_barrier(struct i915_active *ref)
GEM_BUG_ON(i915_active_is_idle(ref));
+ /*
+ * Transfer the list of preallocated barriers into the
+ * i915_active rbtree, but only as proto-nodes. They will be
+ * populated by i915_request_add_active_barriers() to point to the
+ * request that will eventually release them.
+ */
mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
- llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
- struct intel_engine_cs *engine;
- struct active_node *node;
+ llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
+ struct active_node *node = barrier_from_ll(pos);
+ struct intel_engine_cs *engine = barrier_to_engine(node);
struct rb_node **p, *parent;
- node = container_of((struct list_head *)pos,
- typeof(*node), base.link);
-
- engine = (void *)rcu_access_pointer(node->base.request);
- RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
-
parent = NULL;
p = &ref->tree.rb_node;
while (*p) {
+ struct active_node *it;
+
parent = *p;
- if (rb_entry(parent,
- struct active_node,
- node)->timeline < node->timeline)
+
+ it = rb_entry(parent, struct active_node, node);
+ if (it->timeline < node->timeline)
p = &parent->rb_right;
else
p = &parent->rb_left;
@@ -452,20 +679,30 @@ void i915_active_acquire_barrier(struct i915_active *ref)
rb_link_node(&node->node, parent, p);
rb_insert_color(&node->node, &ref->tree);
- llist_add((struct llist_node *)&node->base.link,
- &engine->barrier_tasks);
+ llist_add(barrier_to_ll(node), &engine->barrier_tasks);
intel_engine_pm_put(engine);
}
mutex_unlock(&ref->mutex);
}
-void i915_request_add_barriers(struct i915_request *rq)
+void i915_request_add_active_barriers(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct llist_node *node, *next;
- llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks))
+ GEM_BUG_ON(intel_engine_is_virtual(engine));
+ GEM_BUG_ON(rq->timeline != engine->kernel_context->timeline);
+
+ /*
+ * Attach the list of proto-fences to the in-flight request such
+ * that the parent i915_active will be released when this request
+ * is retired.
+ */
+ llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
+ RCU_INIT_POINTER(barrier_from_ll(node)->base.request, rq);
+ smp_wmb(); /* serialise with reuse_idle_barrier */
list_add_tail((struct list_head *)node, &rq->active_list);
+ }
}
int i915_active_request_set(struct i915_active_request *active,
@@ -473,6 +710,10 @@ int i915_active_request_set(struct i915_active_request *active,
{
int err;
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+ lockdep_assert_held(active->lock);
+#endif
+
/* Must maintain ordering wrt previous active requests */
err = i915_request_await_active_request(rq, active);
if (err)
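
____active_del_barrier() above claims a node by draining the whole lock-free list and re-adding everything except the victim; not finding the victim means someone else claimed it first. A userspace model of that claim-by-rebuild idea on a plain singly-linked list (ordering is not preserved here, which is acceptable for an unordered llist):

/* Userspace model of the "rebuild the list excluding our node" claim. */
#include <stdbool.h>
#include <stdio.h>

struct node { struct node *next; int id; };

/* Detach the whole list, then re-add every node except @victim.
 * Returns true if @victim was found (and thus claimed by us). */
static bool del_from_list(struct node **head, struct node *victim)
{
	struct node *all = *head, *rebuilt = NULL, *next;
	bool found = false;

	*head = NULL;
	for (struct node *pos = all; pos; pos = next) {
		next = pos->next;
		if (pos == victim) {
			found = true;
			continue;
		}
		pos->next = rebuilt;
		rebuilt = pos;
	}
	*head = rebuilt;
	return found;
}

int main(void)
{
	struct node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
	struct node *head = &a;

	printf("claimed node 2: %s\n", del_from_list(&head, &b) ? "yes" : "no");
	for (struct node *pos = head; pos; pos = pos->next)
		printf("remaining: %d\n", pos->id);
	return 0;
}
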
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index ba68b077ec6c..f95058f99057 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -58,15 +58,20 @@ void i915_active_retire_noop(struct i915_active_request *active,
*/
static inline void
i915_active_request_init(struct i915_active_request *active,
+ struct mutex *lock,
struct i915_request *rq,
i915_active_retire_fn retire)
{
RCU_INIT_POINTER(active->request, rq);
INIT_LIST_HEAD(&active->link);
active->retire = retire ?: i915_active_retire_noop;
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+ active->lock = lock;
+#endif
}
-#define INIT_ACTIVE_REQUEST(name) i915_active_request_init((name), NULL, NULL)
+#define INIT_ACTIVE_REQUEST(name, lock) \
+ i915_active_request_init((name), (lock), NULL, NULL)
/**
* i915_active_request_set - updates the tracker to watch the current request
@@ -81,6 +86,9 @@ static inline void
__i915_active_request_set(struct i915_active_request *active,
struct i915_request *request)
{
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+ lockdep_assert_held(active->lock);
+#endif
list_move(&active->link, &request->active_list);
rcu_assign_pointer(active->request, request);
}
@@ -90,25 +98,6 @@ i915_active_request_set(struct i915_active_request *active,
struct i915_request *rq);
/**
- * i915_active_request_set_retire_fn - updates the retirement callback
- * @active - the active tracker
- * @fn - the routine called when the request is retired
- * @mutex - struct_mutex used to guard retirements
- *
- * i915_active_request_set_retire_fn() updates the function pointer that
- * is called when the final request associated with the @active tracker
- * is retired.
- */
-static inline void
-i915_active_request_set_retire_fn(struct i915_active_request *active,
- i915_active_retire_fn fn,
- struct mutex *mutex)
-{
- lockdep_assert_held(mutex);
- active->retire = fn ?: i915_active_retire_noop;
-}
-
-/**
* i915_active_request_raw - return the active request
* @active - the active tracker
*
@@ -381,7 +370,7 @@ void __i915_active_init(struct drm_i915_private *i915,
} while (0)
int i915_active_ref(struct i915_active *ref,
- u64 timeline,
+ struct intel_timeline *tl,
struct i915_request *rq);
int i915_active_wait(struct i915_active *ref);
@@ -413,6 +402,6 @@ static inline void i915_active_fini(struct i915_active *ref) { }
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
struct intel_engine_cs *engine);
void i915_active_acquire_barrier(struct i915_active *ref);
-void i915_request_add_barriers(struct i915_request *rq);
+void i915_request_add_active_barriers(struct i915_request *rq);
#endif /* _I915_ACTIVE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index 74743dd0d5f0..1854e7d168c1 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -24,6 +24,21 @@ struct i915_active_request {
struct i915_request __rcu *request;
struct list_head link;
i915_active_retire_fn retire;
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+ /*
+ * Incorporeal!
+ *
+ * Updates to the i915_active_request must be serialised under a lock
+ * to ensure that the timeline is ordered. Normally, this is the
+ * timeline->mutex, but another mutex may be used so long as it is
+ * done so consistently.
+ *
+ * For lockdep tracking of the above, we store the lock we intend
+ * to always use for updates of this i915_active_request during
+ * construction and assert that is held on every update.
+ */
+ struct mutex *lock;
+#endif
};
struct active_node;
@@ -42,7 +57,7 @@ struct i915_active {
int (*active)(struct i915_active *ref);
void (*retire)(struct i915_active *ref);
- struct llist_head barriers;
+ struct llist_head preallocated_barriers;
};
#endif /* _I915_ACTIVE_TYPES_H_ */
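
The debug-only lock field above records, at construction time, which mutex must serialise updates, so lockdep can flag any update made without it. A hedged kernel-style sketch of the pattern in isolation (the tracker type is abbreviated to just the lock plumbing):

/* Sketch of the debug-only lock-tracking pattern. */
struct tracker {
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	struct mutex *lock;
#endif
};

static void tracker_init(struct tracker *t, struct mutex *lock)
{
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	t->lock = lock;   /* remember which mutex must guard updates */
#endif
}

static void tracker_update(struct tracker *t)
{
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	lockdep_assert_held(t->lock);   /* catch unserialised updates */
#endif
	/* ... perform the update while holding t->lock ... */
}
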
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
new file mode 100644
index 000000000000..fe1871d7c126
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kmemleak.h>
+#include <linux/slab.h>
+
+#include "i915_buddy.h"
+
+#include "i915_gem.h"
+#include "i915_globals.h"
+#include "i915_utils.h"
+
+static struct i915_global_block {
+ struct i915_global base;
+ struct kmem_cache *slab_blocks;
+} global;
+
+static void i915_global_buddy_shrink(void)
+{
+ kmem_cache_shrink(global.slab_blocks);
+}
+
+static void i915_global_buddy_exit(void)
+{
+ kmem_cache_destroy(global.slab_blocks);
+}
+
+static struct i915_global_block global = { {
+ .shrink = i915_global_buddy_shrink,
+ .exit = i915_global_buddy_exit,
+} };
+
+int __init i915_global_buddy_init(void)
+{
+ global.slab_blocks = KMEM_CACHE(i915_buddy_block, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_blocks)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_block *parent,
+ unsigned int order,
+ u64 offset)
+{
+ struct i915_buddy_block *block;
+
+ block = kmem_cache_zalloc(global.slab_blocks, GFP_KERNEL);
+ if (!block)
+ return NULL;
+
+ block->header = offset;
+ block->header |= order;
+ block->parent = parent;
+
+ return block;
+}
+
+static void i915_block_free(struct i915_buddy_block *block)
+{
+ kmem_cache_free(global.slab_blocks, block);
+}
+
+static void mark_allocated(struct i915_buddy_block *block)
+{
+ block->header &= ~I915_BUDDY_HEADER_STATE;
+ block->header |= I915_BUDDY_ALLOCATED;
+
+ list_del(&block->link);
+}
+
+static void mark_free(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ block->header &= ~I915_BUDDY_HEADER_STATE;
+ block->header |= I915_BUDDY_FREE;
+
+ list_add(&block->link,
+ &mm->free_list[i915_buddy_block_order(block)]);
+}
+
+static void mark_split(struct i915_buddy_block *block)
+{
+ block->header &= ~I915_BUDDY_HEADER_STATE;
+ block->header |= I915_BUDDY_SPLIT;
+
+ list_del(&block->link);
+}
+
+int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
+{
+ unsigned int i;
+ u64 offset;
+
+ if (size < chunk_size)
+ return -EINVAL;
+
+ if (chunk_size < PAGE_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(chunk_size))
+ return -EINVAL;
+
+ size = round_down(size, chunk_size);
+
+ mm->size = size;
+ mm->chunk_size = chunk_size;
+ mm->max_order = ilog2(size) - ilog2(chunk_size);
+
+ GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);
+
+ mm->free_list = kmalloc_array(mm->max_order + 1,
+ sizeof(struct list_head),
+ GFP_KERNEL);
+ if (!mm->free_list)
+ return -ENOMEM;
+
+ for (i = 0; i <= mm->max_order; ++i)
+ INIT_LIST_HEAD(&mm->free_list[i]);
+
+ mm->n_roots = hweight64(size);
+
+ mm->roots = kmalloc_array(mm->n_roots,
+ sizeof(struct i915_buddy_block *),
+ GFP_KERNEL);
+ if (!mm->roots)
+ goto out_free_list;
+
+ offset = 0;
+ i = 0;
+
+ /*
+ * Split into power-of-two blocks, in case we are given a size that is
+ * not itself a power-of-two.
+ */
+ do {
+ struct i915_buddy_block *root;
+ unsigned int order;
+ u64 root_size;
+
+ root_size = rounddown_pow_of_two(size);
+ order = ilog2(root_size) - ilog2(chunk_size);
+
+ root = i915_block_alloc(NULL, order, offset);
+ if (!root)
+ goto out_free_roots;
+
+ mark_free(mm, root);
+
+ GEM_BUG_ON(i > mm->max_order);
+ GEM_BUG_ON(i915_buddy_block_size(mm, root) < chunk_size);
+
+ mm->roots[i] = root;
+
+ offset += root_size;
+ size -= root_size;
+ i++;
+ } while (size);
+
+ return 0;
+
+out_free_roots:
+ while (i--)
+ i915_block_free(mm->roots[i]);
+ kfree(mm->roots);
+out_free_list:
+ kfree(mm->free_list);
+ return -ENOMEM;
+}
+
+void i915_buddy_fini(struct i915_buddy_mm *mm)
+{
+ int i;
+
+ for (i = 0; i < mm->n_roots; ++i) {
+ GEM_WARN_ON(!i915_buddy_block_is_free(mm->roots[i]));
+ i915_block_free(mm->roots[i]);
+ }
+
+ kfree(mm->roots);
+ kfree(mm->free_list);
+}
+
+static int split_block(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ unsigned int block_order = i915_buddy_block_order(block) - 1;
+ u64 offset = i915_buddy_block_offset(block);
+
+ GEM_BUG_ON(!i915_buddy_block_is_free(block));
+ GEM_BUG_ON(!i915_buddy_block_order(block));
+
+ block->left = i915_block_alloc(block, block_order, offset);
+ if (!block->left)
+ return -ENOMEM;
+
+ block->right = i915_block_alloc(block, block_order,
+ offset + (mm->chunk_size << block_order));
+ if (!block->right) {
+ i915_block_free(block->left);
+ return -ENOMEM;
+ }
+
+ mark_free(mm, block->left);
+ mark_free(mm, block->right);
+
+ mark_split(block);
+
+ return 0;
+}
+
+static struct i915_buddy_block *
+get_buddy(struct i915_buddy_block *block)
+{
+ struct i915_buddy_block *parent;
+
+ parent = block->parent;
+ if (!parent)
+ return NULL;
+
+ if (parent->left == block)
+ return parent->right;
+
+ return parent->left;
+}
+
+static void __i915_buddy_free(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ struct i915_buddy_block *parent;
+
+ while ((parent = block->parent)) {
+ struct i915_buddy_block *buddy;
+
+ buddy = get_buddy(block);
+
+ if (!i915_buddy_block_is_free(buddy))
+ break;
+
+ list_del(&buddy->link);
+
+ i915_block_free(block);
+ i915_block_free(buddy);
+
+ block = parent;
+ }
+
+ mark_free(mm, block);
+}
+
+void i915_buddy_free(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ GEM_BUG_ON(!i915_buddy_block_is_allocated(block));
+ __i915_buddy_free(mm, block);
+}
+
+void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects)
+{
+ struct i915_buddy_block *block, *on;
+
+ list_for_each_entry_safe(block, on, objects, link)
+ i915_buddy_free(mm, block);
+ INIT_LIST_HEAD(objects);
+}
+
+/*
+ * Allocate power-of-two block. The order value here translates to:
+ *
+ * 0 = 2^0 * mm->chunk_size
+ * 1 = 2^1 * mm->chunk_size
+ * 2 = 2^2 * mm->chunk_size
+ * ...
+ */
+struct i915_buddy_block *
+i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
+{
+ struct i915_buddy_block *block = NULL;
+ unsigned int i;
+ int err;
+
+ for (i = order; i <= mm->max_order; ++i) {
+ block = list_first_entry_or_null(&mm->free_list[i],
+ struct i915_buddy_block,
+ link);
+ if (block)
+ break;
+ }
+
+ if (!block)
+ return ERR_PTR(-ENOSPC);
+
+ GEM_BUG_ON(!i915_buddy_block_is_free(block));
+
+ while (i != order) {
+ err = split_block(mm, block);
+ if (unlikely(err))
+ goto out_free;
+
+ /* Go low */
+ block = block->left;
+ i--;
+ }
+
+ mark_allocated(block);
+ kmemleak_update_trace(block);
+ return block;
+
+out_free:
+ __i915_buddy_free(mm, block);
+ return ERR_PTR(err);
+}
+
+static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
+{
+ return s1 <= e2 && e1 >= s2;
+}
+
+static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
+{
+ return s1 <= s2 && e1 >= e2;
+}
+
+/*
+ * Allocate range. Note that it's safe to chain together multiple alloc_ranges
+ * with the same blocks list.
+ *
+ * Intended for pre-allocating portions of the address space, for example to
+ * reserve a block for the initial framebuffer or similar, hence the expectation
+ * here is that i915_buddy_alloc() is still the main vehicle for
+ * allocations, so if that's not the case then the drm_mm range allocator
+ * is probably a much better fit, and you should use that instead.
+ */
+int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
+ struct list_head *blocks,
+ u64 start, u64 size)
+{
+ struct i915_buddy_block *block;
+ struct i915_buddy_block *buddy;
+ LIST_HEAD(allocated);
+ LIST_HEAD(dfs);
+ u64 end;
+ int err;
+ int i;
+
+ if (size < mm->chunk_size)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(size | start, mm->chunk_size))
+ return -EINVAL;
+
+ if (range_overflows(start, size, mm->size))
+ return -EINVAL;
+
+ for (i = 0; i < mm->n_roots; ++i)
+ list_add_tail(&mm->roots[i]->tmp_link, &dfs);
+
+ end = start + size - 1;
+
+ do {
+ u64 block_start;
+ u64 block_end;
+
+ block = list_first_entry_or_null(&dfs,
+ struct i915_buddy_block,
+ tmp_link);
+ if (!block)
+ break;
+
+ list_del(&block->tmp_link);
+
+ block_start = i915_buddy_block_offset(block);
+ block_end = block_start + i915_buddy_block_size(mm, block) - 1;
+
+ if (!overlaps(start, end, block_start, block_end))
+ continue;
+
+ if (i915_buddy_block_is_allocated(block)) {
+ err = -ENOSPC;
+ goto err_free;
+ }
+
+ if (contains(start, end, block_start, block_end)) {
+ if (!i915_buddy_block_is_free(block)) {
+ err = -ENOSPC;
+ goto err_free;
+ }
+
+ mark_allocated(block);
+ list_add_tail(&block->link, &allocated);
+ continue;
+ }
+
+ if (!i915_buddy_block_is_split(block)) {
+ err = split_block(mm, block);
+ if (unlikely(err))
+ goto err_undo;
+ }
+
+ list_add(&block->right->tmp_link, &dfs);
+ list_add(&block->left->tmp_link, &dfs);
+ } while (1);
+
+ list_splice_tail(&allocated, blocks);
+ return 0;
+
+err_undo:
+ /*
+ * We really don't want to leave around a bunch of split blocks, since
+ * bigger is better, so make sure we merge everything back before we
+ * free the allocated blocks.
+ */
+ buddy = get_buddy(block);
+ if (buddy &&
+ (i915_buddy_block_is_free(block) &&
+ i915_buddy_block_is_free(buddy)))
+ __i915_buddy_free(mm, block);
+
+err_free:
+ i915_buddy_free_list(mm, &allocated);
+ return err;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_buddy.c"
+#endif
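i915_buddy_alloc() deals purely in orders, so a caller has to translate byte sizes itself. A plausible helper for that translation (illustrative only, not part of this patch; assumes size > 0):

	/*
	 * Smallest order whose block covers @size bytes:
	 * order 0 = 1 chunk, order 1 = 2 chunks, order n = 2^n chunks.
	 */
	static unsigned int buddy_order_for_size(struct i915_buddy_mm *mm, u64 size)
	{
		unsigned long chunks = DIV_ROUND_UP_ULL(size, mm->chunk_size);

		return ilog2(roundup_pow_of_two(chunks));
	}

For example, with 4 KiB chunks a 24 KiB request rounds up to 8 chunks and yields order 3, i.e. a 32 KiB block.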
diff --git a/drivers/gpu/drm/i915/i915_buddy.h b/drivers/gpu/drm/i915/i915_buddy.h
new file mode 100644
index 000000000000..ed41f3507cdc
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_buddy.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_BUDDY_H__
+#define __I915_BUDDY_H__
+
+#include <linux/bitops.h>
+#include <linux/list.h>
+
+struct i915_buddy_block {
+#define I915_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
+#define I915_BUDDY_HEADER_STATE GENMASK_ULL(11, 10)
+#define I915_BUDDY_ALLOCATED (1 << 10)
+#define I915_BUDDY_FREE (2 << 10)
+#define I915_BUDDY_SPLIT (3 << 10)
+#define I915_BUDDY_HEADER_ORDER GENMASK_ULL(9, 0)
+ u64 header;
+
+ struct i915_buddy_block *left;
+ struct i915_buddy_block *right;
+ struct i915_buddy_block *parent;
+
+ void *private; /* owned by creator */
+
+ /*
+ * While the block is allocated by the user through i915_buddy_alloc*,
+ * the user has ownership of the link, for example to maintain within
+ * a list, if so desired. As soon as the block is freed with
+ * i915_buddy_free* ownership is given back to the mm.
+ */
+ struct list_head link;
+ struct list_head tmp_link;
+};
+
+#define I915_BUDDY_MAX_ORDER I915_BUDDY_HEADER_ORDER
+
+/*
+ * Binary Buddy System.
+ *
+ * Locking should be handled by the user, a simple mutex around
+ * i915_buddy_alloc* and i915_buddy_free* should suffice.
+ */
+struct i915_buddy_mm {
+ /* Maintain a free list for each order. */
+ struct list_head *free_list;
+
+ /*
+ * Maintain explicit binary tree(s) to track the allocation of the
+ * address space. This gives us a simple way of finding a buddy block
+ * and performing the potentially recursive merge step when freeing a
+ * block. Nodes are either allocated or free, in which case they will
+ * also exist on the respective free list.
+ */
+ struct i915_buddy_block **roots;
+
+ /*
+ * Anything from here is public, and remains static for the lifetime of
+ * the mm. Everything above is considered do-not-touch.
+ */
+ unsigned int n_roots;
+ unsigned int max_order;
+
+ /* Must be at least PAGE_SIZE */
+ u64 chunk_size;
+ u64 size;
+};
+
+static inline u64
+i915_buddy_block_offset(struct i915_buddy_block *block)
+{
+ return block->header & I915_BUDDY_HEADER_OFFSET;
+}
+
+static inline unsigned int
+i915_buddy_block_order(struct i915_buddy_block *block)
+{
+ return block->header & I915_BUDDY_HEADER_ORDER;
+}
+
+static inline unsigned int
+i915_buddy_block_state(struct i915_buddy_block *block)
+{
+ return block->header & I915_BUDDY_HEADER_STATE;
+}
+
+static inline bool
+i915_buddy_block_is_allocated(struct i915_buddy_block *block)
+{
+ return i915_buddy_block_state(block) == I915_BUDDY_ALLOCATED;
+}
+
+static inline bool
+i915_buddy_block_is_free(struct i915_buddy_block *block)
+{
+ return i915_buddy_block_state(block) == I915_BUDDY_FREE;
+}
+
+static inline bool
+i915_buddy_block_is_split(struct i915_buddy_block *block)
+{
+ return i915_buddy_block_state(block) == I915_BUDDY_SPLIT;
+}
+
+static inline u64
+i915_buddy_block_size(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ return mm->chunk_size << i915_buddy_block_order(block);
+}
+
+int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size);
+
+void i915_buddy_fini(struct i915_buddy_mm *mm);
+
+struct i915_buddy_block *
+i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order);
+
+int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
+ struct list_head *blocks,
+ u64 start, u64 size);
+
+void i915_buddy_free(struct i915_buddy_mm *mm, struct i915_buddy_block *block);
+
+void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects);
+
+#endif
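Putting the API together, a rough usage sketch (locking, per the note above, is the caller's responsibility; error handling is mostly elided and the SZ_* constants come from <linux/sizes.h>):

	struct i915_buddy_mm mm;
	struct i915_buddy_block *block;
	LIST_HEAD(blocks);
	int err;

	err = i915_buddy_init(&mm, SZ_1G, SZ_4K);	/* 1 GiB space, 4 KiB chunks */
	if (err)
		return err;

	/* Carve out a fixed range, e.g. for a pre-existing framebuffer. */
	err = i915_buddy_alloc_range(&mm, &blocks, 0, SZ_16M);

	/* Grab a 2^4 * 4 KiB = 64 KiB block from anywhere in the space. */
	block = i915_buddy_alloc(&mm, 4);
	if (!IS_ERR(block))
		i915_buddy_free(&mm, block);

	i915_buddy_free_list(&mm, &blocks);
	i915_buddy_fini(&mm);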
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index a28bcd2d7c09..24555102e198 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -28,6 +28,7 @@
#include "gt/intel_engine.h"
#include "i915_drv.h"
+#include "i915_memcpy.h"
/**
* DOC: batch buffer command parser
@@ -1352,11 +1353,10 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
- enum intel_engine_id id;
bool active = false;
/* If the command parser is not enabled, report 0 - unsupported */
- for_each_engine(engine, dev_priv, id) {
+ for_each_uabi_engine(engine, dev_priv) {
if (intel_engine_needs_cmd_parser(engine)) {
active = true;
break;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 24787bb48c9f..b0f51591f2e4 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,6 +32,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
+#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbc.h"
#include "display/intel_hdcp.h"
@@ -39,13 +40,14 @@
#include "display/intel_psr.h"
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt_pm.h"
#include "gt/intel_reset.h"
#include "gt/uc/intel_guc_submission.h"
#include "i915_debugfs.h"
#include "i915_irq.h"
+#include "i915_trace.h"
#include "intel_csr.h"
-#include "intel_drv.h"
#include "intel_pm.h"
#include "intel_sideband.h"
@@ -92,7 +94,7 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
static char get_global_flag(struct drm_i915_gem_object *obj)
{
- return obj->userfault_count ? 'g' : ' ';
+ return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
}
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
@@ -136,7 +138,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct intel_engine_cs *engine;
struct i915_vma *vma;
- unsigned int frontbuffer_bits;
int pin_count = 0;
seq_printf(m, "%pK: %c%c%c%c %8zdKiB %02x %02x %s%s%s",
@@ -210,9 +211,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
}
}
if (vma->fence)
- seq_printf(m, " , fence: %d%s",
- vma->fence->id,
- i915_active_request_isset(&vma->last_fence) ? "*" : "");
+ seq_printf(m, " , fence: %d", vma->fence->id);
seq_puts(m, ")");
spin_lock(&obj->vma.lock);
@@ -228,17 +227,12 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
engine = i915_gem_object_last_write_engine(obj);
if (engine)
seq_printf(m, " (%s)", engine->name);
-
- frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
- if (frontbuffer_bits)
- seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
struct file_stats {
struct i915_address_space *vm;
unsigned long count;
u64 total, unbound;
- u64 global, shared;
u64 active, inactive;
u64 closed;
};
@@ -249,73 +243,68 @@ static int per_file_stats(int id, void *ptr, void *data)
struct file_stats *stats = data;
struct i915_vma *vma;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
stats->count++;
stats->total += obj->base.size;
if (!atomic_read(&obj->bind_count))
stats->unbound += obj->base.size;
- if (obj->base.name || obj->base.dma_buf)
- stats->shared += obj->base.size;
-
- list_for_each_entry(vma, &obj->vma.list, obj_link) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
- if (i915_vma_is_ggtt(vma)) {
- stats->global += vma->node.size;
- } else {
- if (vma->vm != stats->vm)
+ spin_lock(&obj->vma.lock);
+ if (!stats->vm) {
+ for_each_ggtt_vma(vma, obj) {
+ if (!drm_mm_node_allocated(&vma->node))
continue;
- }
- if (i915_vma_is_active(vma))
- stats->active += vma->node.size;
- else
- stats->inactive += vma->node.size;
+ if (i915_vma_is_active(vma))
+ stats->active += vma->node.size;
+ else
+ stats->inactive += vma->node.size;
- if (i915_vma_is_closed(vma))
- stats->closed += vma->node.size;
+ if (i915_vma_is_closed(vma))
+ stats->closed += vma->node.size;
+ }
+ } else {
+ struct rb_node *p = obj->vma.tree.rb_node;
+
+ while (p) {
+ long cmp;
+
+ vma = rb_entry(p, typeof(*vma), obj_node);
+ cmp = i915_vma_compare(vma, stats->vm, NULL);
+ if (cmp == 0) {
+ if (drm_mm_node_allocated(&vma->node)) {
+ if (i915_vma_is_active(vma))
+ stats->active += vma->node.size;
+ else
+ stats->inactive += vma->node.size;
+
+ if (i915_vma_is_closed(vma))
+ stats->closed += vma->node.size;
+ }
+ break;
+ }
+ if (cmp < 0)
+ p = p->rb_right;
+ else
+ p = p->rb_left;
+ }
}
+ spin_unlock(&obj->vma.lock);
return 0;
}
#define print_file_stats(m, name, stats) do { \
if (stats.count) \
- seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
+ seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
name, \
stats.count, \
stats.total, \
stats.active, \
stats.inactive, \
- stats.global, \
- stats.shared, \
stats.unbound, \
stats.closed); \
} while (0)
-static void print_batch_pool_stats(struct seq_file *m,
- struct drm_i915_private *dev_priv)
-{
- struct drm_i915_gem_object *obj;
- struct intel_engine_cs *engine;
- struct file_stats stats = {};
- enum intel_engine_id id;
- int j;
-
- for_each_engine(engine, dev_priv, id) {
- for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
- list_for_each_entry(obj,
- &engine->batch_pool.cache_list[j],
- batch_pool_link)
- per_file_stats(0, obj, &stats);
- }
- }
-
- print_file_stats(m, "[k]batch pool", stats);
-}
-
static void print_context_stats(struct seq_file *m,
struct drm_i915_private *i915)
{
@@ -328,10 +317,14 @@ static void print_context_stats(struct seq_file *m,
for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) {
- if (ce->state)
- per_file_stats(0, ce->state->obj, &kstats);
- if (ce->ring)
+ intel_context_lock_pinned(ce);
+ if (intel_context_is_pinned(ce)) {
+ if (ce->state)
+ per_file_stats(0,
+ ce->state->obj, &kstats);
per_file_stats(0, ce->ring->vma->obj, &kstats);
+ }
+ intel_context_unlock_pinned(ce);
}
i915_gem_context_unlock_engines(ctx);
@@ -363,8 +356,9 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
struct drm_i915_private *i915 = node_to_i915(m->private);
int ret;
- seq_printf(m, "%u shrinkable objects, %llu bytes\n",
+ seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
i915->mm.shrink_count,
+ atomic_read(&i915->mm.free_count),
i915->mm.shrink_memory);
seq_putc(m, '\n');
@@ -373,58 +367,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
if (ret)
return ret;
- print_batch_pool_stats(m, i915);
print_context_stats(m, i915);
mutex_unlock(&i915->drm.struct_mutex);
return 0;
}
-static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct drm_i915_gem_object *obj;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int total = 0;
- int ret, j;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- for_each_engine(engine, dev_priv, id) {
- for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
- int count;
-
- count = 0;
- list_for_each_entry(obj,
- &engine->batch_pool.cache_list[j],
- batch_pool_link)
- count++;
- seq_printf(m, "%s cache[%d]: %d objects\n",
- engine->name, j, count);
-
- list_for_each_entry(obj,
- &engine->batch_pool.cache_list[j],
- batch_pool_link) {
- seq_puts(m, " ");
- describe_obj(m, obj);
- seq_putc(m, '\n');
- }
-
- total += count;
- }
- }
-
- seq_printf(m, "total: %d\n", total);
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static void gen8_display_interrupt_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -481,7 +429,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
- enum intel_engine_id id;
intel_wakeref_t wakeref;
int i, pipe;
@@ -684,7 +631,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
} else if (INTEL_GEN(dev_priv) >= 6) {
- for_each_engine(engine, dev_priv, id) {
+ for_each_uabi_engine(engine, dev_priv) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
engine->name, ENGINE_READ(engine, RING_IMR));
@@ -705,10 +652,11 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
rcu_read_lock();
for (i = 0; i < i915->ggtt.num_fences; i++) {
- struct i915_vma *vma = i915->ggtt.fence_regs[i].vma;
+ struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
+ struct i915_vma *vma = reg->vma;
seq_printf(m, "Fence %d, pin count = %d, object = ",
- i, i915->ggtt.fence_regs[i].pin_count);
+ i, atomic_read(&reg->pin_count));
if (!vma)
seq_puts(m, "unused");
else
@@ -1195,7 +1143,7 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
unsigned int tmp;
seq_printf(m, "user.bypass_count = %u\n",
- uncore->user_forcewake.count);
+ uncore->user_forcewake_count);
for_each_fw_domain(fw_domain, uncore, tmp)
seq_printf(m, "%s.wake_count = %u\n",
@@ -1488,30 +1436,6 @@ static int i915_sr_status(struct seq_file *m, void *unused)
return 0;
}
-static int i915_emon_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- intel_wakeref_t wakeref;
-
- if (!IS_GEN(i915, 5))
- return -ENODEV;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- unsigned long temp, chipset, gfx;
-
- temp = i915_mch_val(i915);
- chipset = i915_chipset_val(i915);
- gfx = i915_gfx_val(i915);
-
- seq_printf(m, "GMCH temp: %ld\n", temp);
- seq_printf(m, "Chipset power: %ld\n", chipset);
- seq_printf(m, "GFX power: %ld\n", gfx);
- seq_printf(m, "Total power: %ld\n", chipset + gfx);
- }
-
- return 0;
-}
-
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1677,12 +1601,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) {
- seq_printf(m, "%s: ", ce->engine->name);
- if (ce->state)
- describe_obj(m, ce->state->obj);
- if (ce->ring)
+ intel_context_lock_pinned(ce);
+ if (intel_context_is_pinned(ce)) {
+ seq_printf(m, "%s: ", ce->engine->name);
+ if (ce->state)
+ describe_obj(m, ce->state->obj);
describe_ctx_ring(m, ce->ring);
- seq_putc(m, '\n');
+ seq_putc(m, '\n');
+ }
+ intel_context_unlock_pinned(ce);
}
i915_gem_context_unlock_engines(ctx);
@@ -1951,32 +1878,11 @@ static void i915_guc_log_info(struct seq_file *m,
}
}
-static void i915_guc_client_info(struct seq_file *m,
- struct drm_i915_private *dev_priv,
- struct intel_guc_client *client)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u64 tot = 0;
-
- seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
- client->priority, client->stage_id, client->proc_desc_offset);
- seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
- client->doorbell_id, client->doorbell_offset);
-
- for_each_engine(engine, dev_priv, id) {
- u64 submissions = client->submissions[id];
- tot += submissions;
- seq_printf(m, "\tSubmissions: %llu %s\n",
- submissions, engine->name);
- }
- seq_printf(m, "\tTotal: %llu\n", tot);
-}
-
static int i915_guc_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
const struct intel_guc *guc = &dev_priv->gt.uc.guc;
+ struct intel_guc_client *client = guc->execbuf_client;
if (!USES_GUC(dev_priv))
return -ENODEV;
@@ -1992,9 +1898,13 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
- seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
- i915_guc_client_info(m, dev_priv, guc->execbuf_client);
-
+ seq_printf(m, "\nGuC execbuf client @ %p:\n", client);
+ seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
+ client->priority,
+ client->stage_id,
+ client->proc_desc_offset);
+ seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
+ client->doorbell_id, client->doorbell_offset);
/* Add more as required ... */
return 0;
@@ -2005,7 +1915,6 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
const struct intel_guc *guc = &dev_priv->gt.uc.guc;
struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
- intel_engine_mask_t tmp;
int index;
if (!USES_GUC_SUBMISSION(dev_priv))
@@ -2034,7 +1943,7 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
desc->wq_addr, desc->wq_size);
seq_putc(m, '\n');
- for_each_engine(engine, dev_priv, tmp) {
+ for_each_uabi_engine(engine, dev_priv) {
u32 guc_engine_id = engine->guc_id;
struct guc_execlist_context *lrc =
&desc->lrc[guc_engine_id];
@@ -2066,7 +1975,7 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
return -ENODEV;
if (dump_load_err)
- obj = dev_priv->gt.uc.guc.load_err_log;
+ obj = dev_priv->gt.uc.load_err_log;
else if (dev_priv->gt.uc.guc.log.vma)
obj = dev_priv->gt.uc.guc.log.vma->obj;
@@ -2120,14 +2029,16 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct drm_i915_private *i915 = inode->i_private;
+ struct intel_guc *guc = &i915->gt.uc.guc;
+ struct intel_guc_log *log = &guc->log;
- if (!USES_GUC(dev_priv))
+ if (!intel_guc_is_running(guc))
return -ENODEV;
- file->private_data = &dev_priv->gt.uc.guc.log;
+ file->private_data = log;
- return intel_guc_log_relay_open(&dev_priv->gt.uc.guc.log);
+ return intel_guc_log_relay_open(log);
}
static ssize_t
@@ -2139,16 +2050,15 @@ i915_guc_log_relay_write(struct file *filp,
struct intel_guc_log *log = filp->private_data;
intel_guc_log_relay_flush(log);
-
return cnt;
}
static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- intel_guc_log_relay_close(&dev_priv->gt.uc.guc.log);
+ struct drm_i915_private *i915 = inode->i_private;
+ struct intel_guc *guc = &i915->gt.uc.guc;
+ intel_guc_log_relay_close(&guc->log);
return 0;
}
@@ -2465,6 +2375,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
intel_wakeref_t wakeref;
struct intel_csr *csr;
+ i915_reg_t dc5_reg, dc6_reg = {};
if (!HAS_CSR(dev_priv))
return -ENODEV;
@@ -2482,15 +2393,19 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
- if (WARN_ON(INTEL_GEN(dev_priv) > 11))
- goto out;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
+ dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
+ } else {
+ dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
+ SKL_CSR_DC3_DC5_COUNT;
+ if (!IS_GEN9_LP(dev_priv))
+ dc6_reg = SKL_CSR_DC5_DC6_COUNT;
+ }
- seq_printf(m, "DC3 -> DC5 count: %d\n",
- I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
- SKL_CSR_DC3_DC5_COUNT));
- if (!IS_GEN9_LP(dev_priv))
- seq_printf(m, "DC5 -> DC6 count: %d\n",
- I915_READ(SKL_CSR_DC5_DC6_COUNT));
+ seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg));
+ if (dc6_reg.reg)
+ seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg));
out:
seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
@@ -2867,7 +2782,6 @@ static int i915_engine_info(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
- enum intel_engine_id id;
struct drm_printer p;
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
@@ -2879,7 +2793,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
p = drm_seq_file_printer(m);
- for_each_engine(engine, dev_priv, id)
+ for_each_uabi_engine(engine, dev_priv)
intel_engine_dump(engine, &p, "%s\n", engine->name);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
@@ -2960,9 +2874,8 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
struct intel_engine_cs *engine;
- enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
const struct i915_wa_list *wal = &engine->ctx_wa_list;
const struct i915_wa *wa;
unsigned int count;
@@ -3730,6 +3643,9 @@ i915_drop_caches_set(void *data, u64 val)
i915_retire_requests(i915);
mutex_unlock(&i915->drm.struct_mutex);
+
+ if (ret == 0 && val & DROP_IDLE)
+ ret = intel_gt_pm_wait_for_idle(&i915->gt);
}
if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt))
@@ -4379,7 +4295,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
- {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
{"i915_guc_info", i915_guc_info, 0},
{"i915_guc_load_status", i915_guc_load_status_info, 0},
{"i915_guc_log_dump", i915_guc_log_dump, 0},
@@ -4389,7 +4304,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_frequency_info", i915_frequency_info, 0},
{"i915_hangcheck_info", i915_hangcheck_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},
- {"i915_emon_status", i915_emon_status, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
{"i915_fbc_status", i915_fbc_status, 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f2d3d754af37..b5b2a64753e6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -51,6 +51,7 @@
#include "display/intel_audio.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
+#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
#include "display/intel_gmbus.h"
@@ -63,442 +64,83 @@
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
-#include "gt/intel_reset.h"
-#include "gt/intel_workarounds.h"
-#include "gt/uc/intel_uc.h"
#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_irq.h"
-#include "i915_pmu.h"
+#include "i915_memcpy.h"
+#include "i915_perf.h"
#include "i915_query.h"
+#include "i915_suspend.h"
+#include "i915_sysfs.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_csr.h"
-#include "intel_drv.h"
#include "intel_pm.h"
static struct drm_driver driver;
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-static unsigned int i915_probe_fail_count;
-
-bool __i915_inject_probe_failure(const char *func, int line)
-{
- if (i915_probe_fail_count >= i915_modparams.inject_load_failure)
- return false;
-
- if (++i915_probe_fail_count == i915_modparams.inject_load_failure) {
- DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
- i915_modparams.inject_load_failure, func, line);
- i915_modparams.inject_load_failure = 0;
- return true;
- }
-
- return false;
-}
-
-bool i915_error_injected(void)
-{
- return i915_probe_fail_count && !i915_modparams.inject_load_failure;
-}
-
-#endif
-
-#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
-#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
- "providing the dmesg log by booting with drm.debug=0xf"
-
-void
-__i915_printk(struct drm_i915_private *dev_priv, const char *level,
- const char *fmt, ...)
-{
- static bool shown_bug_once;
- struct device *kdev = dev_priv->drm.dev;
- bool is_error = level[1] <= KERN_ERR[1];
- bool is_debug = level[1] == KERN_DEBUG[1];
- struct va_format vaf;
- va_list args;
-
- if (is_debug && !(drm_debug & DRM_UT_DRIVER))
- return;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- if (is_error)
- dev_printk(level, kdev, "%pV", &vaf);
- else
- dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
- __builtin_return_address(0), &vaf);
-
- va_end(args);
-
- if (is_error && !shown_bug_once) {
- /*
- * Ask the user to file a bug report for the error, except
- * if they may have caused the bug by fiddling with unsafe
- * module parameters.
- */
- if (!test_taint(TAINT_USER))
- dev_notice(kdev, "%s", FDO_BUG_MSG);
- shown_bug_once = true;
- }
-}
-
-/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
-static enum intel_pch
-intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
-{
- switch (id) {
- case INTEL_PCH_IBX_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
- WARN_ON(!IS_GEN(dev_priv, 5));
- return PCH_IBX;
- case INTEL_PCH_CPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found CougarPoint PCH\n");
- WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
- return PCH_CPT;
- case INTEL_PCH_PPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found PantherPoint PCH\n");
- WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
- /* PantherPoint is CPT compatible */
- return PCH_CPT;
- case INTEL_PCH_LPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found LynxPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
- return PCH_LPT;
- case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
- return PCH_LPT;
- case INTEL_PCH_WPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
- /* WildcatPoint is LPT compatible */
- return PCH_LPT;
- case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
- /* WildcatPoint is LPT compatible */
- return PCH_LPT;
- case INTEL_PCH_SPT_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
- return PCH_SPT;
- case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
- return PCH_SPT;
- case INTEL_PCH_KBP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
- /* KBP is SPT compatible */
- return PCH_SPT;
- case INTEL_PCH_CNP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
- WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
- return PCH_CNP;
- case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
- WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
- return PCH_CNP;
- case INTEL_PCH_CMP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
- WARN_ON(!IS_COFFEELAKE(dev_priv));
- /* CometPoint is CNP Compatible */
- return PCH_CNP;
- case INTEL_PCH_ICP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Ice Lake PCH\n");
- WARN_ON(!IS_ICELAKE(dev_priv));
- return PCH_ICP;
- case INTEL_PCH_MCC_DEVICE_ID_TYPE:
- case INTEL_PCH_MCC2_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n");
- WARN_ON(!IS_ELKHARTLAKE(dev_priv));
- return PCH_MCC;
- case INTEL_PCH_TGP_DEVICE_ID_TYPE:
- DRM_DEBUG_KMS("Found Tiger Lake LP PCH\n");
- WARN_ON(!IS_TIGERLAKE(dev_priv));
- return PCH_TGP;
- default:
- return PCH_NONE;
- }
-}
-
-static bool intel_is_virt_pch(unsigned short id,
- unsigned short svendor, unsigned short sdevice)
-{
- return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
- id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
- (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
- svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
- sdevice == PCI_SUBDEVICE_ID_QEMU));
-}
-
-static unsigned short
-intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
-{
- unsigned short id = 0;
-
- /*
- * In a virtualized passthrough environment we can be in a
- * setup where the ISA bridge is not able to be passed through.
- * In this case, a south bridge can be emulated and we have to
- * make an educated guess as to which PCH is really there.
- */
-
- if (IS_TIGERLAKE(dev_priv))
- id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
- else if (IS_ELKHARTLAKE(dev_priv))
- id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
- else if (IS_ICELAKE(dev_priv))
- id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
- else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
- id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
- else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
- id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
- else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
- id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
- else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
- id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
- else if (IS_GEN(dev_priv, 5))
- id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
-
- if (id)
- DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
- else
- DRM_DEBUG_KMS("Assuming no PCH\n");
-
- return id;
-}
-
-static void intel_detect_pch(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pch = NULL;
-
- /*
- * The reason to probe ISA bridge instead of Dev31:Fun0 is to
- * make graphics device passthrough work easy for VMM, that only
- * need to expose ISA bridge to let driver know the real hardware
- * underneath. This is a requirement from virtualization team.
- *
- * In some virtualized environments (e.g. XEN), there is irrelevant
- * ISA bridge in the system. To work reliably, we should scan through
- * all the ISA bridge devices and check for the first match, instead
- * of only checking the first one.
- */
- while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
- unsigned short id;
- enum intel_pch pch_type;
-
- if (pch->vendor != PCI_VENDOR_ID_INTEL)
- continue;
-
- id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
-
- pch_type = intel_pch_type(dev_priv, id);
- if (pch_type != PCH_NONE) {
- dev_priv->pch_type = pch_type;
- dev_priv->pch_id = id;
- break;
- } else if (intel_is_virt_pch(id, pch->subsystem_vendor,
- pch->subsystem_device)) {
- id = intel_virt_detect_pch(dev_priv);
- pch_type = intel_pch_type(dev_priv, id);
-
- /* Sanity check virtual PCH id */
- if (WARN_ON(id && pch_type == PCH_NONE))
- id = 0;
-
- dev_priv->pch_type = pch_type;
- dev_priv->pch_id = id;
- break;
- }
- }
-
- /*
- * Use PCH_NOP (PCH but no South Display) for PCH platforms without
- * display.
- */
- if (pch && !HAS_DISPLAY(dev_priv)) {
- DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
- dev_priv->pch_type = PCH_NOP;
- dev_priv->pch_id = 0;
- }
-
- if (!pch)
- DRM_DEBUG_KMS("No PCH found.\n");
-
- pci_dev_put(pch);
-}
-
-static int i915_getparam_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = dev_priv->drm.pdev;
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
- drm_i915_getparam_t *param = data;
- int value;
-
- switch (param->param) {
- case I915_PARAM_IRQ_ACTIVE:
- case I915_PARAM_ALLOW_BATCHBUFFER:
- case I915_PARAM_LAST_DISPATCH:
- case I915_PARAM_HAS_EXEC_CONSTANTS:
- /* Reject all old ums/dri params. */
- return -ENODEV;
- case I915_PARAM_CHIPSET_ID:
- value = pdev->device;
- break;
- case I915_PARAM_REVISION:
- value = pdev->revision;
- break;
- case I915_PARAM_NUM_FENCES_AVAIL:
- value = dev_priv->ggtt.num_fences;
- break;
- case I915_PARAM_HAS_OVERLAY:
- value = dev_priv->overlay ? 1 : 0;
- break;
- case I915_PARAM_HAS_BSD:
- value = !!dev_priv->engine[VCS0];
- break;
- case I915_PARAM_HAS_BLT:
- value = !!dev_priv->engine[BCS0];
- break;
- case I915_PARAM_HAS_VEBOX:
- value = !!dev_priv->engine[VECS0];
- break;
- case I915_PARAM_HAS_BSD2:
- value = !!dev_priv->engine[VCS1];
- break;
- case I915_PARAM_HAS_LLC:
- value = HAS_LLC(dev_priv);
- break;
- case I915_PARAM_HAS_WT:
- value = HAS_WT(dev_priv);
- break;
- case I915_PARAM_HAS_ALIASING_PPGTT:
- value = INTEL_PPGTT(dev_priv);
- break;
- case I915_PARAM_HAS_SEMAPHORES:
- value = !!(dev_priv->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
- break;
- case I915_PARAM_HAS_SECURE_BATCHES:
- value = capable(CAP_SYS_ADMIN);
- break;
- case I915_PARAM_CMD_PARSER_VERSION:
- value = i915_cmd_parser_get_version(dev_priv);
- break;
- case I915_PARAM_SUBSLICE_TOTAL:
- value = intel_sseu_subslice_total(sseu);
- if (!value)
- return -ENODEV;
- break;
- case I915_PARAM_EU_TOTAL:
- value = sseu->eu_total;
- if (!value)
- return -ENODEV;
- break;
- case I915_PARAM_HAS_GPU_RESET:
- value = i915_modparams.enable_hangcheck &&
- intel_has_gpu_reset(dev_priv);
- if (value && intel_has_reset_engine(dev_priv))
- value = 2;
- break;
- case I915_PARAM_HAS_RESOURCE_STREAMER:
- value = 0;
- break;
- case I915_PARAM_HAS_POOLED_EU:
- value = HAS_POOLED_EU(dev_priv);
- break;
- case I915_PARAM_MIN_EU_IN_POOL:
- value = sseu->min_eu_in_pool;
- break;
- case I915_PARAM_HUC_STATUS:
- value = intel_huc_check_status(&dev_priv->gt.uc.huc);
- if (value < 0)
- return value;
- break;
- case I915_PARAM_MMAP_GTT_VERSION:
- /* Though we've started our numbering from 1, and so class all
- * earlier versions as 0, in effect their value is undefined as
- * the ioctl will report EINVAL for the unknown param!
- */
- value = i915_gem_mmap_gtt_version();
- break;
- case I915_PARAM_HAS_SCHEDULER:
- value = dev_priv->caps.scheduler;
- break;
-
- case I915_PARAM_MMAP_VERSION:
- /* Remember to bump this if the version changes! */
- case I915_PARAM_HAS_GEM:
- case I915_PARAM_HAS_PAGEFLIPPING:
- case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
- case I915_PARAM_HAS_RELAXED_FENCING:
- case I915_PARAM_HAS_COHERENT_RINGS:
- case I915_PARAM_HAS_RELAXED_DELTA:
- case I915_PARAM_HAS_GEN7_SOL_RESET:
- case I915_PARAM_HAS_WAIT_TIMEOUT:
- case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
- case I915_PARAM_HAS_PINNED_BATCHES:
- case I915_PARAM_HAS_EXEC_NO_RELOC:
- case I915_PARAM_HAS_EXEC_HANDLE_LUT:
- case I915_PARAM_HAS_COHERENT_PHYS_GTT:
- case I915_PARAM_HAS_EXEC_SOFTPIN:
- case I915_PARAM_HAS_EXEC_ASYNC:
- case I915_PARAM_HAS_EXEC_FENCE:
- case I915_PARAM_HAS_EXEC_CAPTURE:
- case I915_PARAM_HAS_EXEC_BATCH_FIRST:
- case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
- case I915_PARAM_HAS_EXEC_SUBMIT_FENCE:
- /* For the time being all of these are always true;
- * if some supported hardware does not have one of these
- * features this value needs to be provided from
- * INTEL_INFO(), a feature macro, or similar.
- */
- value = 1;
- break;
- case I915_PARAM_HAS_CONTEXT_ISOLATION:
- value = intel_engines_has_context_isolation(dev_priv);
- break;
- case I915_PARAM_SLICE_MASK:
- value = sseu->slice_mask;
- if (!value)
- return -ENODEV;
- break;
- case I915_PARAM_SUBSLICE_MASK:
- value = sseu->subslice_mask[0];
- if (!value)
- return -ENODEV;
- break;
- case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
- value = 1000 * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz;
- break;
- case I915_PARAM_MMAP_GTT_COHERENT:
- value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
- break;
- default:
- DRM_DEBUG("Unknown parameter %d\n", param->param);
- return -EINVAL;
- }
-
- if (put_user(value, param->value))
- return -EFAULT;
-
- return 0;
-}
+struct vlv_s0ix_state {
+ /* GAM */
+ u32 wr_watermark;
+ u32 gfx_prio_ctrl;
+ u32 arb_mode;
+ u32 gfx_pend_tlb0;
+ u32 gfx_pend_tlb1;
+ u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
+ u32 media_max_req_count;
+ u32 gfx_max_req_count;
+ u32 render_hwsp;
+ u32 ecochk;
+ u32 bsd_hwsp;
+ u32 blt_hwsp;
+ u32 tlb_rd_addr;
+
+ /* MBC */
+ u32 g3dctl;
+ u32 gsckgctl;
+ u32 mbctl;
+
+ /* GCP */
+ u32 ucgctl1;
+ u32 ucgctl3;
+ u32 rcgctl1;
+ u32 rcgctl2;
+ u32 rstctl;
+ u32 misccpctl;
+
+ /* GPM */
+ u32 gfxpause;
+ u32 rpdeuhwtc;
+ u32 rpdeuc;
+ u32 ecobus;
+ u32 pwrdwnupctl;
+ u32 rp_down_timeout;
+ u32 rp_deucsw;
+ u32 rcubmabdtmr;
+ u32 rcedata;
+ u32 spare2gh;
+
+ /* Display 1 CZ domain */
+ u32 gt_imr;
+ u32 gt_ier;
+ u32 pm_imr;
+ u32 pm_ier;
+ u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
+
+ /* GT SA CZ domain */
+ u32 tilectl;
+ u32 gt_fifoctl;
+ u32 gtlc_wake_ctrl;
+ u32 gtlc_survive;
+ u32 pmwgicz;
+
+ /* Display 2 CZ domain */
+ u32 gu_ctl0;
+ u32 gu_ctl1;
+ u32 pcbr;
+ u32 clock_gate_dis2;
+};
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
@@ -640,39 +282,45 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
-static int i915_resume_switcheroo(struct drm_device *dev);
-static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
+static int i915_resume_switcheroo(struct drm_i915_private *i915);
+static int i915_suspend_switcheroo(struct drm_i915_private *i915,
+ pm_message_t state);
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+ if (!i915) {
+ dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
+ return;
+ }
+
if (state == VGA_SWITCHEROO_ON) {
pr_info("switched on\n");
- dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* i915 resume handler doesn't set to D0 */
pci_set_power_state(pdev, PCI_D0);
- i915_resume_switcheroo(dev);
- dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ i915_resume_switcheroo(i915);
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
} else {
pr_info("switched off\n");
- dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- i915_suspend_switcheroo(dev, pmm);
- dev->switch_power_state = DRM_SWITCH_POWER_OFF;
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ i915_suspend_switcheroo(i915, pmm);
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
/*
* FIXME: open_count is protected by drm_global_mutex but that would lead to
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return dev->open_count == 0;
+ return i915 && i915->drm.open_count == 0;
}
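With this change the PCI drvdata stores the drm_i915_private itself (see pci_set_drvdata(pdev, i915) later in this patch), so the switcheroo callbacks can resolve it without going through struct drm_device. pdev_to_i915() is presumably the matching accessor, along the lines of:

	/* Assumed shape of the accessor paired with pci_set_drvdata(pdev, i915). */
	static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
	{
		return pci_get_drvdata(pdev);
	}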
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
@@ -687,7 +335,7 @@ static int i915_driver_modeset_probe(struct drm_device *dev)
struct pci_dev *pdev = dev_priv->drm.pdev;
int ret;
- if (i915_inject_probe_failure())
+ if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
if (HAS_DISPLAY(dev_priv)) {
@@ -880,6 +528,29 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
}
}
+static int vlv_alloc_s0ix_state(struct drm_i915_private *i915)
+{
+ if (!IS_VALLEYVIEW(i915))
+ return 0;
+
+ /* we write all the values in the struct, so no need to zero it out */
+ i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
+ GFP_KERNEL);
+ if (!i915->vlv_s0ix_state)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void vlv_free_s0ix_state(struct drm_i915_private *i915)
+{
+ if (!i915->vlv_s0ix_state)
+ return;
+
+ kfree(i915->vlv_s0ix_state);
+ i915->vlv_s0ix_state = NULL;
+}
+
/**
* i915_driver_early_probe - setup state not requiring device access
* @dev_priv: device private
@@ -894,11 +565,12 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
int ret = 0;
- if (i915_inject_probe_failure())
+ if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
intel_device_info_subplatform_init(dev_priv);
+ intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
intel_uncore_init_early(&dev_priv->uncore, dev_priv);
spin_lock_init(&dev_priv->irq_lock);
@@ -921,22 +593,26 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
if (ret < 0)
return ret;
+ ret = vlv_alloc_s0ix_state(dev_priv);
+ if (ret < 0)
+ goto err_workqueues;
+
+ intel_wopcm_init_early(&dev_priv->wopcm);
+
intel_gt_init_early(&dev_priv->gt, dev_priv);
ret = i915_gem_init_early(dev_priv);
if (ret < 0)
- goto err_workqueues;
+ goto err_gt;
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev_priv);
- intel_wopcm_init_early(&dev_priv->wopcm);
- intel_uc_init_early(&dev_priv->gt.uc);
intel_pm_setup(dev_priv);
intel_init_dpio(dev_priv);
ret = intel_power_domains_init(dev_priv);
if (ret < 0)
- goto err_uc;
+ goto err_gem;
intel_irq_init(dev_priv);
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
@@ -947,9 +623,11 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
return 0;
-err_uc:
- intel_uc_cleanup_early(&dev_priv->gt.uc);
+err_gem:
i915_gem_cleanup_early(dev_priv);
+err_gt:
+ intel_gt_driver_late_release(&dev_priv->gt);
+ vlv_free_s0ix_state(dev_priv);
err_workqueues:
i915_workqueues_cleanup(dev_priv);
return ret;
@@ -964,8 +642,9 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
intel_irq_fini(dev_priv);
intel_power_domains_cleanup(dev_priv);
- intel_uc_cleanup_early(&dev_priv->gt.uc);
i915_gem_cleanup_early(dev_priv);
+ intel_gt_driver_late_release(&dev_priv->gt);
+ vlv_free_s0ix_state(dev_priv);
i915_workqueues_cleanup(dev_priv);
pm_qos_remove_request(&dev_priv->sb_qos);
@@ -985,7 +664,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
int ret;
- if (i915_inject_probe_failure())
+ if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
if (i915_get_bridge_dev(dev_priv))
@@ -1515,7 +1194,8 @@ static void edram_detect(struct drm_i915_private *dev_priv)
dev_priv->edram_size_mb =
gen9_edram_size_mb(dev_priv, edram_cap);
- DRM_INFO("Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
+ dev_info(dev_priv->drm.dev,
+ "Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
}
/**
@@ -1530,7 +1210,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
struct pci_dev *pdev = dev_priv->drm.pdev;
int ret;
- if (i915_inject_probe_failure())
+ if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
intel_device_info_runtime_init(dev_priv);
@@ -1712,7 +1392,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
- i915_gem_shrinker_register(dev_priv);
+ i915_gem_driver_register(dev_priv);
i915_pmu_register(dev_priv);
/*
@@ -1792,7 +1472,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
i915_teardown_sysfs(dev_priv);
drm_dev_unplug(&dev_priv->drm);
- i915_gem_shrinker_unregister(dev_priv);
+ i915_gem_driver_unregister(dev_priv);
}
static void i915_welcome_messages(struct drm_i915_private *dev_priv)
@@ -1839,9 +1519,10 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
return ERR_PTR(err);
}
- i915->drm.pdev = pdev;
i915->drm.dev_private = i915;
- pci_set_drvdata(pdev, &i915->drm);
+
+ i915->drm.pdev = pdev;
+ pci_set_drvdata(pdev, i915);
/* Setup the write-once "constant" device info */
device_info = mkwrite_device_info(i915);
@@ -1941,51 +1622,50 @@ out_fini:
return ret;
}
-void i915_driver_remove(struct drm_device *dev)
+void i915_driver_remove(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = i915->drm.pdev;
- disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
- i915_driver_unregister(dev_priv);
+ i915_driver_unregister(i915);
/*
* After unregistering the device to prevent any new users, cancel
* all in-flight requests so that we can quickly unbind the active
* resources.
*/
- intel_gt_set_wedged(&dev_priv->gt);
+ intel_gt_set_wedged(&i915->gt);
/* Flush any external code that still may be under the RCU lock */
synchronize_rcu();
- i915_gem_suspend(dev_priv);
+ i915_gem_suspend(i915);
- drm_atomic_helper_shutdown(dev);
+ drm_atomic_helper_shutdown(&i915->drm);
- intel_gvt_driver_remove(dev_priv);
+ intel_gvt_driver_remove(i915);
- intel_modeset_driver_remove(dev);
+ intel_modeset_driver_remove(&i915->drm);
- intel_bios_driver_remove(dev_priv);
+ intel_bios_driver_remove(i915);
vga_switcheroo_unregister_client(pdev);
vga_client_register(pdev, NULL, NULL, NULL);
- intel_csr_ucode_fini(dev_priv);
+ intel_csr_ucode_fini(i915);
/* Free error state after interrupts are fully disabled. */
- cancel_delayed_work_sync(&dev_priv->gt.hangcheck.work);
- i915_reset_error_state(dev_priv);
+ cancel_delayed_work_sync(&i915->gt.hangcheck.work);
+ i915_reset_error_state(i915);
- i915_gem_driver_remove(dev_priv);
+ i915_gem_driver_remove(i915);
- intel_power_domains_driver_remove(dev_priv);
+ intel_power_domains_driver_remove(i915);
- i915_driver_hw_remove(dev_priv);
+ i915_driver_hw_remove(i915);
- enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
static void i915_driver_release(struct drm_device *dev)
@@ -2051,6 +1731,9 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
mutex_unlock(&dev->struct_mutex);
kfree(file_priv);
+
+ /* Catch up with all the deferred frees from "this" client */
+ i915_gem_flush_free_objects(to_i915(dev));
}
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
@@ -2155,7 +1838,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret;
+ int ret = 0;
disable_rpm_wakeref_asserts(rpm);
@@ -2166,12 +1849,9 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
intel_power_domains_suspend(dev_priv,
get_suspend_mode(dev_priv, hibernation));
- ret = 0;
- if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv))
- bxt_enable_dc9(dev_priv);
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hsw_enable_pc8(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ intel_display_power_suspend_late(dev_priv);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_suspend_complete(dev_priv);
if (ret) {
@@ -2199,34 +1879,29 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
out:
enable_rpm_wakeref_asserts(rpm);
- if (!dev_priv->uncore.user_forcewake.count)
+ if (!dev_priv->uncore.user_forcewake_count)
intel_runtime_pm_driver_release(rpm);
return ret;
}
-static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
+static int
+i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
{
int error;
- if (!dev) {
- DRM_ERROR("dev: %p\n", dev);
- DRM_ERROR("DRM not initialized, aborting suspend.\n");
- return -ENODEV;
- }
-
if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
state.event != PM_EVENT_FREEZE))
return -EINVAL;
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- error = i915_drm_suspend(dev);
+ error = i915_drm_suspend(&i915->drm);
if (error)
return error;
- return i915_drm_suspend_late(dev, false);
+ return i915_drm_suspend_late(&i915->drm, false);
}
static int i915_drm_resume(struct drm_device *dev)
@@ -2361,12 +2036,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_gt_check_and_clear_faults(&dev_priv->gt);
- if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
- gen9_sanitize_dc_state(dev_priv);
- bxt_disable_dc9(dev_priv);
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- hsw_disable_pc8(dev_priv);
- }
+ intel_display_power_resume_early(dev_priv);
intel_sanitize_gt_powersave(dev_priv);
@@ -2379,53 +2049,53 @@ static int i915_drm_resume_early(struct drm_device *dev)
return ret;
}
-static int i915_resume_switcheroo(struct drm_device *dev)
+static int i915_resume_switcheroo(struct drm_i915_private *i915)
{
int ret;
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- ret = i915_drm_resume_early(dev);
+ ret = i915_drm_resume_early(&i915->drm);
if (ret)
return ret;
- return i915_drm_resume(dev);
+ return i915_drm_resume(&i915->drm);
}
static int i915_pm_prepare(struct device *kdev)
{
- struct drm_device *dev = dev_get_drvdata(kdev);
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
- if (!dev) {
+ if (!i915) {
dev_err(kdev, "DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_prepare(dev);
+ return i915_drm_prepare(&i915->drm);
}
static int i915_pm_suspend(struct device *kdev)
{
- struct drm_device *dev = dev_get_drvdata(kdev);
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
- if (!dev) {
+ if (!i915) {
dev_err(kdev, "DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_suspend(dev);
+ return i915_drm_suspend(&i915->drm);
}
static int i915_pm_suspend_late(struct device *kdev)
{
- struct drm_device *dev = &kdev_to_i915(kdev)->drm;
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
/*
* We have a suspend ordering issue with the snd-hda driver also
@@ -2436,55 +2106,55 @@ static int i915_pm_suspend_late(struct device *kdev)
* FIXME: This should be solved with a special hdmi sink device or
* similar so that power domains can be employed.
*/
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_suspend_late(dev, false);
+ return i915_drm_suspend_late(&i915->drm, false);
}
static int i915_pm_poweroff_late(struct device *kdev)
{
- struct drm_device *dev = &kdev_to_i915(kdev)->drm;
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_suspend_late(dev, true);
+ return i915_drm_suspend_late(&i915->drm, true);
}
static int i915_pm_resume_early(struct device *kdev)
{
- struct drm_device *dev = &kdev_to_i915(kdev)->drm;
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_resume_early(dev);
+ return i915_drm_resume_early(&i915->drm);
}
static int i915_pm_resume(struct device *kdev)
{
- struct drm_device *dev = &kdev_to_i915(kdev)->drm;
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- return i915_drm_resume(dev);
+ return i915_drm_resume(&i915->drm);
}
/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
- struct drm_device *dev = &kdev_to_i915(kdev)->drm;
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
int ret;
- if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
- ret = i915_drm_suspend(dev);
+ if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
+ ret = i915_drm_suspend(&i915->drm);
if (ret)
return ret;
}
- ret = i915_gem_freeze(kdev_to_i915(kdev));
+ ret = i915_gem_freeze(i915);
if (ret)
return ret;
@@ -2493,16 +2163,16 @@ static int i915_pm_freeze(struct device *kdev)
static int i915_pm_freeze_late(struct device *kdev)
{
- struct drm_device *dev = &kdev_to_i915(kdev)->drm;
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
int ret;
- if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
- ret = i915_drm_suspend_late(dev, true);
+ if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
+ ret = i915_drm_suspend_late(&i915->drm, true);
if (ret)
return ret;
}
- ret = i915_gem_freeze_late(kdev_to_i915(kdev));
+ ret = i915_gem_freeze_late(i915);
if (ret)
return ret;
@@ -2559,9 +2229,12 @@ static int i915_pm_restore(struct device *kdev)
*/
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
- struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
+ struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
int i;
+ if (!s)
+ return;
+
/* GAM 0x4000-0x4770 */
s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
@@ -2640,10 +2313,13 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
- struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
+ struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
u32 val;
int i;
+ if (!s)
+ return;
+
/* GAM 0x4000-0x4770 */
I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
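
Both save and restore now bail out when vlv_s0ix_state is NULL, the expected state on platforms that never allocate it. A hedged sketch of the allocation side (helper name assumed):

	static int vlv_alloc_s0ix_state(struct drm_i915_private *i915)
	{
		/* CHV never used the save area; the NULL check now covers it */
		if (!IS_VALLEYVIEW(i915))
			return 0;

		i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
					       GFP_KERNEL);
		if (!i915->vlv_s0ix_state)
			return -ENOMEM;

		return 0;
	}

Under that assumption, the !IS_CHERRYVIEW() guards around the save/restore calls can be dropped in the next two hunks: on CHV the pointer is simply NULL.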
@@ -2852,8 +2528,7 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
if (err)
goto err2;
- if (!IS_CHERRYVIEW(dev_priv))
- vlv_save_gunit_s0ix_state(dev_priv);
+ vlv_save_gunit_s0ix_state(dev_priv);
err = vlv_force_gfx_clock(dev_priv, false);
if (err)
@@ -2883,8 +2558,7 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
*/
ret = vlv_force_gfx_clock(dev_priv, true);
- if (!IS_CHERRYVIEW(dev_priv))
- vlv_restore_gunit_s0ix_state(dev_priv);
+ vlv_restore_gunit_s0ix_state(dev_priv);
err = vlv_allow_gt_wake(dev_priv, true);
if (!ret)
@@ -2904,10 +2578,9 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
static int intel_runtime_suspend(struct device *kdev)
{
- struct drm_device *dev = dev_get_drvdata(kdev);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret;
+ int ret = 0;
if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
return -ENODEV;
@@ -2925,24 +2598,16 @@ static int intel_runtime_suspend(struct device *kdev)
*/
i915_gem_runtime_suspend(dev_priv);
- intel_uc_runtime_suspend(&dev_priv->gt.uc);
+ intel_gt_runtime_suspend(&dev_priv->gt);
intel_runtime_pm_disable_interrupts(dev_priv);
intel_uncore_suspend(&dev_priv->uncore);
- ret = 0;
- if (INTEL_GEN(dev_priv) >= 11) {
- icl_display_core_uninit(dev_priv);
- bxt_enable_dc9(dev_priv);
- } else if (IS_GEN9_LP(dev_priv)) {
- bxt_display_core_uninit(dev_priv);
- bxt_enable_dc9(dev_priv);
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- hsw_enable_pc8(dev_priv);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_display_power_suspend(dev_priv);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_suspend_complete(dev_priv);
- }
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
@@ -2950,9 +2615,8 @@ static int intel_runtime_suspend(struct device *kdev)
intel_runtime_pm_enable_interrupts(dev_priv);
- intel_uc_resume(&dev_priv->gt.uc);
+ intel_gt_runtime_resume(&dev_priv->gt);
- intel_gt_init_swizzling(&dev_priv->gt);
i915_gem_restore_fences(dev_priv);
enable_rpm_wakeref_asserts(rpm);
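
Runtime suspend gets the same treatment: the display-core uninit plus DC9/PC8 selection moves behind intel_display_power_suspend(). A sketch reconstructed from the branches removed above (hedged, not quoted from the new helper):

	void intel_display_power_suspend(struct drm_i915_private *i915)
	{
		if (INTEL_GEN(i915) >= 11) {
			icl_display_core_uninit(i915);
			bxt_enable_dc9(i915);
		} else if (IS_GEN9_LP(i915)) {
			bxt_display_core_uninit(i915);
			bxt_enable_dc9(i915);
		} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
			hsw_enable_pc8(i915);
		}
	}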
@@ -3002,8 +2666,7 @@ static int intel_runtime_suspend(struct device *kdev)
static int intel_runtime_resume(struct device *kdev)
{
- struct drm_device *dev = dev_get_drvdata(kdev);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
int ret = 0;
@@ -3020,40 +2683,20 @@ static int intel_runtime_resume(struct device *kdev)
if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
- if (INTEL_GEN(dev_priv) >= 11) {
- bxt_disable_dc9(dev_priv);
- icl_display_core_init(dev_priv, true);
- if (dev_priv->csr.dmc_payload) {
- if (dev_priv->csr.allowed_dc_mask &
- DC_STATE_EN_UPTO_DC6)
- skl_enable_dc6(dev_priv);
- else if (dev_priv->csr.allowed_dc_mask &
- DC_STATE_EN_UPTO_DC5)
- gen9_enable_dc5(dev_priv);
- }
- } else if (IS_GEN9_LP(dev_priv)) {
- bxt_disable_dc9(dev_priv);
- bxt_display_core_init(dev_priv, true);
- if (dev_priv->csr.dmc_payload &&
- (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
- gen9_enable_dc5(dev_priv);
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- hsw_disable_pc8(dev_priv);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_display_power_resume(dev_priv);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_resume_prepare(dev_priv, true);
- }
intel_uncore_runtime_resume(&dev_priv->uncore);
intel_runtime_pm_enable_interrupts(dev_priv);
- intel_uc_resume(&dev_priv->gt.uc);
-
/*
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
*/
- intel_gt_init_swizzling(&dev_priv->gt);
+ intel_gt_runtime_resume(&dev_priv->gt);
i915_gem_restore_fences(dev_priv);
/*
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2e13ecc9cbb6..1d725e0bba40 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -43,7 +43,7 @@
#include <linux/mm_types.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/stackdepot.h>
@@ -68,8 +68,13 @@
#include "display/intel_display_power.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_frontbuffer.h"
+#include "display/intel_gmbus.h"
#include "display/intel_opregion.h"
+#include "gem/i915_gem_context_types.h"
+#include "gem/i915_gem_shrinker.h"
+#include "gem/i915_gem_stolen.h"
+
#include "gt/intel_lrc.h"
#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
@@ -77,13 +82,13 @@
#include "gt/uc/intel_uc.h"
#include "intel_device_info.h"
+#include "intel_pch.h"
#include "intel_runtime_pm.h"
#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_wopcm.h"
#include "i915_gem.h"
-#include "gem/i915_gem_context_types.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_gtt.h"
#include "i915_gpu_error.h"
@@ -91,6 +96,7 @@
#include "i915_scheduler.h"
#include "gt/intel_timeline.h"
#include "i915_vma.h"
+#include "i915_irq.h"
#include "intel_gvt.h"
@@ -99,45 +105,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20190730"
-#define DRIVER_TIMESTAMP 1564512624
-
-/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
- * WARN_ON()) for hw state sanity checks to check for unexpected conditions
- * which may not necessarily be a user visible problem. This will either
- * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
- * enable distros and users to tailor their preferred amount of i915 abrt
- * spam.
- */
-#define I915_STATE_WARN(condition, format...) ({ \
- int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) \
- if (!WARN(i915_modparams.verbose_state_checks, format)) \
- DRM_ERROR(format); \
- unlikely(__ret_warn_on); \
-})
-
-#define I915_STATE_WARN_ON(x) \
- I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-
-bool __i915_inject_probe_failure(const char *func, int line);
-#define i915_inject_probe_failure() \
- __i915_inject_probe_failure(__func__, __LINE__)
-
-bool i915_error_injected(void);
-
-#else
-
-#define i915_inject_probe_failure() false
-#define i915_error_injected() false
-
-#endif
-
-#define i915_probe_error(i915, fmt, ...) \
- __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
- fmt, ##__VA_ARGS__)
+#define DRIVER_DATE "20190822"
+#define DRIVER_TIMESTAMP 1566477988
struct drm_i915_gem_object;
@@ -153,6 +122,10 @@ enum hpd_pin {
HPD_PORT_D,
HPD_PORT_E,
HPD_PORT_F,
+ HPD_PORT_G,
+ HPD_PORT_H,
+ HPD_PORT_I,
+
HPD_NUM_PINS
};
@@ -521,25 +494,6 @@ struct i915_psr {
u16 su_x_granularity;
};
-/*
- * Sorted by south display engine compatibility.
- * If the new PCH comes with a south display engine that is not
- * inherited from the latest item, please do not add it to the
- * end. Instead, add it right after its "parent" PCH.
- */
-enum intel_pch {
- PCH_NOP = -1, /* PCH without south display */
- PCH_NONE = 0, /* No PCH present */
- PCH_IBX, /* Ibexpeak PCH */
- PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
- PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
- PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
- PCH_CNP, /* Cannon/Comet Lake PCH */
- PCH_ICP, /* Ice Lake PCH */
- PCH_MCC, /* Mule Creek Canyon PCH */
- PCH_TGP, /* Tiger Lake PCH */
-};
-
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
@@ -573,67 +527,7 @@ struct i915_suspend_saved_registers {
u16 saveGCDGMBUS;
};
-struct vlv_s0ix_state {
- /* GAM */
- u32 wr_watermark;
- u32 gfx_prio_ctrl;
- u32 arb_mode;
- u32 gfx_pend_tlb0;
- u32 gfx_pend_tlb1;
- u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
- u32 media_max_req_count;
- u32 gfx_max_req_count;
- u32 render_hwsp;
- u32 ecochk;
- u32 bsd_hwsp;
- u32 blt_hwsp;
- u32 tlb_rd_addr;
-
- /* MBC */
- u32 g3dctl;
- u32 gsckgctl;
- u32 mbctl;
-
- /* GCP */
- u32 ucgctl1;
- u32 ucgctl3;
- u32 rcgctl1;
- u32 rcgctl2;
- u32 rstctl;
- u32 misccpctl;
-
- /* GPM */
- u32 gfxpause;
- u32 rpdeuhwtc;
- u32 rpdeuc;
- u32 ecobus;
- u32 pwrdwnupctl;
- u32 rp_down_timeout;
- u32 rp_deucsw;
- u32 rcubmabdtmr;
- u32 rcedata;
- u32 spare2gh;
-
- /* Display 1 CZ domain */
- u32 gt_imr;
- u32 gt_ier;
- u32 pm_imr;
- u32 pm_ier;
- u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
-
- /* GT SA CZ domain */
- u32 tilectl;
- u32 gt_fifoctl;
- u32 gtlc_wake_ctrl;
- u32 gtlc_survive;
- u32 pmwgicz;
-
- /* Display 2 CZ domain */
- u32 gu_ctl0;
- u32 gu_ctl1;
- u32 pcbr;
- u32 clock_gate_dis2;
-};
+struct vlv_s0ix_state;
struct intel_rps_ei {
ktime_t ktime;
@@ -767,7 +661,6 @@ struct i915_gem_mm {
*/
struct llist_head free_list;
struct work_struct free_work;
- spinlock_t free_lock;
/**
* Count of objects pending destructions. Used to skip needlessly
* waiting on an RCU barrier if no objects are waiting to be freed.
@@ -795,11 +688,6 @@ struct i915_gem_mm {
*/
struct workqueue_struct *userptr_wq;
- u64 unordered_timeline;
-
- /* the indicator for dispatch video commands on two BSD rings */
- atomic_t bsd_engine_dispatch_index;
-
/** Bit 6 swizzling required for X tiling */
u32 bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
@@ -1235,6 +1123,86 @@ struct i915_perf_stream {
* @oa_config: The OA configuration used by the stream.
*/
struct i915_oa_config *oa_config;
+
+ /**
+ * The OA context specific information.
+ */
+ struct intel_context *pinned_ctx;
+ u32 specific_ctx_id;
+ u32 specific_ctx_id_mask;
+
+ struct hrtimer poll_check_timer;
+ wait_queue_head_t poll_wq;
+ bool pollin;
+
+ bool periodic;
+ int period_exponent;
+
+ /**
+ * State of the OA buffer.
+ */
+ struct {
+ struct i915_vma *vma;
+ u8 *vaddr;
+ u32 last_ctx_id;
+ int format;
+ int format_size;
+ int size_exponent;
+
+ /**
+ * Locks reads and writes to all head/tail state
+ *
+ * Consider: the head and tail pointer state needs to be read
+ * consistently from a hrtimer callback (atomic context) and
+ * read() fop (user context) with tail pointer updates happening
+ * in atomic context and head updates in user context and the
+ * (unlikely) possibility of read() errors needing to reset all
+ * head/tail state.
+ *
+ * Note: Contention/performance aren't currently a significant
+ * concern here considering the relatively low frequency of
+ * hrtimer callbacks (5ms period) and that reads typically only
+ * happen in response to a hrtimer event and likely complete
+ * before the next callback.
+ *
+ * Note: This lock is not held *while* reading and copying data
+ * to userspace so the value of head observed in hrtimer
+ * callbacks won't represent any partial consumption of data.
+ */
+ spinlock_t ptr_lock;
+
+ /**
+ * One 'aging' tail pointer and one 'aged' tail pointer ready to
+ * be used for reading.
+ *
+ * Initial values of 0xffffffff are invalid and imply that an
+ * update is required (and should be ignored by an attempted
+ * read)
+ */
+ struct {
+ u32 offset;
+ } tails[2];
+
+ /**
+ * Index for the aged tail ready to read() data up to.
+ */
+ unsigned int aged_tail_idx;
+
+ /**
+ * A monotonic timestamp for when the current aging tail pointer
+ * was read; used to determine when it is old enough to trust.
+ */
+ u64 aging_timestamp;
+
+ /**
+ * Although we can always read back the head pointer register,
+ * we prefer to avoid trusting the HW state, just to avoid any
+ * risk that some hardware condition could somehow bump the
+ * head pointer unpredictably and cause us to forward the wrong
+ * OA buffer data to userspace.
+ */
+ u32 head;
+ } oa_buffer;
};
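
Per the comment above, a tail offset of 0xffffffff flags the slot as needing an update. A hypothetical reader-side check (name assumed) built from these fields:

	static bool oa_aged_tail_valid(const struct i915_perf_stream *stream)
	{
		unsigned int idx = stream->oa_buffer.aged_tail_idx;

		/* 0xffffffff == "update required", per the comment above */
		return stream->oa_buffer.tails[idx].offset != 0xffffffff;
	}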
/**
@@ -1272,7 +1240,7 @@ struct i915_oa_ops {
* @disable_metric_set: Remove system constraints associated with using
* the OA unit.
*/
- void (*disable_metric_set)(struct drm_i915_private *dev_priv);
+ void (*disable_metric_set)(struct i915_perf_stream *stream);
/**
* @oa_enable: Enable periodic sampling
@@ -1300,7 +1268,7 @@ struct i915_oa_ops {
* handling the OA unit tail pointer race that affects multiple
* generations.
*/
- u32 (*oa_hw_tail_read)(struct drm_i915_private *dev_priv);
+ u32 (*oa_hw_tail_read)(struct i915_perf_stream *stream);
};
struct intel_cdclk_state {
@@ -1340,6 +1308,7 @@ struct drm_i915_private {
resource_size_t stolen_usable_size; /* Total size minus reserved ranges */
struct intel_uncore uncore;
+ struct intel_uncore_mmio_debug mmio_debug;
struct i915_virtual_gpu vgpu;
@@ -1371,11 +1340,12 @@ struct drm_i915_private {
wait_queue_head_t gmbus_wait_queue;
struct pci_dev *bridge_dev;
- struct intel_engine_cs *engine[I915_NUM_ENGINES];
+
/* Context used internally to idle the GPU and setup initial state */
struct i915_gem_context *kernel_context;
- struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
- [MAX_ENGINE_INSTANCE + 1];
+
+ struct intel_engine_cs *engine[I915_NUM_ENGINES];
+ struct rb_root uabi_engines;
struct resource mch_res;
@@ -1396,7 +1366,6 @@ struct drm_i915_private {
u32 irq_mask;
u32 de_irq_mask[I915_MAX_PIPES];
};
- u32 gt_irq_mask;
u32 pm_rps_events;
u32 pipestat_irq_mask[I915_MAX_PIPES];
@@ -1414,9 +1383,6 @@ struct drm_i915_private {
/* backlight registers and fields in struct intel_panel */
struct mutex backlight_lock;
- /* LVDS info */
- bool no_aux_handshake;
-
/* protects panel power sequencer state */
struct mutex pps_mutex;
@@ -1576,6 +1542,8 @@ struct drm_i915_private {
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
+/* in Gen12, ID 0x7FF is reserved to indicate idle */
+#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
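
Worked out: GEN11_MAX_CONTEXT_HW_ID is 1 << 11 = 2048, an exclusive bound covering IDs 0..2047; subtracting one gives an exclusive bound of 2047 for Gen12, so ID 0x7FF (decimal 2047) is never handed out and stays free to signal idle.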
struct list_head hw_id_list;
} contexts;
@@ -1594,7 +1562,7 @@ struct drm_i915_private {
u32 suspend_count;
bool power_domains_suspended;
struct i915_suspend_saved_registers regfile;
- struct vlv_s0ix_state vlv_s0ix_state;
+ struct vlv_s0ix_state *vlv_s0ix_state;
enum {
I915_SAGV_UNKNOWN = 0,
@@ -1698,120 +1666,35 @@ struct drm_i915_private {
struct mutex lock;
struct list_head streams;
- struct {
- /*
- * The stream currently using the OA unit. If accessed
- * outside a syscall associated to its file
- * descriptor, you need to hold
- * dev_priv->drm.struct_mutex.
- */
- struct i915_perf_stream *exclusive_stream;
+ /*
+ * The stream currently using the OA unit. If accessed
+ * outside a syscall associated with its file
+ * descriptor, you need to hold
+ * dev_priv->drm.struct_mutex.
+ */
+ struct i915_perf_stream *exclusive_stream;
- struct intel_context *pinned_ctx;
- u32 specific_ctx_id;
- u32 specific_ctx_id_mask;
+ /**
+ * For rate limiting any notifications of spurious
+ * invalid OA reports
+ */
+ struct ratelimit_state spurious_report_rs;
- struct hrtimer poll_check_timer;
- wait_queue_head_t poll_wq;
- bool pollin;
+ struct i915_oa_config test_config;
- /**
- * For rate limiting any notifications of spurious
- * invalid OA reports
- */
- struct ratelimit_state spurious_report_rs;
-
- bool periodic;
- int period_exponent;
-
- struct i915_oa_config test_config;
-
- struct {
- struct i915_vma *vma;
- u8 *vaddr;
- u32 last_ctx_id;
- int format;
- int format_size;
-
- /**
- * Locks reads and writes to all head/tail state
- *
- * Consider: the head and tail pointer state
- * needs to be read consistently from a hrtimer
- * callback (atomic context) and read() fop
- * (user context) with tail pointer updates
- * happening in atomic context and head updates
- * in user context and the (unlikely)
- * possibility of read() errors needing to
- * reset all head/tail state.
- *
- * Note: Contention or performance aren't
- * currently a significant concern here
- * considering the relatively low frequency of
- * hrtimer callbacks (5ms period) and that
- * reads typically only happen in response to a
- * hrtimer event and likely complete before the
- * next callback.
- *
- * Note: This lock is not held *while* reading
- * and copying data to userspace so the value
- * of head observed in htrimer callbacks won't
- * represent any partial consumption of data.
- */
- spinlock_t ptr_lock;
-
- /**
- * One 'aging' tail pointer and one 'aged'
- * tail pointer ready to used for reading.
- *
- * Initial values of 0xffffffff are invalid
- * and imply that an update is required
- * (and should be ignored by an attempted
- * read)
- */
- struct {
- u32 offset;
- } tails[2];
-
- /**
- * Index for the aged tail ready to read()
- * data up to.
- */
- unsigned int aged_tail_idx;
-
- /**
- * A monotonic timestamp for when the current
- * aging tail pointer was read; used to
- * determine when it is old enough to trust.
- */
- u64 aging_timestamp;
-
- /**
- * Although we can always read back the head
- * pointer register, we prefer to avoid
- * trusting the HW state, just to avoid any
- * risk that some hardware condition could
- * somehow bump the head pointer unpredictably
- * and cause us to forward the wrong OA buffer
- * data to userspace.
- */
- u32 head;
- } oa_buffer;
-
- u32 gen7_latched_oastatus1;
- u32 ctx_oactxctrl_offset;
- u32 ctx_flexeu0_offset;
-
- /**
- * The RPT_ID/reason field for Gen8+ includes a bit
- * to determine if the CTX ID in the report is valid
- * but the specific bit differs between Gen 8 and 9
- */
- u32 gen8_valid_ctx_bit;
+ u32 gen7_latched_oastatus1;
+ u32 ctx_oactxctrl_offset;
+ u32 ctx_flexeu0_offset;
- struct i915_oa_ops ops;
- const struct i915_oa_format *oa_formats;
- } oa;
+ /**
+ * The RPT_ID/reason field for Gen8+ includes a bit
+ * to determine if the CTX ID in the report is valid
+ * but the specific bit differs between Gen 8 and 9
+ */
+ u32 gen8_valid_ctx_bit;
+
+ struct i915_oa_ops ops;
+ const struct i915_oa_format *oa_formats;
} perf;
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
@@ -1892,12 +1775,12 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
- return to_i915(dev_get_drvdata(kdev));
+ return dev_get_drvdata(kdev);
}
-static inline struct drm_i915_private *wopcm_to_i915(struct intel_wopcm *wopcm)
+static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
{
- return container_of(wopcm, struct drm_i915_private, wopcm);
+ return pci_get_drvdata(pdev);
}
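
kdev_to_i915() can drop the to_i915() hop because probe is assumed to stash the i915 pointer itself as drvdata, rather than the drm_device. A sketch of that assumption:

	/* at probe time (assumed), before either accessor is used: */
	pci_set_drvdata(pdev, i915);

pci drvdata and struct device drvdata share storage, so pdev_to_i915(pdev) and kdev_to_i915(&pdev->dev) then resolve to the same pointer.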
/* Simple iterator over all initialised engines */
@@ -1914,12 +1797,13 @@ static inline struct drm_i915_private *wopcm_to_i915(struct intel_wopcm *wopcm)
((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \
0;)
-enum hdmi_force_audio {
- HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
- HDMI_AUDIO_OFF, /* force turn off HDMI audio */
- HDMI_AUDIO_AUTO, /* trust EDID */
- HDMI_AUDIO_ON, /* force turn on HDMI audio */
-};
+#define rb_to_uabi_engine(rb) \
+ rb_entry_safe(rb, struct intel_engine_cs, uabi_node)
+
+#define for_each_uabi_engine(engine__, i915__) \
+ for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
+ (engine__); \
+ (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
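
A usage sketch of the new iterator (the pr_info() is illustrative only):

	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915)
		pr_info("uabi engine: %s\n", engine->name);

rb_entry_safe() maps a NULL rb_node to a NULL engine, which is what terminates the loop once rb_next() runs off the tree.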
#define I915_GTT_OFFSET_NONE ((u32)-1)
@@ -2270,53 +2154,14 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
-/* Having GuC/HuC is not the same as using GuC/HuC */
-#define USES_GUC(dev_priv) intel_uc_is_using_guc(&(dev_priv)->gt.uc)
-#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission(&(dev_priv)->gt.uc)
-#define USES_HUC(dev_priv) intel_uc_is_using_huc(&(dev_priv)->gt.uc)
+/* Having GuC is not the same as using GuC */
+#define USES_GUC(dev_priv) intel_uc_uses_guc(&(dev_priv)->gt.uc)
+#define USES_GUC_SUBMISSION(dev_priv) intel_uc_uses_guc_submission(&(dev_priv)->gt.uc)
#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
-#define INTEL_PCH_DEVICE_ID_MASK 0xff80
-#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
-#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
-#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
-#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
-#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
-#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80
-#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
-#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
-#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
-#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
-#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
-#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
-#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
-#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
-#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
-#define INTEL_PCH_MCC2_DEVICE_ID_TYPE 0x3880
-#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
-#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
-#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
-#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
-
-#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
-#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
-#define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
-#define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP)
-#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
-#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
-#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
-#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
-#define HAS_PCH_LPT_LP(dev_priv) \
- (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
- INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
-#define HAS_PCH_LPT_H(dev_priv) \
- (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
- INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_DEVICE_ID_TYPE)
-#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
-#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
-#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
-#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
+#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
+
#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
@@ -2332,8 +2177,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)
-#include "i915_trace.h"
-
static inline bool intel_vtd_active(void)
{
#ifdef CONFIG_INTEL_IOMMU
@@ -2355,13 +2198,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
}
/* i915_drv.c */
-void __printf(3, 4)
-__i915_printk(struct drm_i915_private *dev_priv, const char *level,
- const char *fmt, ...);
-
-#define i915_report_error(dev_priv, fmt, ...) \
- __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
-
#ifdef CONFIG_COMPAT
long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
#else
@@ -2370,7 +2206,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
extern const struct dev_pm_ops i915_pm_ops;
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
-void i915_driver_remove(struct drm_device *dev);
+void i915_driver_remove(struct drm_i915_private *i915);
void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
@@ -2385,6 +2221,9 @@ static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
return dev_priv->vgpu.active;
}
+int i915_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
/* i915_gem.c */
int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
@@ -2457,10 +2296,6 @@ int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
u32 handle, u64 *offset);
int i915_gem_mmap_gtt_version(void);
-void i915_gem_track_fb(struct drm_i915_gem_object *old,
- struct drm_i915_gem_object *new,
- unsigned frontbuffer_bits);
-
int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
static inline u32 i915_reset_count(struct i915_gpu_error *error)
@@ -2477,6 +2312,8 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
+void i915_gem_driver_register(struct drm_i915_private *i915);
+void i915_gem_driver_unregister(struct drm_i915_private *i915);
void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
void i915_gem_driver_release(struct drm_i915_private *dev_priv);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
@@ -2517,16 +2354,6 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
return ctx;
}
-int i915_perf_open_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-void i915_oa_init_reg_state(struct intel_engine_cs *engine,
- struct intel_context *ce,
- u32 *reg_state);
-
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
u64 min_size, u64 alignment,
@@ -2538,49 +2365,11 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm);
-/* i915_gem_stolen.c */
-int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
- struct drm_mm_node *node, u64 size,
- unsigned alignment);
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
- struct drm_mm_node *node, u64 size,
- unsigned alignment, u64 start,
- u64 end);
-void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
- struct drm_mm_node *node);
-int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv);
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
- resource_size_t size);
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
- resource_size_t stolen_offset,
- resource_size_t gtt_offset,
- resource_size_t size);
-
/* i915_gem_internal.c */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
phys_addr_t size);
-/* i915_gem_shrinker.c */
-unsigned long i915_gem_shrink(struct drm_i915_private *i915,
- unsigned long target,
- unsigned long *nr_scanned,
- unsigned flags);
-#define I915_SHRINK_UNBOUND BIT(0)
-#define I915_SHRINK_BOUND BIT(1)
-#define I915_SHRINK_ACTIVE BIT(2)
-#define I915_SHRINK_VMAPS BIT(3)
-#define I915_SHRINK_WRITEBACK BIT(4)
-
-unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
-void i915_gem_shrinker_register(struct drm_i915_private *i915);
-void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
-void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
- struct mutex *mutex);
-
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
@@ -2608,20 +2397,6 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
u32 batch_len,
bool is_master);
-/* i915_perf.c */
-void i915_perf_init(struct drm_i915_private *dev_priv);
-void i915_perf_fini(struct drm_i915_private *dev_priv);
-void i915_perf_register(struct drm_i915_private *dev_priv);
-void i915_perf_unregister(struct drm_i915_private *dev_priv);
-
-/* i915_suspend.c */
-int i915_save_state(struct drm_i915_private *dev_priv);
-int i915_restore_state(struct drm_i915_private *dev_priv);
-
-/* i915_sysfs.c */
-void i915_setup_sysfs(struct drm_i915_private *dev_priv);
-void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
-
/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
@@ -2629,24 +2404,9 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
return (struct intel_device_info *)INTEL_INFO(dev_priv);
}
-/* modesetting */
-void intel_modeset_init_hw(struct drm_device *dev);
-int intel_modeset_init(struct drm_device *dev);
-void intel_modeset_driver_remove(struct drm_device *dev);
-int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state);
-void intel_display_resume(struct drm_device *dev);
-void i915_redisable_vga(struct drm_i915_private *dev_priv);
-void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
-void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
-
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-struct intel_display_error_state *
-intel_display_capture_error_state(struct drm_i915_private *dev_priv);
-void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
- struct intel_display_error_state *error);
-
#define __I915_REG_OP(op__, dev_priv__, ...) \
intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
@@ -2684,29 +2444,19 @@ void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
-/* "Broadcast RGB" property */
-#define INTEL_BROADCAST_RGB_AUTO 0
-#define INTEL_BROADCAST_RGB_FULL 1
-#define INTEL_BROADCAST_RGB_LIMITED 2
+/* register wait wrappers for display regs */
+#define intel_de_wait_for_register(dev_priv_, reg_, mask_, value_, timeout_) \
+ intel_wait_for_register(&(dev_priv_)->uncore, \
+ (reg_), (mask_), (value_), (timeout_))
-void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
-bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
-
-/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
- * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
- * perform the operation. To check beforehand, pass in the parameters to
- * to i915_can_memcpy_from_wc() - since we only care about the low 4 bits,
- * you only need to pass in the minor offsets, page-aligned pointers are
- * always valid.
- *
- * For just checking for SSE4.1, in the foreknowledge that the future use
- * will be correctly aligned, just use i915_has_memcpy_from_wc().
- */
-#define i915_can_memcpy_from_wc(dst, src, len) \
- i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0)
+#define intel_de_wait_for_set(dev_priv_, reg_, mask_, timeout_) ({ \
+ u32 mask__ = (mask_); \
+ intel_de_wait_for_register((dev_priv_), (reg_), \
+ mask__, mask__, (timeout_)); \
+})
-#define i915_has_memcpy_from_wc() \
- i915_memcpy_from_wc(NULL, NULL, 0)
+#define intel_de_wait_for_clear(dev_priv_, reg_, mask_, timeout_) \
+ intel_de_wait_for_register((dev_priv_), (reg_), (mask_), 0, (timeout_))
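
Usage sketch for the wrappers (register, bit and timeout chosen for illustration; the timeout is in milliseconds, matching intel_wait_for_register()):

	/* e.g. wait up to 100ms for a pipe to report active */
	if (intel_de_wait_for_set(dev_priv, PIPECONF(pipe),
				  I965_PIPECONF_ACTIVE, 100))
		DRM_ERROR("pipe %c failed to become active\n",
			  pipe_name(pipe));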
/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
@@ -2727,15 +2477,4 @@ i915_coherent_map_type(struct drm_i915_private *i915)
return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}
-static inline void add_taint_for_CI(unsigned int taint)
-{
- /*
- * The system is "ok", just about surviving for the user, but
- * CI results are now unreliable as the HW is very suspect.
- * CI checks the taint state after every test and will reboot
- * the machine if the kernel is tainted.
- */
- add_taint(taint, LOCKDEP_STILL_OK);
-}
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 65863e955f40..eb31b69a316a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,7 +29,7 @@
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
@@ -46,6 +46,7 @@
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gemfs.h"
+#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h"
@@ -58,7 +59,6 @@
#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "intel_drv.h"
#include "intel_pm.h"
static int
@@ -139,17 +139,19 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
- /* We manually control the domain here and pretend that it
+ /*
+ * We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
*/
- intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+ intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+
if (copy_from_user(vaddr, user_data, args->size))
return -EFAULT;
drm_clflush_virt_range(vaddr, args->size);
intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
- intel_fb_obj_flush(obj, ORIGIN_CPU);
+ intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
return 0;
}
@@ -341,20 +343,16 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
return ret;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONFAULT |
- PIN_NONBLOCK);
+ vma = ERR_PTR(-ENODEV);
+ if (!i915_gem_object_is_tiled(obj))
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
node.allocated = false;
- ret = i915_vma_put_fence(vma);
- if (ret) {
- i915_vma_unpin(vma);
- vma = ERR_PTR(ret);
- }
- }
- if (IS_ERR(vma)) {
+ } else {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
goto out_unlock;
@@ -555,20 +553,16 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
wakeref = intel_runtime_pm_get(rpm);
}
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONFAULT |
- PIN_NONBLOCK);
+ vma = ERR_PTR(-ENODEV);
+ if (!i915_gem_object_is_tiled(obj))
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
node.allocated = false;
- ret = i915_vma_put_fence(vma);
- if (ret) {
- i915_vma_unpin(vma);
- vma = ERR_PTR(ret);
- }
- }
- if (IS_ERR(vma)) {
+ } else {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
goto out_rpm;
@@ -594,7 +588,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
goto out_unpin;
}
- intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+ intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
user_data = u64_to_user_ptr(args->data_ptr);
offset = args->offset;
@@ -636,7 +630,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
user_data += page_length;
offset += page_length;
}
- intel_fb_obj_flush(obj, ORIGIN_CPU);
+ intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
i915_gem_object_unlock_fence(obj, fence);
out_unpin:
@@ -729,7 +723,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
offset = 0;
}
- intel_fb_obj_flush(obj, ORIGIN_CPU);
+ intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
i915_gem_object_unlock_fence(obj, fence);
return ret;
@@ -893,35 +887,22 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
}
}
-static int wait_for_engines(struct intel_gt *gt)
-{
- if (wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT)) {
- dev_err(gt->i915->drm.dev,
- "Failed to idle engines, declaring wedged!\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- return -EIO;
- }
-
- return 0;
-}
-
static long
wait_for_timelines(struct drm_i915_private *i915,
unsigned int flags, long timeout)
{
- struct intel_gt_timelines *gt = &i915->gt.timelines;
+ struct intel_gt_timelines *timelines = &i915->gt.timelines;
struct intel_timeline *tl;
- mutex_lock(&gt->mutex);
- list_for_each_entry(tl, &gt->active_list, link) {
+ spin_lock(&timelines->lock);
+ list_for_each_entry(tl, &timelines->active_list, link) {
struct i915_request *rq;
rq = i915_active_request_get_unlocked(&tl->last_request);
if (!rq)
continue;
- mutex_unlock(&gt->mutex);
+ spin_unlock(&timelines->lock);
/*
* "Race-to-idle".
@@ -941,10 +922,10 @@ wait_for_timelines(struct drm_i915_private *i915,
return timeout;
/* restart after reacquiring the lock */
- mutex_lock(&gt->mutex);
- tl = list_entry(&gt->active_list, typeof(*tl), link);
+ spin_lock(&timelines->lock);
+ tl = list_entry(&timelines->active_list, typeof(*tl), link);
}
- mutex_unlock(&gt->mutex);
+ spin_unlock(&timelines->lock);
return timeout;
}
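
The mutex-to-spinlock conversion forces the drop/retake dance made explicit above: i915_request_wait() can sleep, which is illegal under a spinlock, so the loop takes a reference to the request, drops timelines->lock for the wait, then reacquires the lock and restarts from the list head, since the list may have changed in the meantime.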
@@ -953,27 +934,20 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
unsigned int flags, long timeout)
{
/* If the device is asleep, we have no requests outstanding */
- if (!READ_ONCE(i915->gt.awake))
+ if (!intel_gt_pm_is_awake(&i915->gt))
return 0;
- GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n",
+ GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
- timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "",
- yesno(i915->gt.awake));
+ timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
timeout = wait_for_timelines(i915, flags, timeout);
if (timeout < 0)
return timeout;
if (flags & I915_WAIT_LOCKED) {
- int err;
-
lockdep_assert_held(&i915->drm.struct_mutex);
- err = wait_for_engines(&i915->gt);
- if (err)
- return err;
-
i915_retire_requests(i915);
}
@@ -1240,22 +1214,14 @@ int i915_gem_init_hw(struct drm_i915_private *i915)
goto out;
}
- ret = intel_wopcm_init_hw(&i915->wopcm, gt);
- if (ret) {
- DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
- goto out;
- }
-
/* We can't enable contexts until all firmware is loaded */
- ret = intel_uc_init_hw(&i915->gt.uc);
+ ret = intel_uc_init_hw(&gt->uc);
if (ret) {
- DRM_ERROR("Enabling uc failed (%d)\n", ret);
+ i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
goto out;
}
- intel_mocs_init_l3cc_table(gt);
-
- intel_engines_set_scheduler_caps(i915);
+ intel_mocs_init(gt);
out:
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
@@ -1264,9 +1230,8 @@ out:
static int __intel_engines_record_defaults(struct drm_i915_private *i915)
{
+ struct i915_request *requests[I915_NUM_ENGINES] = {};
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
- struct i915_gem_engines *e;
enum intel_engine_id id;
int err = 0;
@@ -1279,20 +1244,25 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
* from the same default HW values.
*/
- ctx = i915_gem_context_create_kernel(i915, 0);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- e = i915_gem_context_lock_engines(ctx);
-
for_each_engine(engine, i915, id) {
- struct intel_context *ce = e->engines[id];
+ struct intel_context *ce;
struct i915_request *rq;
+ /* We must be able to switch to something! */
+ GEM_BUG_ON(!engine->kernel_context);
+ engine->serial++; /* force the kernel context switch */
+
+ ce = intel_context_create(i915->kernel_context, engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err_active;
+ intel_context_put(ce);
+ goto out;
}
err = intel_engine_emit_ctx_wa(rq);
@@ -1313,26 +1283,33 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
goto err_rq;
err_rq:
+ requests[id] = i915_request_get(rq);
i915_request_add(rq);
if (err)
- goto err_active;
+ goto out;
}
/* Flush the default context image to memory, and enable powersaving. */
if (!i915_gem_load_power_context(i915)) {
err = -EIO;
- goto err_active;
+ goto out;
}
- for_each_engine(engine, i915, id) {
- struct intel_context *ce = e->engines[id];
- struct i915_vma *state = ce->state;
+ for (id = 0; id < ARRAY_SIZE(requests); id++) {
+ struct i915_request *rq;
+ struct i915_vma *state;
void *vaddr;
- if (!state)
+ rq = requests[id];
+ if (!rq)
continue;
- GEM_BUG_ON(intel_context_is_pinned(ce));
+ /* We want to be able to unbind the state from the GGTT */
+ GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
+
+ state = rq->hw_context->state;
+ if (!state)
+ continue;
/*
* As we will hold a reference to the logical state, it will
@@ -1344,61 +1321,49 @@ err_rq:
*/
err = i915_vma_unbind(state);
if (err)
- goto err_active;
+ goto out;
i915_gem_object_lock(state->obj);
err = i915_gem_object_set_to_cpu_domain(state->obj, false);
i915_gem_object_unlock(state->obj);
if (err)
- goto err_active;
+ goto out;
- engine->default_state = i915_gem_object_get(state->obj);
- i915_gem_object_set_cache_coherency(engine->default_state,
- I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);
/* Check we can acquire the image of the context state */
- vaddr = i915_gem_object_pin_map(engine->default_state,
- I915_MAP_FORCE_WB);
+ vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
- goto err_active;
+ goto out;
}
- i915_gem_object_unpin_map(engine->default_state);
- }
-
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
- unsigned int found = intel_engines_has_context_isolation(i915);
-
- /*
- * Make sure that classes with multiple engine instances all
- * share the same basic configuration.
- */
- for_each_engine(engine, i915, id) {
- unsigned int bit = BIT(engine->uabi_class);
- unsigned int expected = engine->default_state ? bit : 0;
-
- if ((found & bit) != expected) {
- DRM_ERROR("mismatching default context state for class %d on engine %s\n",
- engine->uabi_class, engine->name);
- }
- }
+ rq->engine->default_state = i915_gem_object_get(state->obj);
+ i915_gem_object_unpin_map(state->obj);
}
-out_ctx:
- i915_gem_context_unlock_engines(ctx);
- i915_gem_context_set_closed(ctx);
- i915_gem_context_put(ctx);
- return err;
-
-err_active:
+out:
/*
* If we have to abandon now, we expect the engines to be idle
* and ready to be torn-down. The quickest way we can accomplish
* this is by declaring ourselves wedged.
*/
- intel_gt_set_wedged(&i915->gt);
- goto out_ctx;
+ if (err)
+ intel_gt_set_wedged(&i915->gt);
+
+ for (id = 0; id < ARRAY_SIZE(requests); id++) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ rq = requests[id];
+ if (!rq)
+ continue;
+
+ ce = rq->hw_context;
+ i915_request_put(rq);
+ intel_context_put(ce);
+ }
+ return err;
}
static int
@@ -1438,8 +1403,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
mkwrite_device_info(dev_priv)->page_sizes =
I915_GTT_PAGE_SIZE_4K;
- dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
-
intel_timelines_init(dev_priv);
ret = i915_gem_init_userptr(dev_priv);
@@ -1447,10 +1410,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
return ret;
intel_uc_fetch_firmwares(&dev_priv->gt.uc);
-
- ret = intel_wopcm_init(&dev_priv->wopcm);
- if (ret)
- goto err_uc_fw;
+ intel_wopcm_init(&dev_priv->wopcm);
/* This is just a security blanket to placate dragons.
* On some systems, we very sporadically observe that the first TLBs
@@ -1494,9 +1454,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
intel_init_gt_powersave(dev_priv);
- ret = intel_uc_init(&dev_priv->gt.uc);
- if (ret)
- goto err_pm;
+ intel_uc_init(&dev_priv->gt.uc);
ret = i915_gem_init_hw(dev_priv);
if (ret)
@@ -1526,15 +1484,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (ret)
goto err_gt;
- if (i915_inject_probe_failure()) {
- ret = -ENODEV;
+ ret = i915_inject_load_error(dev_priv, -ENODEV);
+ if (ret)
goto err_gt;
- }
- if (i915_inject_probe_failure()) {
- ret = -EIO;
+ ret = i915_inject_load_error(dev_priv, -EIO);
+ if (ret)
goto err_gt;
- }
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1560,9 +1516,8 @@ err_gt:
err_init_hw:
intel_uc_fini_hw(&dev_priv->gt.uc);
err_uc_init:
- intel_uc_fini(&dev_priv->gt.uc);
-err_pm:
if (ret != -EIO) {
+ intel_uc_fini(&dev_priv->gt.uc);
intel_cleanup_gt_powersave(dev_priv);
intel_engines_cleanup(dev_priv);
}
@@ -1576,10 +1531,8 @@ err_unlock:
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
-err_uc_fw:
- intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
-
if (ret != -EIO) {
+ intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
i915_gem_cleanup_userptr(dev_priv);
intel_timelines_fini(dev_priv);
}
@@ -1588,8 +1541,8 @@ err_uc_fw:
mutex_lock(&dev_priv->drm.struct_mutex);
/*
- * Allow engine initialisation to fail by marking the GPU as
- * wedged. But we only want to do this where the GPU is angry,
+ * Allow engines or uC initialisation to fail by marking the GPU
+ * as wedged. But we only want to do this when the GPU is angry,
* for all other failure, such as an allocation failure, bail.
*/
if (!intel_gt_is_wedged(&dev_priv->gt)) {
@@ -1611,6 +1564,18 @@ err_uc_fw:
return ret;
}
+void i915_gem_driver_register(struct drm_i915_private *i915)
+{
+ i915_gem_driver_register__shrinker(i915);
+
+ intel_engines_driver_register(i915);
+}
+
+void i915_gem_driver_unregister(struct drm_i915_private *i915)
+{
+ i915_gem_driver_unregister__shrinker(i915);
+}
+
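
Taken together with the remove/release hooks declared earlier, the assumed lifecycle ordering looks like this (a sketch, not a quoted call site):

	static void example_lifecycle(struct drm_i915_private *i915)
	{
		i915_gem_driver_register(i915);   /* once the device is live */

		/* device in use */

		i915_gem_driver_unregister(i915); /* first step of teardown */
		i915_gem_driver_remove(i915);     /* quiesce the hardware */
		i915_gem_driver_release(i915);    /* free remaining resources */
	}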
void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
GEM_BUG_ON(dev_priv->gt.awake);
@@ -1660,7 +1625,6 @@ void i915_gem_init_mmio(struct drm_i915_private *i915)
static void i915_gem_init__mm(struct drm_i915_private *i915)
{
spin_lock_init(&i915->mm.obj_lock);
- spin_lock_init(&i915->mm.free_lock);
init_llist_head(&i915->mm.free_list);
@@ -1677,8 +1641,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
i915_gem_init__mm(dev_priv);
i915_gem_init__pm(dev_priv);
- atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
-
spin_lock_init(&dev_priv->fb_tracking.lock);
err = i915_gemfs_init(dev_priv);
@@ -1695,8 +1657,6 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
WARN_ON(dev_priv->mm.shrink_count);
- intel_gt_cleanup_early(&dev_priv->gt);
-
i915_gemfs_fini(dev_priv);
}
@@ -1789,39 +1749,6 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
return ret;
}
-/**
- * i915_gem_track_fb - update frontbuffer tracking
- * @old: current GEM buffer for the frontbuffer slots
- * @new: new GEM buffer for the frontbuffer slots
- * @frontbuffer_bits: bitmask of frontbuffer slots
- *
- * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
- * from @old and setting them in @new. Both @old and @new can be NULL.
- */
-void i915_gem_track_fb(struct drm_i915_gem_object *old,
- struct drm_i915_gem_object *new,
- unsigned frontbuffer_bits)
-{
- /* Control of individual bits within the mask are guarded by
- * the owning plane->mutex, i.e. we can never see concurrent
- * manipulation of individual bits. But since the bitfield as a whole
- * is updated using RMW, we need to use atomics in order to update
- * the bits.
- */
- BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
- BITS_PER_TYPE(atomic_t));
-
- if (old) {
- WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
- atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
- }
-
- if (new) {
- WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
- atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
- }
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index fe82d3571072..167a7b56ed5b 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -28,6 +28,8 @@
#include <linux/bug.h>
#include <linux/interrupt.h>
+#include <drm/drm_drv.h>
+
struct drm_i915_private;
#ifdef CONFIG_DRM_I915_DEBUG_GEM
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
deleted file mode 100644
index b17f23991253..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#include "i915_gem_batch_pool.h"
-#include "i915_drv.h"
-
-/**
- * DOC: batch pool
- *
- * In order to submit batch buffers as 'secure', the software command parser
- * must ensure that a batch buffer cannot be modified after parsing. It does
- * this by copying the user provided batch buffer contents to a kernel owned
- * buffer from which the hardware will actually execute, and by carefully
- * managing the address space bindings for such buffers.
- *
- * The batch pool framework provides a mechanism for the driver to manage a
- * set of scratch buffers to use for this purpose. The framework can be
- * extended to support other uses cases should they arise.
- */
-
-/**
- * i915_gem_batch_pool_init() - initialize a batch buffer pool
- * @pool: the batch buffer pool
- * @engine: the associated request submission engine
- */
-void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
- struct intel_engine_cs *engine)
-{
- int n;
-
- pool->engine = engine;
-
- for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
- INIT_LIST_HEAD(&pool->cache_list[n]);
-}
-
-/**
- * i915_gem_batch_pool_fini() - clean up a batch buffer pool
- * @pool: the pool to clean up
- *
- * Note: Callers must hold the struct_mutex.
- */
-void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
-{
- int n;
-
- lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
-
- for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
- struct drm_i915_gem_object *obj, *next;
-
- list_for_each_entry_safe(obj, next,
- &pool->cache_list[n],
- batch_pool_link)
- i915_gem_object_put(obj);
-
- INIT_LIST_HEAD(&pool->cache_list[n]);
- }
-}
-
-/**
- * i915_gem_batch_pool_get() - allocate a buffer from the pool
- * @pool: the batch buffer pool
- * @size: the minimum desired size of the returned buffer
- *
- * Returns an inactive buffer from @pool with at least @size bytes,
- * with the pages pinned. The caller must i915_gem_object_unpin_pages()
- * on the returned object.
- *
- * Note: Callers must hold the struct_mutex
- *
- * Return: the buffer object or an error pointer
- */
-struct drm_i915_gem_object *
-i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
- size_t size)
-{
- struct drm_i915_gem_object *obj;
- struct list_head *list;
- int n, ret;
-
- lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
-
- /* Compute a power-of-two bucket, but throw everything greater than
- * 16KiB into the same bucket: i.e. the the buckets hold objects of
- * (1 page, 2 pages, 4 pages, 8+ pages).
- */
- n = fls(size >> PAGE_SHIFT) - 1;
- if (n >= ARRAY_SIZE(pool->cache_list))
- n = ARRAY_SIZE(pool->cache_list) - 1;
- list = &pool->cache_list[n];
-
- list_for_each_entry(obj, list, batch_pool_link) {
- struct reservation_object *resv = obj->base.resv;
-
- /* The batches are strictly LRU ordered */
- if (!reservation_object_test_signaled_rcu(resv, true))
- break;
-
- /*
- * The object is now idle, clear the array of shared
- * fences before we add a new request. Although, we
- * remain on the same engine, we may be on a different
- * timeline and so may continually grow the array,
- * trapping a reference to all the old fences, rather
- * than replace the existing fence.
- */
- if (rcu_access_pointer(resv->fence)) {
- reservation_object_lock(resv, NULL);
- reservation_object_add_excl_fence(resv, NULL);
- reservation_object_unlock(resv);
- }
-
- if (obj->base.size >= size)
- goto found;
- }
-
- obj = i915_gem_object_create_internal(pool->engine->i915, size);
- if (IS_ERR(obj))
- return obj;
-
-found:
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ERR_PTR(ret);
-
- list_move_tail(&obj->batch_pool_link, list);
- return obj;
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
deleted file mode 100644
index feeeeeaa54d8..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#ifndef I915_GEM_BATCH_POOL_H
-#define I915_GEM_BATCH_POOL_H
-
-#include <linux/types.h>
-
-struct drm_i915_gem_object;
-struct intel_engine_cs;
-
-struct i915_gem_batch_pool {
- struct intel_engine_cs *engine;
- struct list_head cache_list[4];
-};
-
-void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
- struct intel_engine_cs *engine);
-void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
-struct drm_i915_gem_object *
-i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
-
-#endif /* I915_GEM_BATCH_POOL_H */
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index a5783c4cb98b..52c86c6e0673 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -31,7 +31,6 @@
#include "gem/i915_gem_context.h"
#include "i915_drv.h"
-#include "intel_drv.h"
#include "i915_trace.h"
I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
@@ -62,9 +61,6 @@ mark_free(struct drm_mm_scan *scan,
if (i915_vma_is_pinned(vma))
return false;
- if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma))
- return false;
-
list_add(&vma->evict_link, unwind);
return drm_mm_scan_add_block(scan, &vma->node);
}
@@ -331,11 +327,6 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
break;
}
- if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma)) {
- ret = -ENOSPC;
- break;
- }
-
/* Overlap of objects in the same batch? */
if (i915_vma_is_pinned(vma)) {
ret = -ENOSPC;
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index bcac359ec661..615a9f4ef30c 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -230,16 +230,14 @@ static int fence_update(struct i915_fence_reg *fence,
i915_gem_object_get_tiling(vma->obj)))
return -EINVAL;
- ret = i915_active_request_retire(&vma->last_fence,
- &vma->obj->base.dev->struct_mutex);
+ ret = i915_active_wait(&vma->active);
if (ret)
return ret;
}
old = xchg(&fence->vma, NULL);
if (old) {
- ret = i915_active_request_retire(&old->last_fence,
- &old->obj->base.dev->struct_mutex);
+ ret = i915_active_wait(&old->active);
if (ret) {
fence->vma = old;
return ret;
@@ -289,7 +287,7 @@ static int fence_update(struct i915_fence_reg *fence,
}
/**
- * i915_vma_put_fence - force-remove fence for a VMA
+ * i915_vma_revoke_fence - force-remove fence for a VMA
* @vma: vma to map linearly (not through a fence reg)
*
* This function force-removes any fence from the given object, which is useful
@@ -299,14 +297,15 @@ static int fence_update(struct i915_fence_reg *fence,
*
* 0 on success, negative error code on failure.
*/
-int i915_vma_put_fence(struct i915_vma *vma)
+int i915_vma_revoke_fence(struct i915_vma *vma)
{
struct i915_fence_reg *fence = vma->fence;
+ lockdep_assert_held(&vma->vm->mutex);
if (!fence)
return 0;
- if (fence->pin_count)
+ if (atomic_read(&fence->pin_count))
return -EBUSY;
return fence_update(fence, NULL);
@@ -319,7 +318,7 @@ static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
list_for_each_entry(fence, &i915->ggtt.fence_list, link) {
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
- if (fence->pin_count)
+ if (atomic_read(&fence->pin_count))
continue;
return fence;
@@ -332,6 +331,48 @@ static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
return ERR_PTR(-EDEADLK);
}
+static int __i915_vma_pin_fence(struct i915_vma *vma)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
+ struct i915_fence_reg *fence;
+ struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
+ int err;
+
+ /* Just update our place in the LRU if our fence is getting reused. */
+ if (vma->fence) {
+ fence = vma->fence;
+ GEM_BUG_ON(fence->vma != vma);
+ atomic_inc(&fence->pin_count);
+ if (!fence->dirty) {
+ list_move_tail(&fence->link, &ggtt->fence_list);
+ return 0;
+ }
+ } else if (set) {
+ fence = fence_find(vma->vm->i915);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+
+ GEM_BUG_ON(atomic_read(&fence->pin_count));
+ atomic_inc(&fence->pin_count);
+ } else {
+ return 0;
+ }
+
+ err = fence_update(fence, set);
+ if (err)
+ goto out_unpin;
+
+ GEM_BUG_ON(fence->vma != set);
+ GEM_BUG_ON(vma->fence != (set ? fence : NULL));
+
+ if (set)
+ return 0;
+
+out_unpin:
+ atomic_dec(&fence->pin_count);
+ return err;
+}
+
/**
* i915_vma_pin_fence - set up fencing for a vma
* @vma: vma to map through a fence reg
@@ -352,8 +393,6 @@ static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
*/
int i915_vma_pin_fence(struct i915_vma *vma)
{
- struct i915_fence_reg *fence;
- struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
int err;
/*
@@ -361,39 +400,16 @@ int i915_vma_pin_fence(struct i915_vma *vma)
* must keep the device awake whilst using the fence.
*/
assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- /* Just update our place in the LRU if our fence is getting reused. */
- if (vma->fence) {
- fence = vma->fence;
- GEM_BUG_ON(fence->vma != vma);
- fence->pin_count++;
- if (!fence->dirty) {
- list_move_tail(&fence->link,
- &fence->i915->ggtt.fence_list);
- return 0;
- }
- } else if (set) {
- fence = fence_find(vma->vm->i915);
- if (IS_ERR(fence))
- return PTR_ERR(fence);
-
- GEM_BUG_ON(fence->pin_count);
- fence->pin_count++;
- } else
- return 0;
-
- err = fence_update(fence, set);
+ err = mutex_lock_interruptible(&vma->vm->mutex);
if (err)
- goto out_unpin;
+ return err;
- GEM_BUG_ON(fence->vma != set);
- GEM_BUG_ON(vma->fence != (set ? fence : NULL));
-
- if (set)
- return 0;
+ err = __i915_vma_pin_fence(vma);
+ mutex_unlock(&vma->vm->mutex);
-out_unpin:
- fence->pin_count--;
return err;
}
@@ -406,16 +422,17 @@ out_unpin:
*/
struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
{
+ struct i915_ggtt *ggtt = &i915->ggtt;
struct i915_fence_reg *fence;
int count;
int ret;
- lockdep_assert_held(&i915->drm.struct_mutex);
+ lockdep_assert_held(&ggtt->vm.mutex);
/* Keep at least one fence available for the display engine. */
count = 0;
- list_for_each_entry(fence, &i915->ggtt.fence_list, link)
- count += !fence->pin_count;
+ list_for_each_entry(fence, &ggtt->fence_list, link)
+ count += !atomic_read(&fence->pin_count);
if (count <= 1)
return ERR_PTR(-ENOSPC);
@@ -431,6 +448,7 @@ struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
}
list_del(&fence->link);
+
return fence;
}
@@ -442,9 +460,11 @@ struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
*/
void i915_unreserve_fence(struct i915_fence_reg *fence)
{
- lockdep_assert_held(&fence->i915->drm.struct_mutex);
+ struct i915_ggtt *ggtt = &fence->i915->ggtt;
+
+ lockdep_assert_held(&ggtt->vm.mutex);
- list_add(&fence->link, &fence->i915->ggtt.fence_list);
+ list_add(&fence->link, &ggtt->fence_list);
}
/**
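
These helpers now count pins with an atomic_t (the header change below
converts the field), which is what lets the pin/unpin and idle tests run
under the new vm->mutex scheme instead of the global struct_mutex. A rough
C11 sketch of the counting pattern, with hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>

struct fence_sketch {
	atomic_int pin_count;
};

static void fence_pin(struct fence_sketch *f)
{
	atomic_fetch_add_explicit(&f->pin_count, 1, memory_order_relaxed);
}

static void fence_unpin(struct fence_sketch *f)
{
	atomic_fetch_sub_explicit(&f->pin_count, 1, memory_order_relaxed);
}

/* mirrors the !atomic_read(&fence->pin_count) tests in the patch */
static bool fence_unused(struct fence_sketch *f)
{
	return atomic_load_explicit(&f->pin_count, memory_order_relaxed) == 0;
}
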
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
index 37e4f104f7c0..99866fb9d94f 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -41,7 +41,7 @@ struct i915_fence_reg {
struct list_head link;
struct drm_i915_private *i915;
struct i915_vma *vma;
- int pin_count;
+ atomic_t pin_count;
int id;
/**
* Whether the tiling parameters for the currently
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c3028722d4e3..0b81e0b64393 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -32,6 +32,7 @@
#include <linux/stop_machine.h>
#include <asm/set_memory.h>
+#include <asm/smp.h>
#include <drm/i915_drm.h>
@@ -42,7 +43,6 @@
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "intel_drv.h"
#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
@@ -120,7 +120,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma);
static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
{
- struct intel_uncore *uncore = &ggtt->vm.i915->uncore;
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
/*
* Note that as an uncached mmio write, this will flush the
@@ -131,7 +131,7 @@ static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
- struct intel_uncore *uncore = &ggtt->vm.i915->uncore;
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
gen6_ggtt_invalidate(ggtt);
intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
@@ -772,7 +772,8 @@ __set_pd_entry(struct i915_page_directory * const pd,
struct i915_page_dma * const to,
u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
- GEM_BUG_ON(atomic_read(px_used(pd)) > ARRAY_SIZE(pd->entry));
+ /* Each thread pre-pins the pd, and we may have a thread per pde. */
+ GEM_BUG_ON(atomic_read(px_used(pd)) > 2 * ARRAY_SIZE(pd->entry));
atomic_inc(px_used(pd));
pd->entry[idx] = to;
@@ -911,6 +912,23 @@ static inline unsigned int gen8_pd_top_count(const struct i915_address_space *vm
return (vm->total + (1ull << shift) - 1) >> shift;
}
+static inline struct i915_page_directory *
+gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
+{
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
+
+ if (vm->top == 2)
+ return ppgtt->pd;
+ else
+ return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
+}
+
+static inline struct i915_page_directory *
+gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
+{
+ return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
+}
+
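
The two helpers above recover the right page-directory pointer from a PTE
index, whether the address space has three or four levels. For reference, a
worked sketch of how an address splits into per-level 9-bit indices; the
constants are assumptions mirroring a 4-level, 4KiB-page gen8 layout:

#include <stdint.h>
#include <stdio.h>

#define PTE_SHIFT 12	/* 4KiB pages */
#define LVL_BITS 9	/* 512 entries per level */
#define LVL_MASK ((1u << LVL_BITS) - 1)

static unsigned int gen8_index_sketch(uint64_t addr, int lvl)
{
	return (addr >> (PTE_SHIFT + lvl * LVL_BITS)) & LVL_MASK;
}

int main(void)
{
	uint64_t addr = 0x0000123456789000ull;
	int lvl;

	for (lvl = 3; lvl >= 0; lvl--)	/* pml4e, pdpe, pde, pte */
		printf("lvl %d index: %u\n", lvl, gen8_index_sketch(addr, lvl));
	return 0;
}
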
static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
struct i915_page_directory *pd,
int count, int lvl)
@@ -947,8 +965,10 @@ static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
unsigned int idx, len;
+ GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
+
len = gen8_pd_range(start, end, lvl--, &idx);
- DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d}\n",
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
__func__, vm, lvl + 1, start, end,
idx, len, atomic_read(px_used(pd)));
GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));
@@ -974,7 +994,7 @@ static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
u64 *vaddr;
count = gen8_pt_count(start, end);
- DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d} removing pte\n",
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
__func__, vm, lvl, start, end,
gen8_pd_index(start, 0), count,
atomic_read(&pt->used));
@@ -1002,6 +1022,7 @@ static void gen8_ppgtt_clear(struct i915_address_space *vm,
{
GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(range_overflows(start, length, vm->total));
start >>= GEN8_PTE_SHIFT;
length >>= GEN8_PTE_SHIFT;
@@ -1013,15 +1034,17 @@ static void gen8_ppgtt_clear(struct i915_address_space *vm,
static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
struct i915_page_directory * const pd,
- u64 * const start, u64 end, int lvl)
+ u64 * const start, const u64 end, int lvl)
{
const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
struct i915_page_table *alloc = NULL;
unsigned int idx, len;
int ret = 0;
+ GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
+
len = gen8_pd_range(*start, end, lvl--, &idx);
- DBG("%s(%p):{lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d}\n",
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
__func__, vm, lvl + 1, *start, end,
idx, len, atomic_read(px_used(pd)));
GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));
@@ -1087,13 +1110,14 @@ static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
} else {
unsigned int count = gen8_pt_count(*start, end);
- DBG("%s(%p):{lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d} inserting pte\n",
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
__func__, vm, lvl, *start, end,
gen8_pd_index(*start, 0), count,
atomic_read(&pt->used));
atomic_add(count, &pt->used);
- GEM_BUG_ON(atomic_read(&pt->used) > I915_PDES);
+ /* All other pdes may be simultaneously removed */
+ GEM_BUG_ON(atomic_read(&pt->used) > 2 * I915_PDES);
*start += count;
}
} while (idx++, --len);
@@ -1112,6 +1136,7 @@ static int gen8_ppgtt_alloc(struct i915_address_space *vm,
GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(range_overflows(start, length, vm->total));
start >>= GEN8_PTE_SHIFT;
length >>= GEN8_PTE_SHIFT;
@@ -1137,12 +1162,12 @@ static inline struct sgt_dma {
}
static __always_inline u64
-gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt,
- struct i915_page_directory *pdp,
- struct sgt_dma *iter,
- u64 idx,
- enum i915_cache_level cache_level,
- u32 flags)
+gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
+ struct i915_page_directory *pdp,
+ struct sgt_dma *iter,
+ u64 idx,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
struct i915_page_directory *pd;
const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
@@ -1183,35 +1208,21 @@ gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt,
return idx;
}
-static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
- struct i915_vma *vma,
+static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
+ struct sgt_dma *iter,
enum i915_cache_level cache_level,
u32 flags)
{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct sgt_dma iter = sgt_dma(vma);
-
- gen8_ppgtt_insert_pte_entries(ppgtt, ppgtt->pd, &iter,
- vma->node.start >> GEN8_PTE_SHIFT,
- cache_level, flags);
-
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
-}
-
-static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
- struct i915_page_directory *pml4,
- struct sgt_dma *iter,
- enum i915_cache_level cache_level,
- u32 flags)
-{
const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
u64 start = vma->node.start;
dma_addr_t rem = iter->sg->length;
+ GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));
+
do {
- struct i915_page_directory *pdp =
- i915_pd_entry(pml4, __gen8_pte_index(start, 3));
- struct i915_page_directory *pd =
+ struct i915_page_directory * const pdp =
+ gen8_pdp_for_page_address(vma->vm, start);
+ struct i915_page_directory * const pd =
i915_pd_entry(pdp, __gen8_pte_index(start, 2));
gen8_pte_t encode = pte_encode;
unsigned int maybe_64K = -1;
@@ -1317,26 +1328,26 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
} while (iter->sg);
}
-static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
+static void gen8_ppgtt_insert(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = sgt_dma(vma);
- struct i915_page_directory * const pml4 = ppgtt->pd;
if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
- gen8_ppgtt_insert_huge_entries(vma, pml4, &iter, cache_level,
- flags);
- } else {
+ gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
+ } else {
u64 idx = vma->node.start >> GEN8_PTE_SHIFT;
- while ((idx = gen8_ppgtt_insert_pte_entries(ppgtt,
- i915_pd_entry(pml4, gen8_pd_index(idx, 3)),
- &iter, idx, cache_level,
- flags)))
- ;
+ do {
+ struct i915_page_directory * const pdp =
+ gen8_pdp_for_page_index(vm, idx);
+
+ idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
+ cache_level, flags);
+ } while (idx);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}
@@ -1495,18 +1506,15 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
goto err_free_scratch;
}
- if (i915_vm_is_4lvl(&ppgtt->vm)) {
- ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
- } else {
+ if (!i915_vm_is_4lvl(&ppgtt->vm)) {
if (intel_vgpu_active(i915)) {
err = gen8_preallocate_top_level_pdp(ppgtt);
if (err)
goto err_free_pd;
}
-
- ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
}
+ ppgtt->vm.insert_entries = gen8_ppgtt_insert;
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
ppgtt->vm.clear_range = gen8_ppgtt_clear;
@@ -1868,7 +1876,6 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
return ERR_PTR(-ENOMEM);
i915_active_init(i915, &vma->active, NULL, NULL);
- INIT_ACTIVE_REQUEST(&vma->last_fence);
vma->vm = &ggtt->vm;
vma->ops = &pd_vma_ops;
@@ -2036,6 +2043,27 @@ static void gtt_write_workarounds(struct intel_gt *gt)
GEN8_GAMW_ECO_DEV_RW_IA,
0,
GAMW_ECO_ENABLE_64K_IPS_FIELD);
+
+ if (IS_GEN_RANGE(i915, 8, 11)) {
+ bool can_use_gtt_cache = true;
+
+ /*
+ * According to the BSpec, if we use 2M/1G pages then we also
+ * need to disable the GTT cache. At least on BDW we can see
+ * visual corruption when using 2M pages without disabling the
+ * GTT cache.
+ */
+ if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
+ can_use_gtt_cache = false;
+
+ /* WaGttCachingOffByDefault */
+ intel_uncore_write(uncore,
+ HSW_GTT_CACHE_EN,
+ can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
+ WARN_ON_ONCE(can_use_gtt_cache &&
+ intel_uncore_read(uncore,
+ HSW_GTT_CACHE_EN) == 0);
+ }
}
int i915_ppgtt_init_hw(struct intel_gt *gt)
@@ -2843,6 +2871,19 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return 0;
}
+static void tgl_setup_private_ppat(struct drm_i915_private *dev_priv)
+{
+ /* TGL doesn't support LLC or AGE settings */
+ I915_WRITE(GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
+ I915_WRITE(GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
+ I915_WRITE(GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
+ I915_WRITE(GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
+ I915_WRITE(GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
+ I915_WRITE(GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
+ I915_WRITE(GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
+ I915_WRITE(GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
+}
+
static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
{
I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC);
@@ -2923,7 +2964,9 @@ static void setup_private_pat(struct drm_i915_private *dev_priv)
{
GEM_BUG_ON(INTEL_GEN(dev_priv) < 8);
- if (INTEL_GEN(dev_priv) >= 10)
+ if (INTEL_GEN(dev_priv) >= 12)
+ tgl_setup_private_ppat(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 10)
cnl_setup_private_ppat(dev_priv);
else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
chv_setup_private_ppat(dev_priv);
@@ -3085,7 +3128,8 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.clear_pages = clear_pages;
if (unlikely(ggtt->do_idle_maps))
- DRM_INFO("applying Ironlake quirks for intel_iommu\n");
+ dev_notice(dev_priv->drm.dev,
+ "Applying Ironlake quirks for intel_iommu\n");
return 0;
}
@@ -3146,7 +3190,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
return ret;
if (intel_vtd_active())
- DRM_INFO("VT-d active for gfx access\n");
+ dev_info(i915->drm.dev, "VT-d active for gfx access\n");
return 0;
}
@@ -3254,6 +3298,7 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
{
struct i915_vma *vma, *vn;
+ bool flush = false;
intel_gt_check_and_clear_faults(ggtt->vm.gt);
@@ -3278,10 +3323,9 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
WARN_ON(i915_vma_bind(vma,
obj ? obj->cache_level : 0,
PIN_UPDATE));
- if (obj) {
- i915_gem_object_lock(obj);
- WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
- i915_gem_object_unlock(obj);
+ if (obj) { /* only used during resume => exclusive access */
+ flush |= fetch_and_zero(&obj->write_domain);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
}
lock:
@@ -3292,6 +3336,9 @@ lock:
ggtt->invalidate(ggtt);
mutex_unlock(&ggtt->vm.mutex);
+
+ if (flush)
+ wbinvd_on_all_cpus();
}
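
The resume path now batches the cache maintenance: write domains are collected
with fetch_and_zero(), the driver's read-and-clear helper from i915_utils.h,
and a single wbinvd is issued after the walk. A hedged sketch of the pattern,
with flush_caches_once() as a hypothetical stand-in for wbinvd_on_all_cpus():

#include <stdbool.h>

#define fetch_and_zero(ptr) ({			\
	typeof(*(ptr)) val__ = *(ptr);		\
	*(ptr) = (typeof(*(ptr)))0;		\
	val__;					\
})

static void flush_caches_once(void)
{
	/* stand-in: the real code calls wbinvd_on_all_cpus() */
}

static void restore_sketch(unsigned int *write_domains, int n)
{
	bool flush = false;
	int i;

	for (i = 0; i < n; i++)
		flush |= fetch_and_zero(&write_domains[i]);

	if (flush)
		flush_caches_once();	/* one global flush, not one per object */
}
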
void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
@@ -3728,7 +3775,8 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
if (flags & PIN_NOEVICT)
return -ENOSPC;
- /* No free space, pick a slot at random.
+ /*
+ * No free space, pick a slot at random.
*
* There is a pathological case here using a GTT shared between
* mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
@@ -3756,6 +3804,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
if (err != -ENOSPC)
return err;
+ if (flags & PIN_NOSEARCH)
+ return -ENOSPC;
+
/* Randomly selected placement is pinned, do a search */
err = i915_gem_evict_something(vm, size, alignment, color,
start, end, flags);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 51274483502e..b97a47fc7a68 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -600,9 +600,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
u64 start, u64 end, unsigned int flags);
/* Flags used by pin/bind&friends. */
-#define PIN_NONBLOCK BIT_ULL(0)
-#define PIN_NONFAULT BIT_ULL(1)
-#define PIN_NOEVICT BIT_ULL(2)
+#define PIN_NOEVICT BIT_ULL(0)
+#define PIN_NOSEARCH BIT_ULL(1)
+#define PIN_NONBLOCK BIT_ULL(2)
#define PIN_MAPPABLE BIT_ULL(3)
#define PIN_ZONE_4G BIT_ULL(4)
#define PIN_HIGH BIT_ULL(5)
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
new file mode 100644
index 000000000000..5d9101376a3d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -0,0 +1,168 @@
+/*
+ * SPDX-License-Identifier: MIT
+ */
+
+#include "gt/intel_engine_user.h"
+
+#include "i915_drv.h"
+
+int i915_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
+ drm_i915_getparam_t *param = data;
+ int value;
+
+ switch (param->param) {
+ case I915_PARAM_IRQ_ACTIVE:
+ case I915_PARAM_ALLOW_BATCHBUFFER:
+ case I915_PARAM_LAST_DISPATCH:
+ case I915_PARAM_HAS_EXEC_CONSTANTS:
+ /* Reject all old ums/dri params. */
+ return -ENODEV;
+ case I915_PARAM_CHIPSET_ID:
+ value = i915->drm.pdev->device;
+ break;
+ case I915_PARAM_REVISION:
+ value = i915->drm.pdev->revision;
+ break;
+ case I915_PARAM_NUM_FENCES_AVAIL:
+ value = i915->ggtt.num_fences;
+ break;
+ case I915_PARAM_HAS_OVERLAY:
+ value = !!i915->overlay;
+ break;
+ case I915_PARAM_HAS_BSD:
+ value = !!intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_VIDEO, 0);
+ break;
+ case I915_PARAM_HAS_BLT:
+ value = !!intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_COPY, 0);
+ break;
+ case I915_PARAM_HAS_VEBOX:
+ value = !!intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_VIDEO_ENHANCE, 0);
+ break;
+ case I915_PARAM_HAS_BSD2:
+ value = !!intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_VIDEO, 1);
+ break;
+ case I915_PARAM_HAS_LLC:
+ value = HAS_LLC(i915);
+ break;
+ case I915_PARAM_HAS_WT:
+ value = HAS_WT(i915);
+ break;
+ case I915_PARAM_HAS_ALIASING_PPGTT:
+ value = INTEL_PPGTT(i915);
+ break;
+ case I915_PARAM_HAS_SEMAPHORES:
+ value = !!(i915->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
+ break;
+ case I915_PARAM_HAS_SECURE_BATCHES:
+ value = capable(CAP_SYS_ADMIN);
+ break;
+ case I915_PARAM_CMD_PARSER_VERSION:
+ value = i915_cmd_parser_get_version(i915);
+ break;
+ case I915_PARAM_SUBSLICE_TOTAL:
+ value = intel_sseu_subslice_total(sseu);
+ if (!value)
+ return -ENODEV;
+ break;
+ case I915_PARAM_EU_TOTAL:
+ value = sseu->eu_total;
+ if (!value)
+ return -ENODEV;
+ break;
+ case I915_PARAM_HAS_GPU_RESET:
+ value = i915_modparams.enable_hangcheck &&
+ intel_has_gpu_reset(i915);
+ if (value && intel_has_reset_engine(i915))
+ value = 2;
+ break;
+ case I915_PARAM_HAS_RESOURCE_STREAMER:
+ value = 0;
+ break;
+ case I915_PARAM_HAS_POOLED_EU:
+ value = HAS_POOLED_EU(i915);
+ break;
+ case I915_PARAM_MIN_EU_IN_POOL:
+ value = sseu->min_eu_in_pool;
+ break;
+ case I915_PARAM_HUC_STATUS:
+ value = intel_huc_check_status(&i915->gt.uc.huc);
+ if (value < 0)
+ return value;
+ break;
+ case I915_PARAM_MMAP_GTT_VERSION:
+ /* Though we've started our numbering from 1, and so class all
+ * earlier versions as 0, their value is in effect undefined, as
+ * the ioctl will report EINVAL for an unknown param!
+ */
+ value = i915_gem_mmap_gtt_version();
+ break;
+ case I915_PARAM_HAS_SCHEDULER:
+ value = i915->caps.scheduler;
+ break;
+
+ case I915_PARAM_MMAP_VERSION:
+ /* Remember to bump this if the version changes! */
+ case I915_PARAM_HAS_GEM:
+ case I915_PARAM_HAS_PAGEFLIPPING:
+ case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
+ case I915_PARAM_HAS_RELAXED_FENCING:
+ case I915_PARAM_HAS_COHERENT_RINGS:
+ case I915_PARAM_HAS_RELAXED_DELTA:
+ case I915_PARAM_HAS_GEN7_SOL_RESET:
+ case I915_PARAM_HAS_WAIT_TIMEOUT:
+ case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+ case I915_PARAM_HAS_PINNED_BATCHES:
+ case I915_PARAM_HAS_EXEC_NO_RELOC:
+ case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+ case I915_PARAM_HAS_COHERENT_PHYS_GTT:
+ case I915_PARAM_HAS_EXEC_SOFTPIN:
+ case I915_PARAM_HAS_EXEC_ASYNC:
+ case I915_PARAM_HAS_EXEC_FENCE:
+ case I915_PARAM_HAS_EXEC_CAPTURE:
+ case I915_PARAM_HAS_EXEC_BATCH_FIRST:
+ case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
+ case I915_PARAM_HAS_EXEC_SUBMIT_FENCE:
+ /* For the time being all of these are always true;
+ * if some supported hardware does not have one of these
+ * features, this value needs to be provided from
+ * INTEL_INFO(), a feature macro, or similar.
+ */
+ value = 1;
+ break;
+ case I915_PARAM_HAS_CONTEXT_ISOLATION:
+ value = intel_engines_has_context_isolation(i915);
+ break;
+ case I915_PARAM_SLICE_MASK:
+ value = sseu->slice_mask;
+ if (!value)
+ return -ENODEV;
+ break;
+ case I915_PARAM_SUBSLICE_MASK:
+ value = sseu->subslice_mask[0];
+ if (!value)
+ return -ENODEV;
+ break;
+ case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
+ value = 1000 * RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;
+ break;
+ case I915_PARAM_MMAP_GTT_COHERENT:
+ value = INTEL_INFO(i915)->has_coherent_ggtt;
+ break;
+ default:
+ DRM_DEBUG("Unknown parameter %d\n", param->param);
+ return -EINVAL;
+ }
+
+ if (put_user(value, param->value))
+ return -EFAULT;
+
+ return 0;
+}
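
Nothing about the ioctl's behaviour changes with the move into its own file;
userspace still drives it the same way. A small libdrm-style sketch, assuming
an already-open i915 fd:

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int i915_query_param(int fd, int param, int *value)
{
	drm_i915_getparam_t gp = {
		.param = param,
		.value = value,
	};

	/* returns -1 with errno set, e.g. ENODEV for the rejected params */
	return drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
}

For example, i915_query_param(fd, I915_PARAM_CHIPSET_ID, &devid) retrieves
the PCI device id reported above.
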
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
index 2d5fcba98841..be127cd28931 100644
--- a/drivers/gpu/drm/i915/i915_globals.c
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -62,6 +62,7 @@ static void __i915_globals_cleanup(void)
static __initconst int (* const initfn[])(void) = {
i915_global_active_init,
+ i915_global_buddy_init,
i915_global_context_init,
i915_global_gem_context_init,
i915_global_objects_init,
diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
index 2d199f411a4a..b2f5cd9b9b1a 100644
--- a/drivers/gpu/drm/i915/i915_globals.h
+++ b/drivers/gpu/drm/i915/i915_globals.h
@@ -27,6 +27,7 @@ void i915_globals_exit(void);
/* constructors */
int i915_global_active_init(void);
+int i915_global_buddy_init(void);
int i915_global_context_init(void);
int i915_global_gem_context_init(void);
int i915_global_objects_init(void);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0c0f255000c2..e284bd76fa86 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -43,33 +43,13 @@
#include "i915_drv.h"
#include "i915_gpu_error.h"
+#include "i915_memcpy.h"
#include "i915_scatterlist.h"
#include "intel_csr.h"
#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)
-static inline const struct intel_engine_cs *
-engine_lookup(const struct drm_i915_private *i915, unsigned int id)
-{
- if (id >= I915_NUM_ENGINES)
- return NULL;
-
- return i915->engine[id];
-}
-
-static inline const char *
-__engine_name(const struct intel_engine_cs *engine)
-{
- return engine ? engine->name : "";
-}
-
-static const char *
-engine_name(const struct drm_i915_private *i915, unsigned int id)
-{
- return __engine_name(engine_lookup(i915, id));
-}
-
static void __sg_set_buf(struct scatterlist *sg,
void *addr, unsigned int len, loff_t it)
{
@@ -447,7 +427,7 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
err_printf(m, " INSTDONE: 0x%08x\n",
ee->instdone.instdone);
- if (ee->engine_id != RCS0 || INTEL_GEN(m->i915) <= 3)
+ if (ee->engine->class != RENDER_CLASS || INTEL_GEN(m->i915) <= 3)
return;
err_printf(m, " SC_INSTDONE: 0x%08x\n",
@@ -501,8 +481,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
{
int n;
- err_printf(m, "%s command stream:\n",
- engine_name(m->i915, ee->engine_id));
+ err_printf(m, "%s command stream:\n", ee->engine->name);
err_printf(m, " IDLE?: %s\n", yesno(ee->idle));
err_printf(m, " START: 0x%08x\n", ee->start);
err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
@@ -578,9 +557,9 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
}
static void print_error_obj(struct drm_i915_error_state_buf *m,
- struct intel_engine_cs *engine,
+ const struct intel_engine_cs *engine,
const char *name,
- struct drm_i915_error_object *obj)
+ const struct drm_i915_error_object *obj)
{
char out[ASCII85_BUFSZ];
int page;
@@ -677,7 +656,7 @@ static void err_free_sgl(struct scatterlist *sgl)
static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
struct i915_gpu_state *error)
{
- struct drm_i915_error_object *obj;
+ const struct drm_i915_error_engine *ee;
struct timespec64 ts;
int i, j;
@@ -686,6 +665,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_printf(m, "Kernel: %s %s\n",
init_utsname()->release,
init_utsname()->machine);
+ err_printf(m, "Driver: %s\n", DRIVER_DATE);
ts = ktime_to_timespec64(error->time);
err_printf(m, "Time: %lld s %ld us\n",
(s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
@@ -701,15 +681,12 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
jiffies_to_msecs(jiffies - error->capture),
jiffies_to_msecs(error->capture - error->epoch));
- for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- if (!error->engine[i].context.pid)
- continue;
-
+ for (ee = error->engine; ee; ee = ee->next)
err_printf(m, "Active process (on ring %s): %s [%d]\n",
- engine_name(m->i915, i),
- error->engine[i].context.comm,
- error->engine[i].context.pid);
- }
+ ee->engine->name,
+ ee->context.comm,
+ ee->context.pid);
+
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
@@ -745,30 +722,27 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
for (i = 0; i < error->nfence; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
- if (INTEL_GEN(m->i915) >= 6) {
+ if (IS_GEN_RANGE(m->i915, 6, 11)) {
err_printf(m, "ERROR: 0x%08x\n", error->error);
-
- if (INTEL_GEN(m->i915) >= 8)
- err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
- error->fault_data1, error->fault_data0);
-
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
+ if (INTEL_GEN(m->i915) >= 8)
+ err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
+ error->fault_data1, error->fault_data0);
+
if (IS_GEN(m->i915, 7))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
- for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- if (error->engine[i].engine_id != -1)
- error_print_engine(m, &error->engine[i], error->epoch);
- }
+ for (ee = error->engine; ee; ee = ee->next)
+ error_print_engine(m, ee, error->epoch);
- for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- const struct drm_i915_error_engine *ee = &error->engine[i];
+ for (ee = error->engine; ee; ee = ee->next) {
+ const struct drm_i915_error_object *obj;
obj = ee->batchbuffer;
if (obj) {
- err_puts(m, m->i915->engine[i]->name);
+ err_puts(m, ee->engine->name);
if (ee->context.pid)
err_printf(m, " (submitted by %s [%d])",
ee->context.comm,
@@ -776,16 +750,15 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
- print_error_obj(m, m->i915->engine[i], NULL, obj);
+ print_error_obj(m, ee->engine, NULL, obj);
}
for (j = 0; j < ee->user_bo_count; j++)
- print_error_obj(m, m->i915->engine[i],
- "user", ee->user_bo[j]);
+ print_error_obj(m, ee->engine, "user", ee->user_bo[j]);
if (ee->num_requests) {
err_printf(m, "%s --- %d requests\n",
- m->i915->engine[i]->name,
+ ee->engine->name,
ee->num_requests);
for (j = 0; j < ee->num_requests; j++)
error_print_request(m, " ",
@@ -793,22 +766,13 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
error->epoch);
}
- print_error_obj(m, m->i915->engine[i],
- "ringbuffer", ee->ringbuffer);
-
- print_error_obj(m, m->i915->engine[i],
- "HW Status", ee->hws_page);
-
- print_error_obj(m, m->i915->engine[i],
- "HW context", ee->ctx);
-
- print_error_obj(m, m->i915->engine[i],
- "WA context", ee->wa_ctx);
-
- print_error_obj(m, m->i915->engine[i],
+ print_error_obj(m, ee->engine, "ringbuffer", ee->ringbuffer);
+ print_error_obj(m, ee->engine, "HW Status", ee->hws_page);
+ print_error_obj(m, ee->engine, "HW context", ee->ctx);
+ print_error_obj(m, ee->engine, "WA context", ee->wa_ctx);
+ print_error_obj(m, ee->engine,
"WA batchbuffer", ee->wa_batchbuffer);
-
- print_error_obj(m, m->i915->engine[i],
+ print_error_obj(m, ee->engine,
"NULL context", ee->default_state);
}
@@ -957,13 +921,15 @@ void __i915_gpu_state_free(struct kref *error_ref)
{
struct i915_gpu_state *error =
container_of(error_ref, typeof(*error), ref);
- long i, j;
+ long i;
- for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- struct drm_i915_error_engine *ee = &error->engine[i];
+ while (error->engine) {
+ struct drm_i915_error_engine *ee = error->engine;
- for (j = 0; j < ee->user_bo_count; j++)
- i915_error_object_free(ee->user_bo[j]);
+ error->engine = ee->next;
+
+ for (i = 0; i < ee->user_bo_count; i++)
+ i915_error_object_free(ee->user_bo[i]);
kfree(ee->user_bo);
i915_error_object_free(ee->batchbuffer);
@@ -974,6 +940,7 @@ void __i915_gpu_state_free(struct kref *error_ref)
i915_error_object_free(ee->wa_ctx);
kfree(ee->requests);
+ kfree(ee);
}
kfree(error->overlay);
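
With the engine records now a singly linked list rather than a fixed array,
teardown becomes the standard pop-and-free walk. The shape of the loop in
isolation, with a hypothetical node type:

#include <stdlib.h>

struct ee_sketch {
	struct ee_sketch *next;
	/* ... captured state ... */
};

static void free_engines_sketch(struct ee_sketch **head)
{
	while (*head) {
		struct ee_sketch *ee = *head;

		*head = ee->next;	/* unlink before freeing */
		free(ee);
	}
}
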
@@ -1055,23 +1022,17 @@ i915_error_object_create(struct drm_i915_private *i915,
*
* It's only a small step better than a random number in its current form.
*/
-static u32 i915_error_generate_code(struct i915_gpu_state *error,
- intel_engine_mask_t engine_mask)
+static u32 i915_error_generate_code(struct i915_gpu_state *error)
{
+ const struct drm_i915_error_engine *ee = error->engine;
+
/*
* IPEHR would be an ideal way to detect errors, as it's the gross
* measure of "the command that hung." However, it has some very common
* synchronization commands which almost always appear in cases that are
* strictly a client bug. Use instdone to help differentiate those.
*/
- if (engine_mask) {
- struct drm_i915_error_engine *ee =
- &error->engine[ffs(engine_mask)];
-
- return ee->ipehr ^ ee->instdone.instdone;
- }
-
- return 0;
+ return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
}
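
The ecode is therefore just the hung command word (IPEHR) folded with its
instdone state, so identical client-side hangs hash to the same value. In
isolation, as a sketch:

#include <stdint.h>

/* returns 0 when no engine record exists, mirroring the code above */
static uint32_t ecode_sketch(const uint32_t *ipehr, const uint32_t *instdone)
{
	return ipehr ? *ipehr ^ *instdone : 0;
}
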
static void gem_record_fences(struct i915_gpu_state *error)
@@ -1106,7 +1067,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
if (INTEL_GEN(dev_priv) >= 6) {
ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);
- if (INTEL_GEN(dev_priv) >= 8)
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ ee->fault_reg = I915_READ(GEN12_RING_FAULT_REG);
+ else if (INTEL_GEN(dev_priv) >= 8)
ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
else
ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
@@ -1282,9 +1246,11 @@ static void error_record_engine_execlists(const struct intel_engine_cs *engine,
ee->num_ports = n;
}
-static void record_context(struct drm_i915_error_context *e,
- struct i915_gem_context *ctx)
+static bool record_context(struct drm_i915_error_context *e,
+ const struct i915_request *rq)
{
+ const struct i915_gem_context *ctx = rq->gem_context;
+
if (ctx->pid) {
struct task_struct *task;
@@ -1301,6 +1267,8 @@ static void record_context(struct drm_i915_error_context *e,
e->sched_attr = ctx->sched;
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
+
+ return i915_gem_context_no_error_capture(ctx);
}
struct capture_vma {
@@ -1395,74 +1363,67 @@ static void
gem_record_rings(struct i915_gpu_state *error, struct compress *compress)
{
struct drm_i915_private *i915 = error->i915;
- int i;
+ struct intel_engine_cs *engine;
+ struct drm_i915_error_engine *ee;
+
+ ee = kzalloc(sizeof(*ee), GFP_KERNEL);
+ if (!ee)
+ return;
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- struct intel_engine_cs *engine = i915->engine[i];
- struct drm_i915_error_engine *ee = &error->engine[i];
+ for_each_uabi_engine(engine, i915) {
struct capture_vma *capture = NULL;
struct i915_request *request;
unsigned long flags;
- ee->engine_id = -1;
-
- if (!engine)
- continue;
-
- ee->engine_id = i;
-
/* Refill our page pool before entering the atomic section */
pool_refill(&compress->pool, ALLOW_FAIL);
- error_record_engine_registers(error, engine, ee);
- error_record_engine_execlists(engine, ee);
-
spin_lock_irqsave(&engine->active.lock, flags);
request = intel_engine_find_active_request(engine);
- if (request) {
- struct i915_gem_context *ctx = request->gem_context;
- struct intel_ring *ring = request->ring;
-
- record_context(&ee->context, ctx);
-
- /*
- * We need to copy these to an anonymous buffer
- * as the simplest method to avoid being overwritten
- * by userspace.
- */
- capture = capture_vma(capture,
- request->batch,
- &ee->batchbuffer);
+ if (!request) {
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+ continue;
+ }
- if (HAS_BROKEN_CS_TLB(i915))
- capture = capture_vma(capture,
- engine->gt->scratch,
- &ee->wa_batchbuffer);
+ error->simulated |= record_context(&ee->context, request);
- capture = request_record_user_bo(request, ee, capture);
+ /*
+ * We need to copy these to an anonymous buffer
+ * as the simplest method to avoid being overwritten
+ * by userspace.
+ */
+ capture = capture_vma(capture,
+ request->batch,
+ &ee->batchbuffer);
+ if (HAS_BROKEN_CS_TLB(i915))
capture = capture_vma(capture,
- request->hw_context->state,
- &ee->ctx);
+ engine->gt->scratch,
+ &ee->wa_batchbuffer);
- capture = capture_vma(capture,
- ring->vma,
- &ee->ringbuffer);
+ capture = request_record_user_bo(request, ee, capture);
- error->simulated |=
- i915_gem_context_no_error_capture(ctx);
+ capture = capture_vma(capture,
+ request->hw_context->state,
+ &ee->ctx);
- ee->rq_head = request->head;
- ee->rq_post = request->postfix;
- ee->rq_tail = request->tail;
+ capture = capture_vma(capture,
+ request->ring->vma,
+ &ee->ringbuffer);
- ee->cpu_ring_head = ring->head;
- ee->cpu_ring_tail = ring->tail;
+ ee->cpu_ring_head = request->ring->head;
+ ee->cpu_ring_tail = request->ring->tail;
- engine_record_requests(engine, request, ee);
- }
+ ee->rq_head = request->head;
+ ee->rq_post = request->postfix;
+ ee->rq_tail = request->tail;
+
+ engine_record_requests(engine, request, ee);
spin_unlock_irqrestore(&engine->active.lock, flags);
+ error_record_engine_registers(error, engine, ee);
+ error_record_engine_execlists(engine, ee);
+
while (capture) {
struct capture_vma *this = capture;
struct i915_vma *vma = *this->slot;
@@ -1489,7 +1450,18 @@ gem_record_rings(struct i915_gpu_state *error, struct compress *compress)
ee->default_state =
capture_object(i915, engine->default_state, compress);
+
+ ee->engine = engine;
+
+ ee->next = error->engine;
+ error->engine = ee;
+
+ ee = kzalloc(sizeof(*ee), GFP_KERNEL);
+ if (!ee)
+ return;
}
+
+ kfree(ee);
}
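
Note the allocation pattern gem_record_rings() adopts: a record is
pre-allocated before each iteration, filled and pushed onto the list only if
the engine has an active request, and the spare is freed when the walk ends.
A condensed, hypothetical sketch:

#include <stdlib.h>

struct rec {
	struct rec *next;
	int data;
};

static struct rec *capture_sketch(const int *active, int n)
{
	struct rec *head = NULL;
	struct rec *ee = calloc(1, sizeof(*ee));
	int i;

	for (i = 0; ee && i < n; i++) {
		if (!active[i])
			continue;	/* idle engine: reuse the spare node */

		ee->data = active[i];
		ee->next = head;	/* push onto the list head */
		head = ee;

		ee = calloc(1, sizeof(*ee));	/* pre-allocate the next */
	}
	free(ee);	/* unused spare; free(NULL) is a no-op */
	return head;
}
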
static void
@@ -1503,8 +1475,8 @@ capture_uc_state(struct i915_gpu_state *error, struct compress *compress)
if (!error->device_info.has_gt_uc)
return;
- error_uc->guc_fw = uc->guc.fw;
- error_uc->huc_fw = uc->huc.fw;
+ memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
+ memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));
/* Non-default firmware paths will be specified by the modparam.
* As modparams are generally accessible from userspace, make
@@ -1542,7 +1514,12 @@ static void capture_reg_state(struct i915_gpu_state *error)
if (IS_GEN(i915, 7))
error->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
- if (INTEL_GEN(i915) >= 8) {
+ if (INTEL_GEN(i915) >= 12) {
+ error->fault_data0 = intel_uncore_read(uncore,
+ GEN12_FAULT_TLB_DATA0);
+ error->fault_data1 = intel_uncore_read(uncore,
+ GEN12_FAULT_TLB_DATA1);
+ } else if (INTEL_GEN(i915) >= 8) {
error->fault_data0 = intel_uncore_read(uncore,
GEN8_FAULT_TLB_DATA0);
error->fault_data1 = intel_uncore_read(uncore,
@@ -1561,8 +1538,10 @@ static void capture_reg_state(struct i915_gpu_state *error)
if (INTEL_GEN(i915) >= 6) {
error->derrmr = intel_uncore_read(uncore, DERRMR);
- error->error = intel_uncore_read(uncore, ERROR_GEN6);
- error->done_reg = intel_uncore_read(uncore, DONE_REG);
+ if (INTEL_GEN(i915) < 12) {
+ error->error = intel_uncore_read(uncore, ERROR_GEN6);
+ error->done_reg = intel_uncore_read(uncore, DONE_REG);
+ }
}
if (INTEL_GEN(i915) >= 5)
@@ -1618,24 +1597,18 @@ error_msg(struct i915_gpu_state *error,
intel_engine_mask_t engines, const char *msg)
{
int len;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(error->engine); i++)
- if (!error->engine[i].context.pid)
- engines &= ~BIT(i);
len = scnprintf(error->error_msg, sizeof(error->error_msg),
"GPU HANG: ecode %d:%x:0x%08x",
INTEL_GEN(error->i915), engines,
- i915_error_generate_code(error, engines));
- if (engines) {
+ i915_error_generate_code(error));
+ if (error->engine) {
/* Just show the first executing process, more is confusing */
- i = __ffs(engines);
len += scnprintf(error->error_msg + len,
sizeof(error->error_msg) - len,
", in %s [%d]",
- error->engine[i].context.comm,
- error->engine[i].context.pid);
+ error->engine->context.comm,
+ error->engine->context.pid);
}
if (msg)
len += scnprintf(error->error_msg + len,
@@ -1676,12 +1649,10 @@ static void capture_params(struct i915_gpu_state *error)
static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
{
+ const struct drm_i915_error_engine *ee;
unsigned long epoch = error->capture;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- const struct drm_i915_error_engine *ee = &error->engine[i];
+ for (ee = error->engine; ee; ee = ee->next) {
if (ee->hangcheck_timestamp &&
time_before(ee->hangcheck_timestamp, epoch))
epoch = ee->hangcheck_timestamp;
@@ -1794,15 +1765,14 @@ void i915_capture_error_state(struct drm_i915_private *i915,
return;
}
- if (!warned &&
+ if (!xchg(&warned, true) &&
ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
- DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
- DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
- DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
- DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
- DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
- i915->drm.primary->index);
- warned = true;
+ pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
+ pr_info("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+ pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
+ pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
+ pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
+ i915->drm.primary->index);
}
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index a24c35107d16..df9f57766626 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -81,7 +81,8 @@ struct i915_gpu_state {
struct intel_display_error_state *display;
struct drm_i915_error_engine {
- int engine_id;
+ const struct intel_engine_cs *engine;
+
/* Software tracked state */
bool idle;
unsigned long hangcheck_timestamp;
@@ -158,7 +159,9 @@ struct i915_gpu_state {
u32 pp_dir_base;
};
} vm_info;
- } engine[I915_NUM_ENGINES];
+
+ struct drm_i915_error_engine *next;
+ } *engine;
struct scatterlist *sgl, *fit;
};
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a17d4fd17962..37e3dd3c1a9d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -37,17 +37,19 @@
#include <drm/drm_irq.h>
#include <drm/i915_drm.h>
+#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_irq.h"
+#include "gt/intel_gt_pm_irq.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
-#include "intel_drv.h"
#include "intel_pm.h"
/**
@@ -58,6 +60,8 @@
* and related files, but that will be described in separate chapters.
*/
+typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
+
static const u32 hpd_ilk[HPD_NUM_PINS] = {
[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};
@@ -135,6 +139,15 @@ static const u32 hpd_gen11[HPD_NUM_PINS] = {
[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};
+static const u32 hpd_gen12[HPD_NUM_PINS] = {
+ [HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
+ [HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
+ [HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
+ [HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
+ [HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
+ [HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG
+};
+
static const u32 hpd_icp[HPD_NUM_PINS] = {
[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
@@ -150,8 +163,20 @@ static const u32 hpd_mcc[HPD_NUM_PINS] = {
[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP
};
-static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
- i915_reg_t iir, i915_reg_t ier)
+static const u32 hpd_tgp[HPD_NUM_PINS] = {
+ [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
+ [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
+ [HPD_PORT_C] = SDE_DDIC_HOTPLUG_TGP,
+ [HPD_PORT_D] = SDE_TC1_HOTPLUG_ICP,
+ [HPD_PORT_E] = SDE_TC2_HOTPLUG_ICP,
+ [HPD_PORT_F] = SDE_TC3_HOTPLUG_ICP,
+ [HPD_PORT_G] = SDE_TC4_HOTPLUG_ICP,
+ [HPD_PORT_H] = SDE_TC5_HOTPLUG_TGP,
+ [HPD_PORT_I] = SDE_TC6_HOTPLUG_TGP,
+};
+
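
Tables such as hpd_tgp map each hotplug pin to its trigger bits in the
hardware register; the irq handler later folds a register value back into a
pin mask. A hypothetical sketch of that lookup direction (the driver's real
tables are sized by HPD_NUM_PINS):

#include <stdint.h>

#define NUM_PINS_SKETCH 16	/* assumption for the sketch */

static uint32_t pins_from_iir(const uint32_t map[NUM_PINS_SKETCH], uint32_t iir)
{
	uint32_t pin_mask = 0;
	int pin;

	for (pin = 0; pin < NUM_PINS_SKETCH; pin++)
		if (iir & map[pin])
			pin_mask |= UINT32_C(1) << pin;
	return pin_mask;
}
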
+void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
+ i915_reg_t iir, i915_reg_t ier)
{
intel_uncore_write(uncore, imr, 0xffffffff);
intel_uncore_posting_read(uncore, imr);
@@ -165,7 +190,7 @@ static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
intel_uncore_posting_read(uncore, iir);
}
-static void gen2_irq_reset(struct intel_uncore *uncore)
+void gen2_irq_reset(struct intel_uncore *uncore)
{
intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
intel_uncore_posting_read16(uncore, GEN2_IMR);
@@ -179,19 +204,6 @@ static void gen2_irq_reset(struct intel_uncore *uncore)
intel_uncore_posting_read16(uncore, GEN2_IIR);
}
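
Exporting these helpers lets the new gt irq code share the reset idiom they
embody: mask everything, disable delivery, then acknowledge IIR twice with
posting reads, because another event can latch while the first
acknowledgement is in flight. Schematically, against a stand-in register
file:

#include <stdint.h>

static uint32_t regs_sketch[64];	/* hypothetical register file */

static void mmio_write(unsigned int reg, uint32_t val)
{
	regs_sketch[reg] = val;
}

static uint32_t mmio_read(unsigned int reg)	/* doubles as a posting read */
{
	return regs_sketch[reg];
}

static void irq_reset_sketch(unsigned int imr, unsigned int iir, unsigned int ier)
{
	mmio_write(imr, 0xffffffff);	/* mask all sources */
	mmio_read(imr);

	mmio_write(ier, 0);		/* disable delivery */

	mmio_write(iir, 0xffffffff);	/* ack anything pending */
	mmio_read(iir);
	mmio_write(iir, 0xffffffff);	/* ack anything that re-latched */
	mmio_read(iir);
}
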
-#define GEN8_IRQ_RESET_NDX(uncore, type, which) \
-({ \
- unsigned int which_ = which; \
- gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
- GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
-})
-
-#define GEN3_IRQ_RESET(uncore, type) \
- gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
-
-#define GEN2_IRQ_RESET(uncore) \
- gen2_irq_reset(uncore)
-
/*
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/
@@ -225,10 +237,10 @@ static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
intel_uncore_posting_read16(uncore, GEN2_IIR);
}
-static void gen3_irq_init(struct intel_uncore *uncore,
- i915_reg_t imr, u32 imr_val,
- i915_reg_t ier, u32 ier_val,
- i915_reg_t iir)
+void gen3_irq_init(struct intel_uncore *uncore,
+ i915_reg_t imr, u32 imr_val,
+ i915_reg_t ier, u32 ier_val,
+ i915_reg_t iir)
{
gen3_assert_iir_is_zero(uncore, iir);
@@ -237,8 +249,8 @@ static void gen3_irq_init(struct intel_uncore *uncore,
intel_uncore_posting_read(uncore, imr);
}
-static void gen2_irq_init(struct intel_uncore *uncore,
- u32 imr_val, u32 ier_val)
+void gen2_irq_init(struct intel_uncore *uncore,
+ u32 imr_val, u32 ier_val)
{
gen2_assert_iir_is_zero(uncore);
@@ -247,27 +259,6 @@ static void gen2_irq_init(struct intel_uncore *uncore,
intel_uncore_posting_read16(uncore, GEN2_IMR);
}
-#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
-({ \
- unsigned int which_ = which; \
- gen3_irq_init((uncore), \
- GEN8_##type##_IMR(which_), imr_val, \
- GEN8_##type##_IER(which_), ier_val, \
- GEN8_##type##_IIR(which_)); \
-})
-
-#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
- gen3_irq_init((uncore), \
- type##IMR, imr_val, \
- type##IER, ier_val, \
- type##IIR)
-
-#define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
- gen2_irq_init((uncore), imr_val, ier_val)
-
-static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
-static void guc_irq_handler(struct intel_guc *guc, u16 guc_iir);
-
/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
@@ -306,41 +297,6 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
spin_unlock_irq(&dev_priv->irq_lock);
}
-static u32
-gen11_gt_engine_identity(struct intel_gt *gt,
- const unsigned int bank, const unsigned int bit);
-
-static bool gen11_reset_one_iir(struct intel_gt *gt,
- const unsigned int bank,
- const unsigned int bit)
-{
- void __iomem * const regs = gt->uncore->regs;
- u32 dw;
-
- lockdep_assert_held(&gt->i915->irq_lock);
-
- dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
- if (dw & BIT(bit)) {
- /*
- * According to the BSpec, DW_IIR bits cannot be cleared without
- * first servicing the Selector & Shared IIR registers.
- */
- gen11_gt_engine_identity(gt, bank, bit);
-
- /*
- * We locked GT INT DW by reading it. If we want to (try
- * to) recover from this successfully, we need to clear
- * our bit, otherwise we are locking the register for
- * everybody.
- */
- raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
-
- return true;
- }
-
- return false;
-}
-
/**
* ilk_update_display_irq - update DEIMR
* @dev_priv: driver private
@@ -371,39 +327,6 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
}
}
-/**
- * ilk_update_gt_irq - update GTIMR
- * @dev_priv: driver private
- * @interrupt_mask: mask of interrupt bits to update
- * @enabled_irq_mask: mask of interrupt bits to enable
- */
-static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
- u32 interrupt_mask,
- u32 enabled_irq_mask)
-{
- lockdep_assert_held(&dev_priv->irq_lock);
-
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
-
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return;
-
- dev_priv->gt_irq_mask &= ~interrupt_mask;
- dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-}
-
-void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
- ilk_update_gt_irq(dev_priv, mask, mask);
- intel_uncore_posting_read_fw(&dev_priv->uncore, GTIMR);
-}
-
-void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
- ilk_update_gt_irq(dev_priv, mask, 0);
-}
-
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
@@ -411,143 +334,28 @@ static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}
-static void write_pm_imr(struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
- struct intel_uncore *uncore = gt->uncore;
- u32 mask = gt->pm_imr;
- i915_reg_t reg;
-
- if (INTEL_GEN(i915) >= 11) {
- reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
- /* pm is in upper half */
- mask = mask << 16;
- } else if (INTEL_GEN(i915) >= 8) {
- reg = GEN8_GT_IMR(2);
- } else {
- reg = GEN6_PMIMR;
- }
-
- intel_uncore_write(uncore, reg, mask);
- intel_uncore_posting_read(uncore, reg);
-}
-
-static void write_pm_ier(struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
- struct intel_uncore *uncore = gt->uncore;
- u32 mask = gt->pm_ier;
- i915_reg_t reg;
-
- if (INTEL_GEN(i915) >= 11) {
- reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
- /* pm is in upper half */
- mask = mask << 16;
- } else if (INTEL_GEN(i915) >= 8) {
- reg = GEN8_GT_IER(2);
- } else {
- reg = GEN6_PMIER;
- }
-
- intel_uncore_write(uncore, reg, mask);
-}
-
-/**
- * snb_update_pm_irq - update GEN6_PMIMR
- * @gt: gt for the interrupts
- * @interrupt_mask: mask of interrupt bits to update
- * @enabled_irq_mask: mask of interrupt bits to enable
- */
-static void snb_update_pm_irq(struct intel_gt *gt,
- u32 interrupt_mask,
- u32 enabled_irq_mask)
-{
- u32 new_val;
-
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
-
- lockdep_assert_held(&gt->i915->irq_lock);
-
- new_val = gt->pm_imr;
- new_val &= ~interrupt_mask;
- new_val |= (~enabled_irq_mask & interrupt_mask);
-
- if (new_val != gt->pm_imr) {
- gt->pm_imr = new_val;
- write_pm_imr(gt);
- }
-}
-
-void gen6_unmask_pm_irq(struct intel_gt *gt, u32 mask)
-{
- if (WARN_ON(!intel_irqs_enabled(gt->i915)))
- return;
-
- snb_update_pm_irq(gt, mask, mask);
-}
-
-static void __gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
-{
- snb_update_pm_irq(gt, mask, 0);
-}
-
-void gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
-{
- if (WARN_ON(!intel_irqs_enabled(gt->i915)))
- return;
-
- __gen6_mask_pm_irq(gt, mask);
-}
-
-static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
-{
- i915_reg_t reg = gen6_pm_iir(dev_priv);
-
- lockdep_assert_held(&dev_priv->irq_lock);
-
- I915_WRITE(reg, reset_mask);
- I915_WRITE(reg, reset_mask);
- POSTING_READ(reg);
-}
-
-static void gen6_enable_pm_irq(struct intel_gt *gt, u32 enable_mask)
-{
- lockdep_assert_held(&gt->i915->irq_lock);
-
- gt->pm_ier |= enable_mask;
- write_pm_ier(gt);
- gen6_unmask_pm_irq(gt, enable_mask);
- /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
-}
-
-static void gen6_disable_pm_irq(struct intel_gt *gt, u32 disable_mask)
-{
- lockdep_assert_held(&gt->i915->irq_lock);
-
- gt->pm_ier &= ~disable_mask;
- __gen6_mask_pm_irq(gt, disable_mask);
- write_pm_ier(gt);
- /* a barrier is missing here, but we don't really need one */
-}
-
void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
- spin_lock_irq(&dev_priv->irq_lock);
+ struct intel_gt *gt = &dev_priv->gt;
- while (gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GTPM))
+ spin_lock_irq(&gt->irq_lock);
+
+ while (gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM))
;
dev_priv->gt_pm.rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&gt->irq_lock);
}
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
- spin_lock_irq(&dev_priv->irq_lock);
- gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
+ struct intel_gt *gt = &dev_priv->gt;
+
+ spin_lock_irq(&gt->irq_lock);
+ gen6_gt_pm_reset_iir(gt, GEN6_PM_RPS_EVENTS);
dev_priv->gt_pm.rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&gt->irq_lock);
}
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
@@ -558,35 +366,41 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
if (READ_ONCE(rps->interrupts_enabled))
return;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
WARN_ON_ONCE(rps->pm_iir);
if (INTEL_GEN(dev_priv) >= 11)
- WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GTPM));
+ WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM));
else
WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
rps->interrupts_enabled = true;
- gen6_enable_pm_irq(gt, dev_priv->pm_rps_events);
+ gen6_gt_pm_enable_irq(gt, dev_priv->pm_rps_events);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915, u32 mask)
+{
+ return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
}
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_gt *gt = &dev_priv->gt;
if (!READ_ONCE(rps->interrupts_enabled))
return;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
rps->interrupts_enabled = false;
I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
- gen6_disable_pm_irq(&dev_priv->gt, GEN6_PM_RPS_EVENTS);
+ gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&gt->irq_lock);
intel_synchronize_irq(dev_priv);
/* Now that we will not be generating any more work, flush any
@@ -604,46 +418,44 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- struct drm_i915_private *i915 = gt->i915;
- assert_rpm_wakelock_held(&i915->runtime_pm);
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
- spin_lock_irq(&i915->irq_lock);
- gen6_reset_pm_iir(i915, gt->pm_guc_events);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
+ gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
+ spin_unlock_irq(&gt->irq_lock);
}
void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- struct drm_i915_private *i915 = gt->i915;
- assert_rpm_wakelock_held(&i915->runtime_pm);
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
- spin_lock_irq(&i915->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
if (!guc->interrupts.enabled) {
- WARN_ON_ONCE(intel_uncore_read(gt->uncore, gen6_pm_iir(i915)) &
+ WARN_ON_ONCE(intel_uncore_read(gt->uncore,
+ gen6_pm_iir(gt->i915)) &
gt->pm_guc_events);
guc->interrupts.enabled = true;
- gen6_enable_pm_irq(gt, gt->pm_guc_events);
+ gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
}
- spin_unlock_irq(&i915->irq_lock);
+ spin_unlock_irq(&gt->irq_lock);
}
void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- struct drm_i915_private *i915 = gt->i915;
- assert_rpm_wakelock_held(&i915->runtime_pm);
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
- spin_lock_irq(&i915->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
guc->interrupts.enabled = false;
- gen6_disable_pm_irq(gt, gt->pm_guc_events);
+ gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
- spin_unlock_irq(&i915->irq_lock);
- intel_synchronize_irq(i915);
+ spin_unlock_irq(&gt->irq_lock);
+ intel_synchronize_irq(gt->i915);
gen9_reset_guc_interrupts(guc);
}
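
gen9_disable_guc_interrupts() shows the teardown order the disable paths in this file share: clear the enabled flag and mask the source under the lock, drop the lock, synchronize against handlers already running, and only then reset pending state. A sketch of that ordering with stub primitives (the names are illustrative, not the kernel API):

#include <stdio.h>

/* Stubs standing in for the real lock/irq primitives (illustrative). */
static void lock(void)              { puts("lock"); }
static void unlock(void)            { puts("unlock"); }
static void mask_source(void)       { puts("mask interrupt source"); }
static void wait_for_handlers(void) { puts("synchronize_irq: drain in-flight handlers"); }
static void clear_pending(void)     { puts("reset IIR / pending state"); }

static int enabled = 1;

/* The shared shutdown order. */
static void disable_interrupts(void)
{
	lock();
	enabled = 0;         /* 1. stop new work being queued */
	mask_source();       /* 2. mask the source under the lock */
	unlock();

	wait_for_handlers(); /* 3. wait out handlers already running */
	clear_pending();     /* 4. only now is it safe to clear state */
}

int main(void) { disable_interrupts(); return 0; }
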
@@ -651,42 +463,40 @@ void gen9_disable_guc_interrupts(struct intel_guc *guc)
void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- struct drm_i915_private *i915 = gt->i915;
- spin_lock_irq(&i915->irq_lock);
- gen11_reset_one_iir(gt, 0, GEN11_GUC);
- spin_unlock_irq(&i915->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
+ gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
+ spin_unlock_irq(&gt->irq_lock);
}
void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- spin_lock_irq(&gt->i915->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
if (!guc->interrupts.enabled) {
u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
- WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GUC));
+ WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events);
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events);
guc->interrupts.enabled = true;
}
- spin_unlock_irq(&gt->i915->irq_lock);
+ spin_unlock_irq(&gt->irq_lock);
}
void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- struct drm_i915_private *i915 = gt->i915;
- spin_lock_irq(&i915->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
guc->interrupts.enabled = false;
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
- spin_unlock_irq(&i915->irq_lock);
- intel_synchronize_irq(i915);
+ spin_unlock_irq(&gt->irq_lock);
+ intel_synchronize_irq(gt->i915);
gen11_reset_guc_interrupts(guc);
}
@@ -1360,17 +1170,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, gt_pm.rps.work);
+ struct intel_gt *gt = &dev_priv->gt;
struct intel_rps *rps = &dev_priv->gt_pm.rps;
bool client_boost = false;
int new_delay, adj, min, max;
u32 pm_iir = 0;
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
if (rps->interrupts_enabled) {
pm_iir = fetch_and_zero(&rps->pm_iir);
client_boost = atomic_read(&rps->num_waiters);
}
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&gt->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
@@ -1447,10 +1258,10 @@ static void gen6_pm_rps_work(struct work_struct *work)
out:
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
- spin_lock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
if (rps->interrupts_enabled)
- gen6_unmask_pm_irq(&dev_priv->gt, dev_priv->pm_rps_events);
- spin_unlock_irq(&dev_priv->irq_lock);
+ gen6_gt_pm_unmask_irq(gt, dev_priv->pm_rps_events);
+ spin_unlock_irq(&gt->irq_lock);
}
@@ -1467,6 +1278,7 @@ static void ivybridge_parity_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), l3_parity.error_work);
+ struct intel_gt *gt = &dev_priv->gt;
u32 error_status, row, bank, subbank;
char *parity_event[6];
u32 misccpctl;
@@ -1528,144 +1340,13 @@ static void ivybridge_parity_work(struct work_struct *work)
out:
WARN_ON(dev_priv->l3_parity.which_slice);
- spin_lock_irq(&dev_priv->irq_lock);
- gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&gt->irq_lock);
+ gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
+ spin_unlock_irq(&gt->irq_lock);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
-static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
- u32 iir)
-{
- if (!HAS_L3_DPF(dev_priv))
- return;
-
- spin_lock(&dev_priv->irq_lock);
- gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
- spin_unlock(&dev_priv->irq_lock);
-
- iir &= GT_PARITY_ERROR(dev_priv);
- if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
- dev_priv->l3_parity.which_slice |= 1 << 1;
-
- if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
- dev_priv->l3_parity.which_slice |= 1 << 0;
-
- queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
-}
-
-static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
- u32 gt_iir)
-{
- if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
- if (gt_iir & ILK_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
-}
-
-static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
- u32 gt_iir)
-{
- if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
- if (gt_iir & GT_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
- if (gt_iir & GT_BLT_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
-
- if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
- GT_BSD_CS_ERROR_INTERRUPT |
- GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
- DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
-
- if (gt_iir & GT_PARITY_ERROR(dev_priv))
- ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
-}
-
-static void
-gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
-{
- bool tasklet = false;
-
- if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
- tasklet = true;
-
- if (iir & GT_RENDER_USER_INTERRUPT) {
- intel_engine_breadcrumbs_irq(engine);
- tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
- }
-
- if (tasklet)
- tasklet_hi_schedule(&engine->execlists.tasklet);
-}
-
-static void gen8_gt_irq_ack(struct drm_i915_private *i915,
- u32 master_ctl, u32 gt_iir[4])
-{
- void __iomem * const regs = i915->uncore.regs;
-
-#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
- GEN8_GT_BCS_IRQ | \
- GEN8_GT_VCS0_IRQ | \
- GEN8_GT_VCS1_IRQ | \
- GEN8_GT_VECS_IRQ | \
- GEN8_GT_PM_IRQ | \
- GEN8_GT_GUC_IRQ)
-
- if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
- if (likely(gt_iir[0]))
- raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
- }
-
- if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
- gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
- if (likely(gt_iir[1]))
- raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
- }
-
- if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
- gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
- if (likely(gt_iir[2]))
- raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
- }
-
- if (master_ctl & GEN8_GT_VECS_IRQ) {
- gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
- if (likely(gt_iir[3]))
- raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
- }
-}
-
-static void gen8_gt_irq_handler(struct drm_i915_private *i915,
- u32 master_ctl, u32 gt_iir[4])
-{
- if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- gen8_cs_irq_handler(i915->engine[RCS0],
- gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
- gen8_cs_irq_handler(i915->engine[BCS0],
- gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
- }
-
- if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
- gen8_cs_irq_handler(i915->engine[VCS0],
- gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
- gen8_cs_irq_handler(i915->engine[VCS1],
- gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
- }
-
- if (master_ctl & GEN8_GT_VECS_IRQ) {
- gen8_cs_irq_handler(i915->engine[VECS0],
- gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
- }
-
- if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
- gen6_rps_irq_handler(i915, gt_iir[2]);
- guc_irq_handler(&i915->gt.uc.guc, gt_iir[2] >> 16);
- }
-}
-
static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
@@ -1682,6 +1363,26 @@ static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
}
}
+static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+ switch (pin) {
+ case HPD_PORT_D:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
+ case HPD_PORT_E:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
+ case HPD_PORT_F:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
+ case HPD_PORT_G:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
+ case HPD_PORT_H:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
+ case HPD_PORT_I:
+ return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
+ default:
+ return false;
+ }
+}
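
Each *_long_detect() helper is a pure pin-to-bit lookup. Since gen12 maps HPD_PORT_D..HPD_PORT_I linearly onto PORT_TC1..PORT_TC6, the switch above could equally be written as index arithmetic; a sketch with hypothetical bit positions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical pin numbering mirroring HPD_PORT_A..I. */
enum hpd_pin { HPD_PORT_A, HPD_PORT_B, HPD_PORT_C, HPD_PORT_D,
	       HPD_PORT_E, HPD_PORT_F, HPD_PORT_G, HPD_PORT_H, HPD_PORT_I };

/* Stand-in for GEN11_HOTPLUG_CTL_LONG_DETECT(tc); the layout is invented. */
#define LONG_DETECT(tc) (1u << ((tc) * 4 + 1))

/* gen12: HPD_PORT_D..I map 1:1 onto TC1..TC6, so arithmetic replaces the switch. */
static bool gen12_long_detect(enum hpd_pin pin, uint32_t val)
{
	if (pin < HPD_PORT_D || pin > HPD_PORT_I)
		return false;
	return val & LONG_DETECT(pin - HPD_PORT_D);
}

int main(void)
{
	printf("%d\n", gen12_long_detect(HPD_PORT_E, LONG_DETECT(1))); /* 1 */
	printf("%d\n", gen12_long_detect(HPD_PORT_A, ~0u));            /* 0 */
	return 0;
}
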
+
static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
@@ -1703,6 +1404,8 @@ static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
return val & ICP_DDIA_HPD_LONG_DETECT;
case HPD_PORT_B:
return val & ICP_DDIB_HPD_LONG_DETECT;
+ case HPD_PORT_C:
+ return val & TGP_DDIC_HPD_LONG_DETECT;
default:
return false;
}
@@ -1724,6 +1427,40 @@ static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
}
}
+static bool tgp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+ switch (pin) {
+ case HPD_PORT_A:
+ return val & ICP_DDIA_HPD_LONG_DETECT;
+ case HPD_PORT_B:
+ return val & ICP_DDIB_HPD_LONG_DETECT;
+ case HPD_PORT_C:
+ return val & TGP_DDIC_HPD_LONG_DETECT;
+ default:
+ return false;
+ }
+}
+
+static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+ switch (pin) {
+ case HPD_PORT_D:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
+ case HPD_PORT_E:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
+ case HPD_PORT_F:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
+ case HPD_PORT_G:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
+ case HPD_PORT_H:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
+ case HPD_PORT_I:
+ return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
+ default:
+ return false;
+ }
+}
+
static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
@@ -1803,6 +1540,8 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
{
enum hpd_pin pin;
+ BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);
+
for_each_hpd_pin(pin) {
if ((hpd[pin] & hotplug_trigger) == 0)
continue;
@@ -1916,18 +1655,18 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
/* The RPS events need forcewake, so we add them to a work queue and mask their
* IMR bits until the work is done. Other interrupts can be processed without
* the work queue. */
-static void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
+void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_rps *rps = &i915->gt_pm.rps;
const u32 events = i915->pm_rps_events & pm_iir;
- lockdep_assert_held(&i915->irq_lock);
+ lockdep_assert_held(&gt->irq_lock);
if (unlikely(!events))
return;
- gen6_mask_pm_irq(gt, events);
+ gen6_gt_pm_mask_irq(gt, events);
if (!rps->interrupts_enabled)
return;
@@ -1936,19 +1675,19 @@ static void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
schedule_work(&rps->work);
}
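
gen11_rps_irq_handler() above (and gen6_rps_irq_handler() below) implements the comment preceding it: mask the event bits in the top half, queue the heavyweight processing, and let the work item unmask once it is done. The split in miniature, as a single-threaded simulation with illustrative names:

#include <stdint.h>
#include <stdio.h>

static uint32_t imr = 0;     /* bits set = masked (illustrative) */
static uint32_t pending = 0;

/* Top half: runs in IRQ context, must be quick. */
static void rps_irq(uint32_t events)
{
	imr |= events;       /* mask so the event cannot re-fire */
	pending |= events;   /* hand the bits to the bottom half */
	printf("irq: masked 0x%x, queued work\n", events);
}

/* Bottom half: the scheduled work item, free to sleep / take forcewake. */
static void rps_work(void)
{
	uint32_t events = pending;

	pending = 0;
	printf("work: handled 0x%x\n", events);
	imr &= ~events;      /* unmask only once processing is done */
}

int main(void)
{
	rps_irq(0x30);
	rps_work();
	printf("imr = 0x%x\n", imr); /* back to 0 */
	return 0;
}
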
-static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
+void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_gt *gt = &dev_priv->gt;
if (pm_iir & dev_priv->pm_rps_events) {
- spin_lock(&dev_priv->irq_lock);
- gen6_mask_pm_irq(&dev_priv->gt,
- pm_iir & dev_priv->pm_rps_events);
+ spin_lock(&gt->irq_lock);
+ gen6_gt_pm_mask_irq(gt, pm_iir & dev_priv->pm_rps_events);
if (rps->interrupts_enabled) {
rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
schedule_work(&rps->work);
}
- spin_unlock(&dev_priv->irq_lock);
+ spin_unlock(&gt->irq_lock);
}
if (INTEL_GEN(dev_priv) >= 8)
@@ -1961,12 +1700,6 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
-static void guc_irq_handler(struct intel_guc *guc, u16 iir)
-{
- if (iir & GUC_INTR_GUC2HOST)
- intel_guc_to_host_event_handler(guc);
-}
-
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
@@ -2274,7 +2007,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
if (gt_iir)
- snb_gt_irq_handler(dev_priv, gt_iir);
+ gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
if (pm_iir)
gen6_rps_irq_handler(dev_priv, pm_iir);
@@ -2332,7 +2065,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
ier = I915_READ(VLV_IER);
I915_WRITE(VLV_IER, 0);
- gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
+ gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
@@ -2356,7 +2089,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
I915_WRITE(VLV_IER, ier);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
+ gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -2526,10 +2259,18 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
const u32 *pins)
{
- u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
- u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
+ u32 ddi_hotplug_trigger;
+ u32 tc_hotplug_trigger;
u32 pin_mask = 0, long_mask = 0;
+ if (HAS_PCH_MCC(dev_priv)) {
+ ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
+ tc_hotplug_trigger = 0;
+ } else {
+ ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
+ tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
+ }
+
if (ddi_hotplug_trigger) {
u32 dig_hotplug_reg;
@@ -2561,6 +2302,43 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
gmbus_irq_handler(dev_priv);
}
+static void tgp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+{
+ u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
+ u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
+ u32 pin_mask = 0, long_mask = 0;
+
+ if (ddi_hotplug_trigger) {
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
+ I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
+
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ ddi_hotplug_trigger,
+ dig_hotplug_reg, hpd_tgp,
+ tgp_ddi_port_hotplug_long_detect);
+ }
+
+ if (tc_hotplug_trigger) {
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
+ I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
+
+ intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+ tc_hotplug_trigger,
+ dig_hotplug_reg, hpd_tgp,
+ tgp_tc_port_hotplug_long_detect);
+ }
+
+ if (pin_mask)
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+
+ if (pch_iir & SDE_GMBUS_ICP)
+ gmbus_irq_handler(dev_priv);
+}
+
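
tgp_irq_handler() above repeats the ack-then-decode pattern used by icp_irq_handler(): read the latched hotplug control register, write the value straight back to clear what was latched, then decode pins from the snapshot. A toy model of that pattern, assuming write-one-to-clear status bits (an assumption of this sketch, not something the diff states):

#include <stdint.h>
#include <stdio.h>

/* Fake latched status register (stand-in for SHOTPLUG_CTL_*). */
static uint32_t shotplug_ctl = 0x00020002u;

static uint32_t reg_read(void)    { return shotplug_ctl; }
static void reg_write(uint32_t v) { shotplug_ctl &= ~v; } /* W1C bits */

int main(void)
{
	/* Writing back the value we just read acks exactly the events
	 * we are about to decode, and no later ones. */
	uint32_t dig_hotplug_reg = reg_read();

	reg_write(dig_hotplug_reg);
	printf("decoded 0x%08x, register now 0x%08x\n",
	       dig_hotplug_reg, reg_read());
	return 0;
}
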
static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
@@ -2741,9 +2519,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
I915_WRITE(GTIIR, gt_iir);
ret = IRQ_HANDLED;
if (INTEL_GEN(dev_priv) >= 6)
- snb_gt_irq_handler(dev_priv, gt_iir);
+ gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
else
- ilk_gt_irq_handler(dev_priv, gt_iir);
+ gen5_gt_irq_handler(&dev_priv->gt, gt_iir);
}
de_iir = I915_READ(DEIIR);
@@ -2796,6 +2574,16 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
u32 pin_mask = 0, long_mask = 0;
u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
+ long_pulse_detect_func long_pulse_detect;
+ const u32 *hpd;
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ long_pulse_detect = gen12_port_hotplug_long_detect;
+ hpd = hpd_gen12;
+ } else {
+ long_pulse_detect = gen11_port_hotplug_long_detect;
+ hpd = hpd_gen11;
+ }
if (trigger_tc) {
u32 dig_hotplug_reg;
@@ -2804,8 +2592,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
- dig_hotplug_reg, hpd_gen11,
- gen11_port_hotplug_long_detect);
+ dig_hotplug_reg, hpd, long_pulse_detect);
}
if (trigger_tbt) {
@@ -2815,8 +2602,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
- dig_hotplug_reg, hpd_gen11,
- gen11_port_hotplug_long_detect);
+ dig_hotplug_reg, hpd, long_pulse_detect);
}
if (pin_mask)
@@ -2827,19 +2613,25 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
{
- u32 mask = GEN8_AUX_CHANNEL_A;
+ u32 mask;
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ /* TODO: Add AUX entries for USBC */
+ return TGL_DE_PORT_AUX_DDIA |
+ TGL_DE_PORT_AUX_DDIB |
+ TGL_DE_PORT_AUX_DDIC;
+ mask = GEN8_AUX_CHANNEL_A;
if (INTEL_GEN(dev_priv) >= 9)
mask |= GEN9_AUX_CHANNEL_B |
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
- if (IS_CNL_WITH_PORT_F(dev_priv))
+ if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
mask |= CNL_AUX_CHANNEL_F;
- if (INTEL_GEN(dev_priv) >= 11)
- mask |= ICL_AUX_CHANNEL_E |
- CNL_AUX_CHANNEL_F;
+ if (IS_GEN(dev_priv, 11))
+ mask |= ICL_AUX_CHANNEL_E;
return mask;
}
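
The rewritten gen8_de_port_aux_mask() makes gen12 a self-contained early return and turns the old "gen >= 11" accumulation into an explicit gen-11-only addition. The shape of the function, with invented channel bits:

#include <stdint.h>
#include <stdio.h>

/* Illustrative channel bits, not the real register layout. */
#define AUX_A (1u << 0)
#define AUX_B (1u << 1)
#define AUX_C (1u << 2)
#define AUX_D (1u << 3)
#define AUX_E (1u << 4)
#define AUX_F (1u << 5)

static uint32_t aux_mask(int gen)
{
	uint32_t mask;

	if (gen >= 12)
		return AUX_A | AUX_B | AUX_C; /* DDI A-C only, TC still TODO */

	mask = AUX_A;                          /* every gen8+ part has AUX A */
	if (gen >= 9)
		mask |= AUX_B | AUX_C | AUX_D;
	if (gen == 11)
		mask |= AUX_E | AUX_F;         /* no longer leaks into gen12 */
	return mask;
}

int main(void)
{
	printf("gen11 0x%x, gen12 0x%x\n", aux_mask(11), aux_mask(12));
	return 0;
}
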
@@ -2852,6 +2644,28 @@ static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}
+static void
+gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+{
+ bool found = false;
+
+ if (iir & GEN8_DE_MISC_GSE) {
+ intel_opregion_asle_intr(dev_priv);
+ found = true;
+ }
+
+ if (iir & GEN8_DE_EDP_PSR) {
+ u32 psr_iir = I915_READ(EDP_PSR_IIR);
+
+ intel_psr_irq_handler(dev_priv, psr_iir);
+ I915_WRITE(EDP_PSR_IIR, psr_iir);
+ found = true;
+ }
+
+ if (!found)
+ DRM_ERROR("Unexpected DE Misc interrupt\n");
+}
+
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
@@ -2862,29 +2676,12 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (master_ctl & GEN8_DE_MISC_IRQ) {
iir = I915_READ(GEN8_DE_MISC_IIR);
if (iir) {
- bool found = false;
-
I915_WRITE(GEN8_DE_MISC_IIR, iir);
ret = IRQ_HANDLED;
-
- if (iir & GEN8_DE_MISC_GSE) {
- intel_opregion_asle_intr(dev_priv);
- found = true;
- }
-
- if (iir & GEN8_DE_EDP_PSR) {
- u32 psr_iir = I915_READ(EDP_PSR_IIR);
-
- intel_psr_irq_handler(dev_priv, psr_iir);
- I915_WRITE(EDP_PSR_IIR, psr_iir);
- found = true;
- }
-
- if (!found)
- DRM_ERROR("Unexpected DE Misc interrupt\n");
- }
- else
+ gen8_de_misc_irq_handler(dev_priv, iir);
+ } else {
DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
+ }
}
if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
@@ -2983,7 +2780,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC)
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
+ tgp_irq_handler(dev_priv, iir);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC)
icp_irq_handler(dev_priv, iir, hpd_mcc);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_irq_handler(dev_priv, iir, hpd_icp);
@@ -3038,7 +2837,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
}
/* Find, clear, then process each source of interrupt */
- gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
+ gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & ~GEN8_GT_IRQS) {
@@ -3049,135 +2848,12 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
gen8_master_intr_enable(regs);
- gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
+ gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
return IRQ_HANDLED;
}
static u32
-gen11_gt_engine_identity(struct intel_gt *gt,
- const unsigned int bank, const unsigned int bit)
-{
- void __iomem * const regs = gt->uncore->regs;
- u32 timeout_ts;
- u32 ident;
-
- lockdep_assert_held(&gt->i915->irq_lock);
-
- raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
-
- /*
- * NB: Specs do not specify how long to spin wait,
- * so we do ~100us as an educated guess.
- */
- timeout_ts = (local_clock() >> 10) + 100;
- do {
- ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
- } while (!(ident & GEN11_INTR_DATA_VALID) &&
- !time_after32(local_clock() >> 10, timeout_ts));
-
- if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
- DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
- bank, bit, ident);
- return 0;
- }
-
- raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
- GEN11_INTR_DATA_VALID);
-
- return ident;
-}
-
-static void
-gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
- const u16 iir)
-{
- if (instance == OTHER_GUC_INSTANCE)
- return guc_irq_handler(&gt->uc.guc, iir);
-
- if (instance == OTHER_GTPM_INSTANCE)
- return gen11_rps_irq_handler(gt, iir);
-
- WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
- instance, iir);
-}
-
-static void
-gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
- const u8 instance, const u16 iir)
-{
- struct intel_engine_cs *engine;
-
- if (instance <= MAX_ENGINE_INSTANCE)
- engine = gt->i915->engine_class[class][instance];
- else
- engine = NULL;
-
- if (likely(engine))
- return gen8_cs_irq_handler(engine, iir);
-
- WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
- class, instance);
-}
-
-static void
-gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
-{
- const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
- const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
- const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
-
- if (unlikely(!intr))
- return;
-
- if (class <= COPY_ENGINE_CLASS)
- return gen11_engine_irq_handler(gt, class, instance, intr);
-
- if (class == OTHER_CLASS)
- return gen11_other_irq_handler(gt, instance, intr);
-
- WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
- class, instance, intr);
-}
-
-static void
-gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
-{
- void __iomem * const regs = gt->uncore->regs;
- unsigned long intr_dw;
- unsigned int bit;
-
- lockdep_assert_held(&gt->i915->irq_lock);
-
- intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
-
- for_each_set_bit(bit, &intr_dw, 32) {
- const u32 ident = gen11_gt_engine_identity(gt, bank, bit);
-
- gen11_gt_identity_handler(gt, ident);
- }
-
- /* Clear must be after shared has been served for engine */
- raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
-}
-
-static void
-gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
-{
- struct drm_i915_private *i915 = gt->i915;
- unsigned int bank;
-
- spin_lock(&i915->irq_lock);
-
- for (bank = 0; bank < 2; bank++) {
- if (master_ctl & GEN11_GT_DW_IRQ(bank))
- gen11_gt_bank_handler(gt, bank);
- }
-
- spin_unlock(&i915->irq_lock);
-}
-
-static u32
gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
{
void __iomem * const regs = gt->uncore->regs;
@@ -3485,15 +3161,6 @@ static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
POSTING_READ(SDEIER);
}
-static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- GEN3_IRQ_RESET(uncore, GT);
- if (INTEL_GEN(dev_priv) >= 6)
- GEN3_IRQ_RESET(uncore, GEN6_PM);
-}
-
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3558,7 +3225,7 @@ static void ironlake_irq_reset(struct drm_i915_private *dev_priv)
intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
}
- gen5_gt_irq_reset(dev_priv);
+ gen5_gt_irq_reset(&dev_priv->gt);
ibx_irq_reset(dev_priv);
}
@@ -3568,7 +3235,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
- gen5_gt_irq_reset(dev_priv);
+ gen5_gt_irq_reset(&dev_priv->gt);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -3576,16 +3243,6 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
-static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- GEN8_IRQ_RESET_NDX(uncore, GT, 0);
- GEN8_IRQ_RESET_NDX(uncore, GT, 1);
- GEN8_IRQ_RESET_NDX(uncore, GT, 2);
- GEN8_IRQ_RESET_NDX(uncore, GT, 3);
-}
-
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3593,7 +3250,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
gen8_master_intr_disable(dev_priv->uncore.regs);
- gen8_gt_irq_reset(dev_priv);
+ gen8_gt_irq_reset(&dev_priv->gt);
intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
@@ -3611,27 +3268,6 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
ibx_irq_reset(dev_priv);
}
-static void gen11_gt_irq_reset(struct intel_gt *gt)
-{
- struct intel_uncore *uncore = gt->uncore;
-
- /* Disable RCS, BCS, VCS and VECS class engines. */
- intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
- intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0);
-
- /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */
- intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
- intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0);
- intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0);
- intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0);
- intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0);
-
- intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
- intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
- intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
- intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
-}
-
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3713,7 +3349,7 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
- gen8_gt_irq_reset(dev_priv);
+ gen8_gt_irq_reset(&dev_priv->gt);
GEN3_IRQ_RESET(uncore, GEN8_PCU_);
@@ -3778,21 +3414,21 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_hpd_detection_setup(dev_priv);
}
-static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
+static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
+ u32 ddi_hotplug_enable_mask,
+ u32 tc_hotplug_enable_mask)
{
u32 hotplug;
hotplug = I915_READ(SHOTPLUG_CTL_DDI);
- hotplug |= ICP_DDIA_HPD_ENABLE |
- ICP_DDIB_HPD_ENABLE;
+ hotplug |= ddi_hotplug_enable_mask;
I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
- hotplug = I915_READ(SHOTPLUG_CTL_TC);
- hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
- ICP_TC_HPD_ENABLE(PORT_TC2) |
- ICP_TC_HPD_ENABLE(PORT_TC3) |
- ICP_TC_HPD_ENABLE(PORT_TC4);
- I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
+ if (tc_hotplug_enable_mask) {
+ hotplug = I915_READ(SHOTPLUG_CTL_TC);
+ hotplug |= tc_hotplug_enable_mask;
+ I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
+ }
}
static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -3804,7 +3440,33 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
- icp_hpd_detection_setup(dev_priv);
+ icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
+ ICP_TC_HPD_ENABLE_MASK);
+}
+
+static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug_irqs, enabled_irqs;
+
+ hotplug_irqs = SDE_DDI_MASK_TGP;
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_mcc);
+
+ ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+
+ icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
+}
+
+static void tgp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug_irqs, enabled_irqs;
+
+ hotplug_irqs = SDE_DDI_MASK_TGP | SDE_TC_MASK_TGP;
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_tgp);
+
+ ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+
+ icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
+ TGP_TC_HPD_ENABLE_MASK);
+}
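
icp_hpd_detection_setup() is parameterized here so one helper serves ICP, MCC and TGP: callers pass the DDI and Type-C enable masks, and a zero TC mask skips the TC register entirely (the MCC case). Reduced to its control flow, with invented masks:

#include <stdint.h>
#include <stdio.h>

/* Illustrative masks: which DDI / Type-C HPD enables each PCH needs. */
#define TGP_DDI_MASK 0x007u
#define TGP_TC_MASK  0xff0u

static uint32_t ctl_ddi, ctl_tc; /* fake SHOTPLUG_CTL_DDI / _TC registers */

/* One helper, parameterized by masks; zero TC mask = DDI-only setup. */
static void hpd_detection_setup(uint32_t ddi_mask, uint32_t tc_mask)
{
	ctl_ddi |= ddi_mask;
	if (tc_mask)
		ctl_tc |= tc_mask;
}

int main(void)
{
	hpd_detection_setup(TGP_DDI_MASK, 0);           /* MCC: DDI only */
	printf("ddi=0x%x tc=0x%x\n", ctl_ddi, ctl_tc);
	hpd_detection_setup(TGP_DDI_MASK, TGP_TC_MASK); /* TGP: both */
	printf("ddi=0x%x tc=0x%x\n", ctl_ddi, ctl_tc);
	return 0;
}
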
static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
@@ -3829,9 +3491,11 @@ static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
+ const u32 *hpd;
u32 val;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
+ hpd = INTEL_GEN(dev_priv) >= 12 ? hpd_gen12 : hpd_gen11;
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd);
hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
val = I915_READ(GEN11_DE_HPD_IMR);
@@ -3841,7 +3505,9 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
gen11_hpd_detection_setup(dev_priv);
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
+ tgp_hpd_irq_setup(dev_priv);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_hpd_irq_setup(dev_priv);
}
@@ -3996,44 +3662,6 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
spt_hpd_detection_setup(dev_priv);
}
-static void gen5_gt_irq_postinstall(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 pm_irqs, gt_irqs;
-
- pm_irqs = gt_irqs = 0;
-
- dev_priv->gt_irq_mask = ~0;
- if (HAS_L3_DPF(dev_priv)) {
- /* L3 parity interrupt is always unmasked. */
- dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
- gt_irqs |= GT_PARITY_ERROR(dev_priv);
- }
-
- gt_irqs |= GT_RENDER_USER_INTERRUPT;
- if (IS_GEN(dev_priv, 5)) {
- gt_irqs |= ILK_BSD_USER_INTERRUPT;
- } else {
- gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
- }
-
- GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs);
-
- if (INTEL_GEN(dev_priv) >= 6) {
- /*
- * RPS interrupts will get enabled/disabled on demand when RPS
- * itself is enabled/disabled.
- */
- if (HAS_ENGINE(dev_priv, VECS0)) {
- pm_irqs |= PM_VEBOX_USER_INTERRUPT;
- dev_priv->gt.pm_ier |= PM_VEBOX_USER_INTERRUPT;
- }
-
- dev_priv->gt.pm_imr = 0xffffffff;
- GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->gt.pm_imr, pm_irqs);
- }
-}
-
static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -4067,7 +3695,7 @@ static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
display_mask | extra_mask);
- gen5_gt_irq_postinstall(dev_priv);
+ gen5_gt_irq_postinstall(&dev_priv->gt);
ilk_hpd_detection_setup(dev_priv);
@@ -4116,7 +3744,7 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
- gen5_gt_irq_postinstall(dev_priv);
+ gen5_gt_irq_postinstall(&dev_priv->gt);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -4127,41 +3755,6 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
POSTING_READ(VLV_MASTER_IER);
}
-static void gen8_gt_irq_postinstall(struct drm_i915_private *i915)
-{
- struct intel_gt *gt = &i915->gt;
- struct intel_uncore *uncore = gt->uncore;
-
- /* These are interrupts we'll toggle with the ring mask register */
- u32 gt_interrupts[] = {
- (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
-
- (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
-
- 0,
-
- (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
- };
-
- gt->pm_ier = 0x0;
- gt->pm_imr = ~gt->pm_ier;
- GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
- GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
- /*
- * RPS interrupts will get enabled/disabled on demand when RPS itself
- * is enabled/disabled. Same will be the case for GuC interrupts.
- */
- GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
- GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
-}
-
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -4237,7 +3830,7 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_pre_postinstall(dev_priv);
- gen8_gt_irq_postinstall(dev_priv);
+ gen8_gt_irq_postinstall(&dev_priv->gt);
gen8_de_irq_postinstall(dev_priv);
if (HAS_PCH_SPLIT(dev_priv))
@@ -4246,40 +3839,6 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
gen8_master_intr_enable(dev_priv->uncore.regs);
}
-static void gen11_gt_irq_postinstall(struct intel_gt *gt)
-{
- const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
- struct intel_uncore *uncore = gt->uncore;
- const u32 dmask = irqs << 16 | irqs;
- const u32 smask = irqs << 16;
-
- BUILD_BUG_ON(irqs & 0xffff0000);
-
- /* Enable RCS, BCS, VCS and VECS class interrupts. */
- intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
- intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
-
- /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
- intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
- intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
- intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
- intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
- intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
-
- /*
- * RPS interrupts will get enabled/disabled on demand when RPS itself
- * is enabled/disabled.
- */
- gt->pm_ier = 0x0;
- gt->pm_imr = ~gt->pm_ier;
- intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
- intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
-
- /* Same thing for GuC interrupts */
- intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
- intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
-}
-
static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
u32 mask = SDE_GMBUS_ICP;
@@ -4291,7 +3850,14 @@ static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
I915_WRITE(SDEIMR, ~mask);
- icp_hpd_detection_setup(dev_priv);
+ if (HAS_PCH_TGP(dev_priv))
+ icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
+ TGP_TC_HPD_ENABLE_MASK);
+ else if (HAS_PCH_MCC(dev_priv))
+ icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
+ else
+ icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
+ ICP_TC_HPD_ENABLE_MASK);
}
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -4315,7 +3881,7 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
- gen8_gt_irq_postinstall(dev_priv);
+ gen8_gt_irq_postinstall(&dev_priv->gt);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -4821,7 +4387,10 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (I915_HAS_HOTPLUG(dev_priv))
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else {
- if (INTEL_GEN(dev_priv) >= 11)
+ if (HAS_PCH_MCC(dev_priv))
+ /* EHL doesn't need most of gen11_hpd_irq_setup */
+ dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
+ else if (INTEL_GEN(dev_priv) >= 11)
dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
else if (IS_GEN9_LP(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
@@ -5014,3 +4583,17 @@ void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
intel_irq_reset(dev_priv);
intel_irq_postinstall(dev_priv);
}
+
+bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
+{
+ /*
+ * We only use drm_irq_uninstall() at unload and VT switch, so
+ * this is the only thing we need to check.
+ */
+ return dev_priv->runtime_pm.irqs_enabled;
+}
+
+void intel_synchronize_irq(struct drm_i915_private *i915)
+{
+ synchronize_irq(i915->drm.pdev->irq);
+}
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 8918809cd805..8e7e6071777e 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -6,13 +6,24 @@
#ifndef __I915_IRQ_H__
#define __I915_IRQ_H__
+#include <linux/ktime.h>
#include <linux/types.h>
-#include "i915_drv.h"
+#include "display/intel_display.h"
+#include "i915_reg.h"
+struct drm_crtc;
+struct drm_device;
+struct drm_display_mode;
struct drm_i915_private;
struct intel_crtc;
+struct intel_gt;
struct intel_guc;
+struct intel_uncore;
+
+void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir);
+void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
void intel_irq_init(struct drm_i915_private *dev_priv);
void intel_irq_fini(struct drm_i915_private *dev_priv);
@@ -78,35 +89,17 @@ ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen6_mask_pm_irq(struct intel_gt *gt, u32 mask);
-void gen6_unmask_pm_irq(struct intel_gt *gt, u32 mask);
void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
-
-static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
- u32 mask)
-{
- return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
-}
+u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915, u32 mask);
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
-static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
-{
- /*
- * We only use drm_irq_uninstall() at unload and VT switch, so
- * this is the only thing we need to check.
- */
- return dev_priv->runtime_pm.irqs_enabled;
-}
-
-static inline void intel_synchronize_irq(struct drm_i915_private *i915)
-{
- synchronize_irq(i915->drm.pdev->irq);
-}
+bool intel_irqs_enabled(struct drm_i915_private *dev_priv);
+void intel_synchronize_irq(struct drm_i915_private *i915);
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
@@ -139,4 +132,46 @@ void i965_disable_vblank(struct drm_crtc *crtc);
void ilk_disable_vblank(struct drm_crtc *crtc);
void bdw_disable_vblank(struct drm_crtc *crtc);
+void gen2_irq_reset(struct intel_uncore *uncore);
+void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
+ i915_reg_t iir, i915_reg_t ier);
+
+void gen2_irq_init(struct intel_uncore *uncore,
+ u32 imr_val, u32 ier_val);
+void gen3_irq_init(struct intel_uncore *uncore,
+ i915_reg_t imr, u32 imr_val,
+ i915_reg_t ier, u32 ier_val,
+ i915_reg_t iir);
+
+#define GEN8_IRQ_RESET_NDX(uncore, type, which) \
+({ \
+ unsigned int which_ = which; \
+ gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
+ GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
+})
+
+#define GEN3_IRQ_RESET(uncore, type) \
+ gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
+
+#define GEN2_IRQ_RESET(uncore) \
+ gen2_irq_reset(uncore)
+
+#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
+({ \
+ unsigned int which_ = which; \
+ gen3_irq_init((uncore), \
+ GEN8_##type##_IMR(which_), imr_val, \
+ GEN8_##type##_IER(which_), ier_val, \
+ GEN8_##type##_IIR(which_)); \
+})
+
+#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
+ gen3_irq_init((uncore), \
+ type##IMR, imr_val, \
+ type##IER, ier_val, \
+ type##IIR)
+
+#define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
+ gen2_irq_init((uncore), imr_val, ier_val)
+
#endif /* __I915_IRQ_H__ */
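
The GEN3_IRQ_RESET/GEN3_IRQ_INIT family moved into this header relies on token pasting: the type argument is glued onto IMR/IIR/IER to form the register names. A standalone demonstration of the mechanism with made-up register values:

#include <stdio.h>

/* Invented "register addresses" for the demo. */
#define DE_IMR 0x4400c
#define DE_IIR 0x44008
#define DE_IER 0x44004

/* In the style of GEN3_IRQ_RESET(uncore, type): DE_ ## IMR -> DE_IMR. */
#define IRQ_RESET(type) \
	irq_reset(type##IMR, type##IIR, type##IER)

static void irq_reset(unsigned imr, unsigned iir, unsigned ier)
{
	printf("reset IMR=0x%x IIR=0x%x IER=0x%x\n", imr, iir, ier);
}

int main(void)
{
	IRQ_RESET(DE_); /* expands to irq_reset(DE_IMR, DE_IIR, DE_IER) */
	return 0;
}
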
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index 79f8ec756362..07b04b0acb77 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -25,7 +25,7 @@
#include <linux/kernel.h>
#include <asm/fpu/api.h>
-#include "i915_drv.h"
+#include "i915_memcpy.h"
static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
diff --git a/drivers/gpu/drm/i915/i915_memcpy.h b/drivers/gpu/drm/i915/i915_memcpy.h
new file mode 100644
index 000000000000..970d84b16987
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_memcpy.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_MEMCPY_H__
+#define __I915_MEMCPY_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+void i915_memcpy_init_early(struct drm_i915_private *i915);
+bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
+
+/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
+ * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
+ * perform the operation. To check beforehand, pass in the parameters to
+ * i915_can_memcpy_from_wc() - since we only care about the low 4 bits,
+ * you only need to pass in the minor offsets; page-aligned pointers are
+ * always valid.
+ *
+ * To check only for SSE4.1, in the foreknowledge that the future use
+ * will be correctly aligned, use i915_has_memcpy_from_wc().
+ */
+#define i915_can_memcpy_from_wc(dst, src, len) \
+ i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0)
+
+#define i915_has_memcpy_from_wc() \
+ i915_memcpy_from_wc(NULL, NULL, 0)
+
+#endif /* __I915_MEMCPY_H__ */
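
The i915_can_memcpy_from_wc() macro above works because OR preserves any set bit: the OR of both addresses and the length has its low 4 bits clear iff each operand is 16-byte aligned. The check in isolation, where check_wc() is a stand-in for the real probe:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool check_wc(const void *dst, const void *src, unsigned long len)
{
	/* A misaligned low bit in any operand survives the OR. */
	return (((uintptr_t)dst | (uintptr_t)src | len) & 15) == 0;
}

int main(void)
{
	char buf[64] __attribute__((aligned(16)));

	printf("%d\n", check_wc(buf, buf + 16, 32)); /* 1: all 16B aligned */
	printf("%d\n", check_wc(buf, buf + 8, 32));  /* 0: src misaligned */
	return 0;
}
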
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index c23bb29e6d3e..318562ce64c0 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -63,9 +63,8 @@ int remap_io_mapping(struct vm_area_struct *vma,
struct remap_pfn r;
int err;
- GEM_BUG_ON((vma->vm_flags &
- (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)) !=
- (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP));
+#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+ GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
r.mm = vma->vm_mm;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index bd9211b3d76e..1974e4c78a43 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -522,8 +522,6 @@ static const struct intel_device_info intel_haswell_gt3_info = {
#define GEN8_FEATURES \
G75_FEATURES, \
GEN(8), \
- .page_sizes = I915_GTT_PAGE_SIZE_4K | \
- I915_GTT_PAGE_SIZE_2M, \
.has_logical_ring_contexts = 1, \
.ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 48, \
@@ -586,8 +584,7 @@ static const struct intel_device_info intel_cherryview_info = {
#define GEN9_DEFAULT_PAGE_SIZES \
.page_sizes = I915_GTT_PAGE_SIZE_4K | \
- I915_GTT_PAGE_SIZE_64K | \
- I915_GTT_PAGE_SIZE_2M
+ I915_GTT_PAGE_SIZE_64K
#define GEN9_FEATURES \
GEN8_FEATURES, \
@@ -727,8 +724,14 @@ static const struct intel_device_info intel_cannonlake_info = {
.gt = 2,
};
+#define GEN11_DEFAULT_PAGE_SIZES \
+ .page_sizes = I915_GTT_PAGE_SIZE_4K | \
+ I915_GTT_PAGE_SIZE_64K | \
+ I915_GTT_PAGE_SIZE_2M
+
#define GEN11_FEATURES \
GEN10_FEATURES, \
+ GEN11_DEFAULT_PAGE_SIZES, \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -783,7 +786,8 @@ static const struct intel_device_info intel_elkhartlake_info = {
[TRANSCODER_D] = TRANSCODER_D_OFFSET, \
[TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
[TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
- }
+ }, \
+ .has_global_mocs = 1
static const struct intel_device_info intel_tigerlake_12_info = {
GEN12_FEATURES,
@@ -873,16 +877,16 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static void i915_pci_remove(struct pci_dev *pdev)
{
- struct drm_device *dev;
+ struct drm_i915_private *i915;
- dev = pci_get_drvdata(pdev);
- if (!dev) /* driver load aborted, nothing to cleanup */
+ i915 = pci_get_drvdata(pdev);
+ if (!i915) /* driver load aborted, nothing to cleanup */
return;
- i915_driver_remove(dev);
- drm_dev_put(dev);
-
+ i915_driver_remove(i915);
pci_set_drvdata(pdev, NULL);
+
+ drm_dev_put(&i915->drm);
}
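
The reordered i915_pci_remove() unpublishes the drvdata pointer before dropping what may be the final reference, so nothing can look up a device that is about to be freed. A toy refcounted version of that ordering, with illustrative names:

#include <stdio.h>

struct dev { int refs; };

static struct dev *drvdata;

static void dev_put(struct dev *d)
{
	if (--d->refs == 0)
		printf("device freed\n");
}

static void remove(void)
{
	struct dev *d = drvdata;

	if (!d)             /* probe aborted: nothing to clean up */
		return;
	printf("driver remove\n");
	drvdata = NULL;     /* unpublish first... */
	dev_put(d);         /* ...then drop the last reference */
}

int main(void)
{
	struct dev d = { .refs = 1 };

	drvdata = &d;
	remove();
	return 0;
}
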
/* is device_id present in comma separated list of ids */
@@ -958,7 +962,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if (i915_inject_probe_failure()) {
+ if (i915_inject_probe_failure(pci_get_drvdata(pdev))) {
i915_pci_remove(pdev);
return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 988a4092164e..e42b86827d6b 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -200,6 +200,7 @@
#include "gt/intel_lrc_reg.h"
#include "i915_drv.h"
+#include "i915_perf.h"
#include "oa/i915_oa_hsw.h"
#include "oa/i915_oa_bdw.h"
#include "oa/i915_oa_chv.h"
@@ -364,6 +365,8 @@ struct perf_open_properties {
int oa_period_exponent;
};
+static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);
+
static void free_oa_config(struct drm_i915_private *dev_priv,
struct i915_oa_config *oa_config)
{
@@ -392,8 +395,8 @@ static int get_oa_config(struct drm_i915_private *dev_priv,
int ret;
if (metrics_set == 1) {
- *out_config = &dev_priv->perf.oa.test_config;
- atomic_inc(&dev_priv->perf.oa.test_config.ref_count);
+ *out_config = &dev_priv->perf.test_config;
+ atomic_inc(&dev_priv->perf.test_config.ref_count);
return 0;
}
@@ -412,13 +415,16 @@ static int get_oa_config(struct drm_i915_private *dev_priv,
return ret;
}
-static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv)
+static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}
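
From here on, the i915_perf.c hunks replace the device-global dev_priv->perf.oa.* block with state carried by the stream object itself, so helpers take an i915_perf_stream and a second stream would need no new plumbing. The shape of the move, reduced to a few fields with hypothetical names:

#include <stdint.h>
#include <stdio.h>

struct oa_buffer { uint32_t head, tail; };

struct perf_stream {
	struct oa_buffer oa_buffer; /* was: one global buffer per device */
	int period_exponent;
};

/* Helpers now key off the stream, not the device. */
static uint32_t stream_tail(const struct perf_stream *stream)
{
	return stream->oa_buffer.tail;
}

int main(void)
{
	struct perf_stream a = { { 0, 64 }, 12 };
	struct perf_stream b = { { 0, 128 }, 8 }; /* independent state */

	printf("tails: %u %u\n", stream_tail(&a), stream_tail(&b));
	return 0;
}
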
-static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
+static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
u32 oastatus1 = I915_READ(GEN7_OASTATUS1);
return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
@@ -426,7 +432,7 @@ static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
/**
* oa_buffer_check_unlocked - check for data and update tail ptr state
- * @dev_priv: i915 device instance
+ * @stream: i915 stream instance
*
* This is either called via fops (for blocking reads in user ctx) or the poll
* check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
@@ -448,9 +454,10 @@ static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
*
* Returns: %true if the OA buffer contains data, else %false
*/
-static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
+static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
- int report_size = dev_priv->perf.oa.oa_buffer.format_size;
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ int report_size = stream->oa_buffer.format_size;
unsigned long flags;
unsigned int aged_idx;
u32 head, hw_tail, aged_tail, aging_tail;
@@ -460,19 +467,19 @@ static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
* could result in an OA buffer reset which might reset the head,
* tails[] and aged_tail state.
*/
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
/* NB: The head we observe here might effectively be a little out of
* date (between head and tails[aged_idx].offset if there is currently
* a read() in progress.
*/
- head = dev_priv->perf.oa.oa_buffer.head;
+ head = stream->oa_buffer.head;
- aged_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
- aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset;
- aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset;
+ aged_idx = stream->oa_buffer.aged_tail_idx;
+ aged_tail = stream->oa_buffer.tails[aged_idx].offset;
+ aging_tail = stream->oa_buffer.tails[!aged_idx].offset;
- hw_tail = dev_priv->perf.oa.ops.oa_hw_tail_read(dev_priv);
+ hw_tail = dev_priv->perf.ops.oa_hw_tail_read(stream);
/* The tail pointer increases in 64 byte increments,
* not in report_size steps...
@@ -492,16 +499,16 @@ static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
* available) without needing to wait for a later hrtimer callback.
*/
if (aging_tail != INVALID_TAIL_PTR &&
- ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) >
+ ((now - stream->oa_buffer.aging_timestamp) >
OA_TAIL_MARGIN_NSEC)) {
aged_idx ^= 1;
- dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx;
+ stream->oa_buffer.aged_tail_idx = aged_idx;
aged_tail = aging_tail;
/* Mark that we need a new pointer to start aging... */
- dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
aging_tail = INVALID_TAIL_PTR;
}
@@ -516,7 +523,7 @@ static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
if (aging_tail == INVALID_TAIL_PTR &&
(aged_tail == INVALID_TAIL_PTR ||
OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
- struct i915_vma *vma = dev_priv->perf.oa.oa_buffer.vma;
+ struct i915_vma *vma = stream->oa_buffer.vma;
u32 gtt_offset = i915_ggtt_offset(vma);
/* Be paranoid and do a bounds check on the pointer read back
@@ -525,16 +532,16 @@ static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
*/
if (hw_tail >= gtt_offset &&
hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
- dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset =
+ stream->oa_buffer.tails[!aged_idx].offset =
aging_tail = hw_tail;
- dev_priv->perf.oa.oa_buffer.aging_timestamp = now;
+ stream->oa_buffer.aging_timestamp = now;
} else {
DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n",
hw_tail);
}
}
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
return aged_tail == INVALID_TAIL_PTR ?
false : OA_TAKEN(aged_tail, head) >= report_size;
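
oa_buffer_check_unlocked() trusts the hardware tail only after it has "aged": a freshly read tail starts in tails[!aged_idx] and is promoted once the margin has passed, giving in-flight report writes time to land in memory. A toy single-tail model of the aging step (margin, types and names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define INVALID_TAIL ((uint32_t)~0u)
#define MARGIN_NSEC  100000ull /* illustrative aging margin */

struct tails { uint32_t aged, aging; uint64_t aging_ts; };

static uint32_t check_tail(struct tails *t, uint32_t hw_tail, uint64_t now)
{
	if (t->aging != INVALID_TAIL && now - t->aging_ts > MARGIN_NSEC) {
		t->aged = t->aging;      /* promote: safe to read up to here */
		t->aging = INVALID_TAIL;
	}
	if (t->aging == INVALID_TAIL && hw_tail != t->aged) {
		t->aging = hw_tail;      /* start aging the new tail */
		t->aging_ts = now;
	}
	return t->aged;
}

int main(void)
{
	struct tails t = { 0, INVALID_TAIL, 0 };

	printf("aged=%u\n", check_tail(&t, 4096, 0));      /* still 0 */
	printf("aged=%u\n", check_tail(&t, 4096, 200000)); /* now 4096 */
	return 0;
}
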
@@ -597,8 +604,7 @@ static int append_oa_sample(struct i915_perf_stream *stream,
size_t *offset,
const u8 *report)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- int report_size = dev_priv->perf.oa.oa_buffer.format_size;
+ int report_size = stream->oa_buffer.format_size;
struct drm_i915_perf_record_header header;
u32 sample_flags = stream->sample_flags;
@@ -650,9 +656,9 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
size_t *offset)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- int report_size = dev_priv->perf.oa.oa_buffer.format_size;
- u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
- u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+ int report_size = stream->oa_buffer.format_size;
+ u8 *oa_buf_base = stream->oa_buffer.vaddr;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
u32 mask = (OA_BUFFER_SIZE - 1);
size_t start_offset = *offset;
unsigned long flags;
@@ -664,13 +670,13 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
if (WARN_ON(!stream->enabled))
return -EIO;
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- head = dev_priv->perf.oa.oa_buffer.head;
- aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
- tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;
+ head = stream->oa_buffer.head;
+ aged_tail_idx = stream->oa_buffer.aged_tail_idx;
+ tail = stream->oa_buffer.tails[aged_tail_idx].offset;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/*
* An invalid tail pointer here means we're still waiting for the poll
@@ -734,12 +740,12 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
OAREPORT_REASON_MASK);
if (reason == 0) {
- if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
+ if (__ratelimit(&dev_priv->perf.spurious_report_rs))
DRM_NOTE("Skipping spurious, invalid OA report\n");
continue;
}
- ctx_id = report32[2] & dev_priv->perf.oa.specific_ctx_id_mask;
+ ctx_id = report32[2] & stream->specific_ctx_id_mask;
/*
* Squash whatever is in the CTX_ID field if it's marked as
@@ -749,7 +755,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* Note: that we don't clear the valid_ctx_bit so userspace can
* understand that the ID has been squashed by the kernel.
*/
- if (!(report32[0] & dev_priv->perf.oa.gen8_valid_ctx_bit))
+ if (!(report32[0] & dev_priv->perf.gen8_valid_ctx_bit))
ctx_id = report32[2] = INVALID_CTX_ID;
/*
@@ -783,18 +789,17 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* switches since it's not-uncommon for periodic samples to
* identify a switch before any 'context switch' report.
*/
- if (!dev_priv->perf.oa.exclusive_stream->ctx ||
- dev_priv->perf.oa.specific_ctx_id == ctx_id ||
- (dev_priv->perf.oa.oa_buffer.last_ctx_id ==
- dev_priv->perf.oa.specific_ctx_id) ||
+ if (!dev_priv->perf.exclusive_stream->ctx ||
+ stream->specific_ctx_id == ctx_id ||
+ stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
reason & OAREPORT_REASON_CTX_SWITCH) {
/*
* While filtering for a single context we avoid
* leaking the IDs of other contexts.
*/
- if (dev_priv->perf.oa.exclusive_stream->ctx &&
- dev_priv->perf.oa.specific_ctx_id != ctx_id) {
+ if (dev_priv->perf.exclusive_stream->ctx &&
+ stream->specific_ctx_id != ctx_id) {
report32[2] = INVALID_CTX_ID;
}
@@ -803,7 +808,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
if (ret)
break;
- dev_priv->perf.oa.oa_buffer.last_ctx_id = ctx_id;
+ stream->oa_buffer.last_ctx_id = ctx_id;
}
/*
@@ -817,7 +822,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
}
if (start_offset != *offset) {
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
/*
* We removed the gtt_offset for the copy loop above, indexing
@@ -826,9 +831,9 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
head += gtt_offset;
I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
- dev_priv->perf.oa.oa_buffer.head = head;
+ stream->oa_buffer.head = head;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
}
return ret;
@@ -863,7 +868,7 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
u32 oastatus;
int ret;
- if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
+ if (WARN_ON(!stream->oa_buffer.vaddr))
return -EIO;
oastatus = I915_READ(GEN8_OASTATUS);
@@ -889,10 +894,10 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
return ret;
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
- dev_priv->perf.oa.period_exponent);
+ stream->period_exponent);
- dev_priv->perf.oa.ops.oa_disable(stream);
- dev_priv->perf.oa.ops.oa_enable(stream);
+ dev_priv->perf.ops.oa_disable(stream);
+ dev_priv->perf.ops.oa_enable(stream);
/*
* Note: .oa_enable() is expected to re-init the oabuffer and
@@ -939,9 +944,9 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
size_t *offset)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- int report_size = dev_priv->perf.oa.oa_buffer.format_size;
- u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
- u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+ int report_size = stream->oa_buffer.format_size;
+ u8 *oa_buf_base = stream->oa_buffer.vaddr;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
u32 mask = (OA_BUFFER_SIZE - 1);
size_t start_offset = *offset;
unsigned long flags;
@@ -953,13 +958,13 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
if (WARN_ON(!stream->enabled))
return -EIO;
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- head = dev_priv->perf.oa.oa_buffer.head;
- aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
- tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;
+ head = stream->oa_buffer.head;
+ aged_tail_idx = stream->oa_buffer.aged_tail_idx;
+ tail = stream->oa_buffer.tails[aged_tail_idx].offset;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/* An invalid tail pointer here means we're still waiting for the poll
* hrtimer callback to give us a pointer
@@ -1012,7 +1017,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
* copying it to userspace...
*/
if (report32[0] == 0) {
- if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
+ if (__ratelimit(&dev_priv->perf.spurious_report_rs))
DRM_NOTE("Skipping spurious, invalid OA report\n");
continue;
}
@@ -1031,7 +1036,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
}
if (start_offset != *offset) {
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
/* We removed the gtt_offset for the copy loop above, indexing
* relative to oa_buf_base so put back here...
@@ -1041,9 +1046,9 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
I915_WRITE(GEN7_OASTATUS2,
((head & GEN7_OASTATUS2_HEAD_MASK) |
GEN7_OASTATUS2_MEM_SELECT_GGTT));
- dev_priv->perf.oa.oa_buffer.head = head;
+ stream->oa_buffer.head = head;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
}
return ret;
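Both the gen7 and gen8 append paths walk the OA ring with the same arithmetic: OA_BUFFER_SIZE is a power of two, so wrapping reduces to an AND with (size - 1), and the number of bytes available is (tail - head) under the same mask. A standalone sketch of that loop, assuming an illustrative report size and with the copy to userspace elided:

#include <stddef.h>
#include <stdint.h>

#define OA_BUFFER_SIZE (16u * 1024 * 1024)      /* power of two */
#define REPORT_SIZE 64u                         /* illustrative */

/* Walk from head to tail, wrapping with a power-of-two mask; a real
 * reader would copy each report to userspace before advancing. */
static size_t consume(const uint8_t *buf, uint32_t head, uint32_t tail)
{
        const uint32_t mask = OA_BUFFER_SIZE - 1;
        size_t taken = 0;

        while (((tail - head) & mask) >= REPORT_SIZE) {
                const uint8_t *report = buf + (head & mask);

                (void)report;
                head += REPORT_SIZE;
                taken += REPORT_SIZE;
        }
        return taken;
}

int main(void)
{
        static uint8_t buf[OA_BUFFER_SIZE];

        return consume(buf, 0, 4 * REPORT_SIZE) == 4 * REPORT_SIZE ? 0 : 1;
}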
@@ -1074,7 +1079,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
u32 oastatus1;
int ret;
- if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
+ if (WARN_ON(!stream->oa_buffer.vaddr))
return -EIO;
oastatus1 = I915_READ(GEN7_OASTATUS1);
@@ -1084,7 +1089,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
* may be updated asynchronously) so we ignore status bits
* that have already been reported to userspace.
*/
- oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1;
+ oastatus1 &= ~dev_priv->perf.gen7_latched_oastatus1;
/* We treat OABUFFER_OVERFLOW as a significant error:
*
@@ -1113,10 +1118,10 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
return ret;
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
- dev_priv->perf.oa.period_exponent);
+ stream->period_exponent);
- dev_priv->perf.oa.ops.oa_disable(stream);
- dev_priv->perf.oa.ops.oa_enable(stream);
+ dev_priv->perf.ops.oa_disable(stream);
+ dev_priv->perf.ops.oa_enable(stream);
oastatus1 = I915_READ(GEN7_OASTATUS1);
}
@@ -1126,7 +1131,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
DRM_I915_PERF_RECORD_OA_REPORT_LOST);
if (ret)
return ret;
- dev_priv->perf.oa.gen7_latched_oastatus1 |=
+ dev_priv->perf.gen7_latched_oastatus1 |=
GEN7_OASTATUS1_REPORT_LOST;
}
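Note that gen7_latched_oastatus1 stays in the shared perf state rather than moving into the stream: it is tied to the single OA unit, because Haswell cannot clear OASTATUS1 while periodic sampling runs, so bits already reported to userspace are remembered and masked out of later reads. A sketch of that latching, with hw_read() standing in for the MMIO read and the bit position invented:

#include <stdint.h>
#include <stdio.h>

#define REPORT_LOST (1u << 0)   /* illustrative bit */

static uint32_t latched;

static uint32_t hw_read(void)
{
        return REPORT_LOST;     /* pretend the bit is stuck on */
}

/* Mask out bits we already reported, then latch any new ones. */
static uint32_t read_new_status(void)
{
        uint32_t status = hw_read() & ~latched;

        if (status & REPORT_LOST)
                latched |= REPORT_LOST;
        return status;
}

int main(void)
{
        printf("first: 0x%x second: 0x%x\n", read_new_status(),
               read_new_status());
        return 0;
}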
@@ -1149,14 +1154,12 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
*/
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
-
/* We would wait indefinitely if periodic sampling is not enabled */
- if (!dev_priv->perf.oa.periodic)
+ if (!stream->periodic)
return -EIO;
- return wait_event_interruptible(dev_priv->perf.oa.poll_wq,
- oa_buffer_check_unlocked(dev_priv));
+ return wait_event_interruptible(stream->poll_wq,
+ oa_buffer_check_unlocked(stream));
}
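i915_oa_wait_unlocked() now blocks on the stream's own poll_wq, which the per-stream hrtimer (moved later in this patch) wakes. A userspace analogue of that handshake using a condition variable; all names here are illustrative:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t poll_wq = PTHREAD_COND_INITIALIZER;
static bool pollin;

/* Reader side: sleep until the sampler reports data. */
static void wait_unlocked(void)
{
        pthread_mutex_lock(&lock);
        while (!pollin)
                pthread_cond_wait(&poll_wq, &lock);
        pthread_mutex_unlock(&lock);
}

/* Timer side: flag data and wake any sleeper. */
static void timer_cb(void)
{
        pthread_mutex_lock(&lock);
        pollin = true;
        pthread_cond_signal(&poll_wq);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        timer_cb();
        wait_unlocked();
        return 0;
}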
/**
@@ -1173,9 +1176,7 @@ static void i915_oa_poll_wait(struct i915_perf_stream *stream,
struct file *file,
poll_table *wait)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
-
- poll_wait(file, &dev_priv->perf.oa.poll_wq, wait);
+ poll_wait(file, &stream->poll_wq, wait);
}
/**
@@ -1197,13 +1198,14 @@ static int i915_oa_read(struct i915_perf_stream *stream,
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
+ return dev_priv->perf.ops.read(stream, buf, count, offset);
}
-static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
- struct i915_gem_context *ctx)
+static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
struct i915_gem_engines_iter it;
+ struct drm_i915_private *i915 = stream->dev_priv;
+ struct i915_gem_context *ctx = stream->ctx;
struct intel_context *ce;
int err;
@@ -1221,7 +1223,7 @@ static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
*/
err = intel_context_pin(ce);
if (err == 0) {
- i915->perf.oa.pinned_ctx = ce;
+ stream->pinned_ctx = ce;
break;
}
}
@@ -1231,7 +1233,7 @@ static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
if (err)
return ERR_PTR(err);
- return i915->perf.oa.pinned_ctx;
+ return stream->pinned_ctx;
}
/**
@@ -1249,7 +1251,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
struct drm_i915_private *i915 = stream->dev_priv;
struct intel_context *ce;
- ce = oa_pin_context(i915, stream->ctx);
+ ce = oa_pin_context(stream);
if (IS_ERR(ce))
return PTR_ERR(ce);
@@ -1259,8 +1261,8 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
* On Haswell we don't do any post processing of the reports
* and don't need to use the mask.
*/
- i915->perf.oa.specific_ctx_id = i915_ggtt_offset(ce->state);
- i915->perf.oa.specific_ctx_id_mask = 0;
+ stream->specific_ctx_id = i915_ggtt_offset(ce->state);
+ stream->specific_ctx_id_mask = 0;
break;
}
@@ -1278,33 +1280,33 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
* dropped by GuC. They won't be part of the context
* ID in the OA reports, so squash those lower bits.
*/
- i915->perf.oa.specific_ctx_id =
+ stream->specific_ctx_id =
lower_32_bits(ce->lrc_desc) >> 12;
/*
* GuC uses the top bit to signal proxy submission, so
* ignore that bit.
*/
- i915->perf.oa.specific_ctx_id_mask =
+ stream->specific_ctx_id_mask =
(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
} else {
- i915->perf.oa.specific_ctx_id_mask =
+ stream->specific_ctx_id_mask =
(1U << GEN8_CTX_ID_WIDTH) - 1;
- i915->perf.oa.specific_ctx_id =
+ stream->specific_ctx_id =
upper_32_bits(ce->lrc_desc);
- i915->perf.oa.specific_ctx_id &=
- i915->perf.oa.specific_ctx_id_mask;
+ stream->specific_ctx_id &=
+ stream->specific_ctx_id_mask;
}
break;
case 11: {
- i915->perf.oa.specific_ctx_id_mask =
+ stream->specific_ctx_id_mask =
((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);
- i915->perf.oa.specific_ctx_id = upper_32_bits(ce->lrc_desc);
- i915->perf.oa.specific_ctx_id &=
- i915->perf.oa.specific_ctx_id_mask;
+ stream->specific_ctx_id = upper_32_bits(ce->lrc_desc);
+ stream->specific_ctx_id &=
+ stream->specific_ctx_id_mask;
break;
}
@@ -1313,8 +1315,8 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
}
DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
- i915->perf.oa.specific_ctx_id,
- i915->perf.oa.specific_ctx_id_mask);
+ stream->specific_ctx_id,
+ stream->specific_ctx_id_mask);
return 0;
}
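The Gen11 branch builds specific_ctx_id_mask from the field layout of the upper half of the lrc descriptor. A quick standalone check of that expression; the width/shift values are assumed from the i915 headers of this era and should be treated as a snapshot, not a stable ABI:

#include <stdint.h>
#include <stdio.h>

/* Assumed snapshot of the Gen11 descriptor layout. */
#define GEN11_SW_CTX_ID_WIDTH           11
#define GEN11_SW_CTX_ID_SHIFT           37
#define GEN11_ENGINE_INSTANCE_WIDTH     6
#define GEN11_ENGINE_INSTANCE_SHIFT     48
#define GEN11_ENGINE_CLASS_WIDTH        3
#define GEN11_ENGINE_CLASS_SHIFT        61

int main(void)
{
        uint32_t mask =
                ((1u << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
                ((1u << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
                ((1u << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);

        printf("specific_ctx_id_mask = 0x%08x\n", mask);  /* 0xe03fffe0 */
        return 0;
}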
@@ -1331,10 +1333,10 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
struct drm_i915_private *dev_priv = stream->dev_priv;
struct intel_context *ce;
- dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
- dev_priv->perf.oa.specific_ctx_id_mask = 0;
+ stream->specific_ctx_id = INVALID_CTX_ID;
+ stream->specific_ctx_id_mask = 0;
- ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx);
+ ce = fetch_and_zero(&stream->pinned_ctx);
if (ce) {
mutex_lock(&dev_priv->drm.struct_mutex);
intel_context_unpin(ce);
@@ -1343,34 +1345,36 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
}
static void
-free_oa_buffer(struct drm_i915_private *i915)
+free_oa_buffer(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *i915 = stream->dev_priv;
+
mutex_lock(&i915->drm.struct_mutex);
- i915_vma_unpin_and_release(&i915->perf.oa.oa_buffer.vma,
+ i915_vma_unpin_and_release(&stream->oa_buffer.vma,
I915_VMA_RELEASE_MAP);
mutex_unlock(&i915->drm.struct_mutex);
- i915->perf.oa.oa_buffer.vaddr = NULL;
+ stream->oa_buffer.vaddr = NULL;
}
static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- BUG_ON(stream != dev_priv->perf.oa.exclusive_stream);
+ BUG_ON(stream != dev_priv->perf.exclusive_stream);
/*
* Unset exclusive_stream first, it will be checked while disabling
* the metric set on gen8+.
*/
mutex_lock(&dev_priv->drm.struct_mutex);
- dev_priv->perf.oa.exclusive_stream = NULL;
- dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
+ dev_priv->perf.exclusive_stream = NULL;
+ dev_priv->perf.ops.disable_metric_set(stream);
mutex_unlock(&dev_priv->drm.struct_mutex);
- free_oa_buffer(dev_priv);
+ free_oa_buffer(stream);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);
@@ -1380,41 +1384,42 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
put_oa_config(dev_priv, stream->oa_config);
- if (dev_priv->perf.oa.spurious_report_rs.missed) {
+ if (dev_priv->perf.spurious_report_rs.missed) {
DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
- dev_priv->perf.oa.spurious_report_rs.missed);
+ dev_priv->perf.spurious_report_rs.missed);
}
}
-static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
+static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
{
- u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
unsigned long flags;
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
/* Pre-DevBDW: OABUFFER must be set with counters off,
* before OASTATUS1, but after OASTATUS2
*/
I915_WRITE(GEN7_OASTATUS2,
gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); /* head */
- dev_priv->perf.oa.oa_buffer.head = gtt_offset;
+ stream->oa_buffer.head = gtt_offset;
I915_WRITE(GEN7_OABUFFER, gtt_offset);
I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */
/* Mark that we need updated tail pointers to read from... */
- dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
- dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/* On Haswell we have to track which OASTATUS1 flags we've
* already seen since they can't be cleared while periodic
* sampling is enabled.
*/
- dev_priv->perf.oa.gen7_latched_oastatus1 = 0;
+ dev_priv->perf.gen7_latched_oastatus1 = 0;
/* NB: although the OA buffer will initially be allocated
* zeroed via shmfs (and so this memset is redundant when
@@ -1427,24 +1432,25 @@ static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
* the assumption that new reports are being written to zeroed
* memory...
*/
- memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
+ memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
/* Maybe make ->pollin per-stream state if we support multiple
* concurrent streams in the future.
*/
- dev_priv->perf.oa.pollin = false;
+ stream->pollin = false;
}
-static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
+static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
{
- u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
unsigned long flags;
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
I915_WRITE(GEN8_OASTATUS, 0);
I915_WRITE(GEN8_OAHEADPTR, gtt_offset);
- dev_priv->perf.oa.oa_buffer.head = gtt_offset;
+ stream->oa_buffer.head = gtt_offset;
I915_WRITE(GEN8_OABUFFER_UDW, 0);
@@ -1461,17 +1467,17 @@ static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
/* Mark that we need updated tail pointers to read from... */
- dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
- dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
/*
* Reset state used to recognise context switches, affecting which
* reports we will forward to userspace while filtering for a single
* context.
*/
- dev_priv->perf.oa.oa_buffer.last_ctx_id = INVALID_CTX_ID;
+ stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/*
* NB: although the OA buffer will initially be allocated
@@ -1485,22 +1491,23 @@ static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
* the assumption that new reports are being written to zeroed
* memory...
*/
- memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
+ memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
/*
* Maybe make ->pollin per-stream state if we support multiple
* concurrent streams in the future.
*/
- dev_priv->perf.oa.pollin = false;
+ stream->pollin = false;
}
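Both init paths reset tails[0]/tails[1] to INVALID_TAIL_PTR because the driver double-buffers the hardware tail: the hrtimer records a fresh tail in the "aging" slot and only promotes it to "aged" (the bound readers may trust) once it has proven stable, which guards against reading reports the GPU has not finished writing. A simplified sketch of that promotion; the real code ages by elapsed time rather than by number of checks:

#include <stdint.h>

#define INVALID_TAIL_PTR 0xffffffffu

struct oa_tails {
        uint32_t offset[2];
        unsigned int aged_idx;  /* slot readers may trust */
};

/* Promote the aging tail once the hardware reports the same value
 * twice in a row; otherwise keep aging the new value. */
static void age_tail(struct oa_tails *t, uint32_t hw_tail)
{
        unsigned int aging = !t->aged_idx;

        if (t->offset[aging] == hw_tail) {
                t->aged_idx = aging;
                t->offset[!aging] = INVALID_TAIL_PTR;
        } else {
                t->offset[aging] = hw_tail;
        }
}

int main(void)
{
        struct oa_tails t = { { INVALID_TAIL_PTR, INVALID_TAIL_PTR }, 0 };

        age_tail(&t, 0x40);     /* first sighting: starts aging */
        age_tail(&t, 0x40);     /* stable: promoted to aged */
        return t.offset[t.aged_idx] == 0x40 ? 0 : 1;
}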
-static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
+static int alloc_oa_buffer(struct i915_perf_stream *stream)
{
struct drm_i915_gem_object *bo;
+ struct drm_i915_private *dev_priv = stream->dev_priv;
struct i915_vma *vma;
int ret;
- if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma))
+ if (WARN_ON(stream->oa_buffer.vma))
return -ENODEV;
ret = i915_mutex_lock_interruptible(&dev_priv->drm);
@@ -1525,18 +1532,18 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
ret = PTR_ERR(vma);
goto err_unref;
}
- dev_priv->perf.oa.oa_buffer.vma = vma;
+ stream->oa_buffer.vma = vma;
- dev_priv->perf.oa.oa_buffer.vaddr =
+ stream->oa_buffer.vaddr =
i915_gem_object_pin_map(bo, I915_MAP_WB);
- if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) {
- ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr);
+ if (IS_ERR(stream->oa_buffer.vaddr)) {
+ ret = PTR_ERR(stream->oa_buffer.vaddr);
goto err_unpin;
}
DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
- i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
- dev_priv->perf.oa.oa_buffer.vaddr);
+ i915_ggtt_offset(stream->oa_buffer.vma),
+ stream->oa_buffer.vaddr);
goto unlock;
@@ -1546,8 +1553,8 @@ err_unpin:
err_unref:
i915_gem_object_put(bo);
- dev_priv->perf.oa.oa_buffer.vaddr = NULL;
- dev_priv->perf.oa.oa_buffer.vma = NULL;
+ stream->oa_buffer.vaddr = NULL;
+ stream->oa_buffer.vma = NULL;
unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
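alloc_oa_buffer() keeps the kernel's usual unwind discipline while switching to per-stream storage: each failure jumps to a label that releases exactly what was acquired before it, and the error path also clears the stream pointers so a later free_oa_buffer() sees consistent state. The shape of that pattern in a standalone sketch, with plain malloc in place of GEM objects:

#include <stdlib.h>

/* Acquire two resources; on failure release only what we hold. */
static int alloc_two(char **a, char **b)
{
        *a = malloc(64);
        if (!*a)
                goto err;

        *b = malloc(64);
        if (!*b)
                goto err_free_a;

        return 0;

err_free_a:
        free(*a);
        *a = NULL;
err:
        return -1;
}

int main(void)
{
        char *a, *b;

        if (alloc_two(&a, &b))
                return 1;
        free(b);
        free(a);
        return 0;
}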
@@ -1623,8 +1630,10 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream)
return 0;
}
-static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
+static void hsw_disable_metric_set(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
@@ -1663,13 +1672,14 @@ static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
* in the case that the OA unit has been disabled.
*/
static void
-gen8_update_reg_state_unlocked(struct intel_context *ce,
+gen8_update_reg_state_unlocked(struct i915_perf_stream *stream,
+ struct intel_context *ce,
u32 *reg_state,
const struct i915_oa_config *oa_config)
{
- struct drm_i915_private *i915 = ce->gem_context->i915;
- u32 ctx_oactxctrl = i915->perf.oa.ctx_oactxctrl_offset;
- u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset;
+ struct drm_i915_private *i915 = ce->engine->i915;
+ u32 ctx_oactxctrl = i915->perf.ctx_oactxctrl_offset;
+ u32 ctx_flexeu0 = i915->perf.ctx_flexeu0_offset;
/* The MMIO offsets for Flex EU registers aren't contiguous */
i915_reg_t flex_regs[] = {
EU_PERF_CNTL0,
@@ -1683,8 +1693,8 @@ gen8_update_reg_state_unlocked(struct intel_context *ce,
int i;
CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL,
- (i915->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
- (i915->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
GEN8_OA_COUNTER_RESUME);
for (i = 0; i < ARRAY_SIZE(flex_regs); i++) {
@@ -1846,11 +1856,12 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
*
* Note: it's only the RCS/Render context that has any OA state.
*/
-static int gen8_configure_all_contexts(struct drm_i915_private *i915,
+static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
const struct i915_oa_config *oa_config)
{
+ struct drm_i915_private *i915 = stream->dev_priv;
/* The MMIO offsets for Flex EU registers aren't contiguous */
- const u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset;
+ const u32 ctx_flexeu0 = i915->perf.ctx_flexeu0_offset;
#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N))
struct flex regs[] = {
{
@@ -1859,9 +1870,9 @@ static int gen8_configure_all_contexts(struct drm_i915_private *i915,
},
{
GEN8_OACTXCONTROL,
- i915->perf.oa.ctx_oactxctrl_offset,
- ((i915->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
- (i915->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ i915->perf.ctx_oactxctrl_offset,
+ ((stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
GEN8_OA_COUNTER_RESUME)
},
{ EU_PERF_CNTL0, ctx_flexeuN(0) },
@@ -1875,7 +1886,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *i915,
#undef ctx_flexeuN
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
- enum intel_engine_id id;
int i;
for (i = 2; i < ARRAY_SIZE(regs); i++)
@@ -1915,7 +1925,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *i915,
* If we don't modify the kernel_context, we do not get events while
* idle.
*/
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
struct intel_context *ce = engine->kernel_context;
int err;
@@ -1972,7 +1982,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
- ret = gen8_configure_all_contexts(dev_priv, oa_config);
+ ret = gen8_configure_all_contexts(stream, oa_config);
if (ret)
return ret;
@@ -1985,19 +1995,23 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
return 0;
}
-static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
+static void gen8_disable_metric_set(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
/* Reset all contexts' slices/subslices configurations. */
- gen8_configure_all_contexts(dev_priv, NULL);
+ gen8_configure_all_contexts(stream, NULL);
I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
~GT_NOA_ENABLE));
}
-static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
+static void gen10_disable_metric_set(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
/* Reset all contexts' slices/subslices configurations. */
- gen8_configure_all_contexts(dev_priv, NULL);
+ gen8_configure_all_contexts(stream, NULL);
/* Make sure we disable noa to save power. */
I915_WRITE(RPM_CONFIG1,
@@ -2008,10 +2022,10 @@ static void gen7_oa_enable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
struct i915_gem_context *ctx = stream->ctx;
- u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
- bool periodic = dev_priv->perf.oa.periodic;
- u32 period_exponent = dev_priv->perf.oa.period_exponent;
- u32 report_format = dev_priv->perf.oa.oa_buffer.format;
+ u32 ctx_id = stream->specific_ctx_id;
+ bool periodic = stream->periodic;
+ u32 period_exponent = stream->period_exponent;
+ u32 report_format = stream->oa_buffer.format;
/*
* Reset buf pointers so we don't forward reports from before now.
@@ -2022,7 +2036,7 @@ static void gen7_oa_enable(struct i915_perf_stream *stream)
* on the assumption that certain fields are written to zeroed
* memory, which this helps maintain.
*/
- gen7_init_oa_buffer(dev_priv);
+ gen7_init_oa_buffer(stream);
I915_WRITE(GEN7_OACONTROL,
(ctx_id & GEN7_OACONTROL_CTX_MASK) |
@@ -2037,7 +2051,7 @@ static void gen7_oa_enable(struct i915_perf_stream *stream)
static void gen8_oa_enable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- u32 report_format = dev_priv->perf.oa.oa_buffer.format;
+ u32 report_format = stream->oa_buffer.format;
/*
* Reset buf pointers so we don't forward reports from before now.
@@ -2048,7 +2062,7 @@ static void gen8_oa_enable(struct i915_perf_stream *stream)
* on the assumption that certain fields are written to zeroed
* memory, which this helps maintain.
*/
- gen8_init_oa_buffer(dev_priv);
+ gen8_init_oa_buffer(stream);
/*
* Note: we don't rely on the hardware to perform single context
@@ -2073,10 +2087,10 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- dev_priv->perf.oa.ops.oa_enable(stream);
+ dev_priv->perf.ops.oa_enable(stream);
- if (dev_priv->perf.oa.periodic)
- hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
+ if (stream->periodic)
+ hrtimer_start(&stream->poll_check_timer,
ns_to_ktime(POLL_PERIOD),
HRTIMER_MODE_REL_PINNED);
}
@@ -2115,10 +2129,10 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- dev_priv->perf.oa.ops.oa_disable(stream);
+ dev_priv->perf.ops.oa_disable(stream);
- if (dev_priv->perf.oa.periodic)
- hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
+ if (stream->periodic)
+ hrtimer_cancel(&stream->poll_check_timer);
}
static const struct i915_perf_stream_ops i915_oa_stream_ops = {
@@ -2170,7 +2184,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return -EINVAL;
}
- if (!dev_priv->perf.oa.ops.enable_metric_set) {
+ if (!dev_priv->perf.ops.enable_metric_set) {
DRM_DEBUG("OA unit not supported\n");
return -ENODEV;
}
@@ -2179,7 +2193,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
* counter reports and marshal to the appropriate client
* we currently only allow exclusive access
*/
- if (dev_priv->perf.oa.exclusive_stream) {
+ if (dev_priv->perf.exclusive_stream) {
DRM_DEBUG("OA unit already in use\n");
return -EBUSY;
}
@@ -2189,43 +2203,23 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return -EINVAL;
}
- /* We set up some ratelimit state to potentially throttle any _NOTES
- * about spurious, invalid OA reports which we don't forward to
- * userspace.
- *
- * The initialization is associated with opening the stream (not driver
- * init) considering we print a _NOTE about any throttling when closing
- * the stream instead of waiting until driver _fini which no one would
- * ever see.
- *
- * Using the same limiting factors as printk_ratelimit()
- */
- ratelimit_state_init(&dev_priv->perf.oa.spurious_report_rs,
- 5 * HZ, 10);
- /* Since we use a DRM_NOTE for spurious reports it would be
- * inconsistent to let __ratelimit() automatically print a warning for
- * throttling.
- */
- ratelimit_set_flags(&dev_priv->perf.oa.spurious_report_rs,
- RATELIMIT_MSG_ON_RELEASE);
-
stream->sample_size = sizeof(struct drm_i915_perf_record_header);
- format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size;
+ format_size = dev_priv->perf.oa_formats[props->oa_format].size;
stream->sample_flags |= SAMPLE_OA_REPORT;
stream->sample_size += format_size;
- dev_priv->perf.oa.oa_buffer.format_size = format_size;
- if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0))
+ stream->oa_buffer.format_size = format_size;
+ if (WARN_ON(stream->oa_buffer.format_size == 0))
return -EINVAL;
- dev_priv->perf.oa.oa_buffer.format =
- dev_priv->perf.oa.oa_formats[props->oa_format].format;
+ stream->oa_buffer.format =
+ dev_priv->perf.oa_formats[props->oa_format].format;
- dev_priv->perf.oa.periodic = props->oa_periodic;
- if (dev_priv->perf.oa.periodic)
- dev_priv->perf.oa.period_exponent = props->oa_period_exponent;
+ stream->periodic = props->oa_periodic;
+ if (stream->periodic)
+ stream->period_exponent = props->oa_period_exponent;
if (stream->ctx) {
ret = oa_get_render_ctx_id(stream);
@@ -2256,7 +2250,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
stream->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
- ret = alloc_oa_buffer(dev_priv);
+ ret = alloc_oa_buffer(stream);
if (ret)
goto err_oa_buf_alloc;
@@ -2265,9 +2259,9 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
goto err_lock;
stream->ops = &i915_oa_stream_ops;
- dev_priv->perf.oa.exclusive_stream = stream;
+ dev_priv->perf.exclusive_stream = stream;
- ret = dev_priv->perf.oa.ops.enable_metric_set(stream);
+ ret = dev_priv->perf.ops.enable_metric_set(stream);
if (ret) {
DRM_DEBUG("Unable to enable metric set\n");
goto err_enable;
@@ -2275,15 +2269,21 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
mutex_unlock(&dev_priv->drm.struct_mutex);
+ hrtimer_init(&stream->poll_check_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ stream->poll_check_timer.function = oa_poll_check_timer_cb;
+ init_waitqueue_head(&stream->poll_wq);
+ spin_lock_init(&stream->oa_buffer.ptr_lock);
+
return 0;
err_enable:
- dev_priv->perf.oa.exclusive_stream = NULL;
- dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
+ dev_priv->perf.exclusive_stream = NULL;
+ dev_priv->perf.ops.disable_metric_set(stream);
mutex_unlock(&dev_priv->drm.struct_mutex);
err_lock:
- free_oa_buffer(dev_priv);
+ free_oa_buffer(stream);
err_oa_buf_alloc:
put_oa_config(dev_priv, stream->oa_config);
@@ -2307,9 +2307,9 @@ void i915_oa_init_reg_state(struct intel_engine_cs *engine,
if (engine->class != RENDER_CLASS)
return;
- stream = engine->i915->perf.oa.exclusive_stream;
+ stream = engine->i915->perf.exclusive_stream;
if (stream)
- gen8_update_reg_state_unlocked(ce, regs, stream->oa_config);
+ gen8_update_reg_state_unlocked(stream, ce, regs, stream->oa_config);
}
/**
@@ -2425,7 +2425,7 @@ static ssize_t i915_perf_read(struct file *file,
/* Maybe make ->pollin per-stream state if we support multiple
* concurrent streams in the future.
*/
- dev_priv->perf.oa.pollin = false;
+ stream->pollin = false;
}
return ret;
@@ -2433,13 +2433,12 @@ static ssize_t i915_perf_read(struct file *file,
static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
{
- struct drm_i915_private *dev_priv =
- container_of(hrtimer, typeof(*dev_priv),
- perf.oa.poll_check_timer);
+ struct i915_perf_stream *stream =
+ container_of(hrtimer, typeof(*stream), poll_check_timer);
- if (oa_buffer_check_unlocked(dev_priv)) {
- dev_priv->perf.oa.pollin = true;
- wake_up(&dev_priv->perf.oa.poll_wq);
+ if (oa_buffer_check_unlocked(stream)) {
+ stream->pollin = true;
+ wake_up(&stream->poll_wq);
}
hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));
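Moving the timer into the stream changes how the callback finds its state: instead of container_of() on dev_priv, it recovers the stream from the embedded hrtimer. The same pointer arithmetic in a freestanding sketch:

#include <stddef.h>
#include <stdio.h>

/* container_of as used by oa_poll_check_timer_cb: recover the
 * enclosing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct hrtimer { int dummy; };

struct perf_stream {
        int pollin;
        struct hrtimer poll_check_timer;
};

static void timer_cb(struct hrtimer *t)
{
        struct perf_stream *stream =
                container_of(t, struct perf_stream, poll_check_timer);

        stream->pollin = 1;
}

int main(void)
{
        struct perf_stream s = { 0 };

        timer_cb(&s.poll_check_timer);
        printf("pollin = %d\n", s.pollin);
        return 0;
}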
@@ -2478,7 +2477,7 @@ static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
* the hrtimer/oa_poll_check_timer_cb to notify us when there are
* samples to read.
*/
- if (dev_priv->perf.oa.pollin)
+ if (stream->pollin)
events |= EPOLLIN;
return events;
@@ -2904,7 +2903,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
value);
return -EINVAL;
}
- if (!dev_priv->perf.oa.oa_formats[value].size) {
+ if (!dev_priv->perf.oa_formats[value].size) {
DRM_DEBUG("Unsupported OA report format %llu\n",
value);
return -EINVAL;
@@ -3048,7 +3047,7 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
if (!dev_priv->perf.metrics_kobj)
goto exit;
- sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);
+ sysfs_attr_init(&dev_priv->perf.test_config.sysfs_metric_id.attr);
if (INTEL_GEN(dev_priv) >= 11) {
i915_perf_load_test_config_icl(dev_priv);
@@ -3083,15 +3082,15 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
i915_perf_load_test_config_hsw(dev_priv);
}
- if (dev_priv->perf.oa.test_config.id == 0)
+ if (dev_priv->perf.test_config.id == 0)
goto sysfs_error;
ret = sysfs_create_group(dev_priv->perf.metrics_kobj,
- &dev_priv->perf.oa.test_config.sysfs_metric);
+ &dev_priv->perf.test_config.sysfs_metric);
if (ret)
goto sysfs_error;
- atomic_set(&dev_priv->perf.oa.test_config.ref_count, 1);
+ atomic_set(&dev_priv->perf.test_config.ref_count, 1);
goto exit;
@@ -3118,7 +3117,7 @@ void i915_perf_unregister(struct drm_i915_private *dev_priv)
return;
sysfs_remove_group(dev_priv->perf.metrics_kobj,
- &dev_priv->perf.oa.test_config.sysfs_metric);
+ &dev_priv->perf.test_config.sysfs_metric);
kobject_put(dev_priv->perf.metrics_kobj);
dev_priv->perf.metrics_kobj = NULL;
@@ -3363,7 +3362,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
oa_config->mux_regs_len = args->n_mux_regs;
oa_config->mux_regs =
alloc_oa_regs(dev_priv,
- dev_priv->perf.oa.ops.is_valid_mux_reg,
+ dev_priv->perf.ops.is_valid_mux_reg,
u64_to_user_ptr(args->mux_regs_ptr),
args->n_mux_regs);
@@ -3376,7 +3375,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
oa_config->b_counter_regs_len = args->n_boolean_regs;
oa_config->b_counter_regs =
alloc_oa_regs(dev_priv,
- dev_priv->perf.oa.ops.is_valid_b_counter_reg,
+ dev_priv->perf.ops.is_valid_b_counter_reg,
u64_to_user_ptr(args->boolean_regs_ptr),
args->n_boolean_regs);
@@ -3395,7 +3394,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
oa_config->flex_regs_len = args->n_flex_regs;
oa_config->flex_regs =
alloc_oa_regs(dev_priv,
- dev_priv->perf.oa.ops.is_valid_flex_reg,
+ dev_priv->perf.ops.is_valid_flex_reg,
u64_to_user_ptr(args->flex_regs_ptr),
args->n_flex_regs);
@@ -3562,20 +3561,20 @@ static struct ctl_table dev_root[] = {
void i915_perf_init(struct drm_i915_private *dev_priv)
{
if (IS_HASWELL(dev_priv)) {
- dev_priv->perf.oa.ops.is_valid_b_counter_reg =
+ dev_priv->perf.ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
- dev_priv->perf.oa.ops.is_valid_mux_reg =
+ dev_priv->perf.ops.is_valid_mux_reg =
hsw_is_valid_mux_addr;
- dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
- dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
- dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
- dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
- dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
- dev_priv->perf.oa.ops.read = gen7_oa_read;
- dev_priv->perf.oa.ops.oa_hw_tail_read =
+ dev_priv->perf.ops.is_valid_flex_reg = NULL;
+ dev_priv->perf.ops.enable_metric_set = hsw_enable_metric_set;
+ dev_priv->perf.ops.disable_metric_set = hsw_disable_metric_set;
+ dev_priv->perf.ops.oa_enable = gen7_oa_enable;
+ dev_priv->perf.ops.oa_disable = gen7_oa_disable;
+ dev_priv->perf.ops.read = gen7_oa_read;
+ dev_priv->perf.ops.oa_hw_tail_read =
gen7_oa_hw_tail_read;
- dev_priv->perf.oa.oa_formats = hsw_oa_formats;
+ dev_priv->perf.oa_formats = hsw_oa_formats;
} else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
/* Note: although we could theoretically also support the
* legacy ringbuffer mode on BDW (and earlier iterations of
@@ -3583,71 +3582,65 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
* worth the complexity to maintain now that BDW+ enable
* execlist mode by default.
*/
- dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
+ dev_priv->perf.oa_formats = gen8_plus_oa_formats;
- dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
- dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
- dev_priv->perf.oa.ops.read = gen8_oa_read;
- dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
+ dev_priv->perf.ops.oa_enable = gen8_oa_enable;
+ dev_priv->perf.ops.oa_disable = gen8_oa_disable;
+ dev_priv->perf.ops.read = gen8_oa_read;
+ dev_priv->perf.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
if (IS_GEN_RANGE(dev_priv, 8, 9)) {
- dev_priv->perf.oa.ops.is_valid_b_counter_reg =
+ dev_priv->perf.ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
- dev_priv->perf.oa.ops.is_valid_mux_reg =
+ dev_priv->perf.ops.is_valid_mux_reg =
gen8_is_valid_mux_addr;
- dev_priv->perf.oa.ops.is_valid_flex_reg =
+ dev_priv->perf.ops.is_valid_flex_reg =
gen8_is_valid_flex_addr;
if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->perf.oa.ops.is_valid_mux_reg =
+ dev_priv->perf.ops.is_valid_mux_reg =
chv_is_valid_mux_addr;
}
- dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
- dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
+ dev_priv->perf.ops.enable_metric_set = gen8_enable_metric_set;
+ dev_priv->perf.ops.disable_metric_set = gen8_disable_metric_set;
if (IS_GEN(dev_priv, 8)) {
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
- dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;
+ dev_priv->perf.ctx_oactxctrl_offset = 0x120;
+ dev_priv->perf.ctx_flexeu0_offset = 0x2ce;
- dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);
+ dev_priv->perf.gen8_valid_ctx_bit = BIT(25);
} else {
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
- dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
+ dev_priv->perf.ctx_oactxctrl_offset = 0x128;
+ dev_priv->perf.ctx_flexeu0_offset = 0x3de;
- dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
+ dev_priv->perf.gen8_valid_ctx_bit = BIT(16);
}
} else if (IS_GEN_RANGE(dev_priv, 10, 11)) {
- dev_priv->perf.oa.ops.is_valid_b_counter_reg =
+ dev_priv->perf.ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
- dev_priv->perf.oa.ops.is_valid_mux_reg =
+ dev_priv->perf.ops.is_valid_mux_reg =
gen10_is_valid_mux_addr;
- dev_priv->perf.oa.ops.is_valid_flex_reg =
+ dev_priv->perf.ops.is_valid_flex_reg =
gen8_is_valid_flex_addr;
- dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
- dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;
+ dev_priv->perf.ops.enable_metric_set = gen8_enable_metric_set;
+ dev_priv->perf.ops.disable_metric_set = gen10_disable_metric_set;
if (IS_GEN(dev_priv, 10)) {
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
- dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
+ dev_priv->perf.ctx_oactxctrl_offset = 0x128;
+ dev_priv->perf.ctx_flexeu0_offset = 0x3de;
} else {
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x124;
- dev_priv->perf.oa.ctx_flexeu0_offset = 0x78e;
+ dev_priv->perf.ctx_oactxctrl_offset = 0x124;
+ dev_priv->perf.ctx_flexeu0_offset = 0x78e;
}
- dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
+ dev_priv->perf.gen8_valid_ctx_bit = BIT(16);
}
}
- if (dev_priv->perf.oa.ops.enable_metric_set) {
- hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
- init_waitqueue_head(&dev_priv->perf.oa.poll_wq);
-
+ if (dev_priv->perf.ops.enable_metric_set) {
INIT_LIST_HEAD(&dev_priv->perf.streams);
mutex_init(&dev_priv->perf.lock);
- spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
oa_sample_rate_hard_limit = 1000 *
(RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
@@ -3656,6 +3649,25 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->perf.metrics_lock);
idr_init(&dev_priv->perf.metrics_idr);
+ /* We set up some ratelimit state to potentially throttle any
+ * _NOTES about spurious, invalid OA reports which we don't
+ * forward to userspace.
+ *
+ * We print a _NOTE about any throttling when closing the
+ * stream instead of waiting until driver _fini which no one
+ * would ever see.
+ *
+ * Using the same limiting factors as printk_ratelimit()
+ */
+ ratelimit_state_init(&dev_priv->perf.spurious_report_rs,
+ 5 * HZ, 10);
+ /* Since we use a DRM_NOTE for spurious reports it would be
+ * inconsistent to let __ratelimit() automatically print a
+ * warning for throttling.
+ */
+ ratelimit_set_flags(&dev_priv->perf.spurious_report_rs,
+ RATELIMIT_MSG_ON_RELEASE);
+
dev_priv->perf.initialized = true;
}
}
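The ratelimit setup moves from stream open to i915_perf_init(), but the policy is unchanged: at most 10 notices per 5 seconds, the same factors as printk_ratelimit(), with suppressed messages counted in .missed and summarized when the stream is destroyed. A userspace sketch of that window/burst policy:

#include <stdbool.h>
#include <time.h>

/* At most `burst` messages per `interval` seconds. */
struct ratelimit {
        time_t begin;
        int interval;   /* seconds */
        int burst;
        int printed;
        int missed;
};

static bool ratelimit_ok(struct ratelimit *rs)
{
        time_t now = time(NULL);

        if (now - rs->begin >= rs->interval) {
                rs->begin = now;
                rs->printed = 0;
        }
        if (rs->printed < rs->burst) {
                rs->printed++;
                return true;
        }
        rs->missed++;
        return false;
}

int main(void)
{
        struct ratelimit rs = { .interval = 5, .burst = 10 };
        int i, printed = 0;

        for (i = 0; i < 100; i++)
                printed += ratelimit_ok(&rs);
        return printed == 10 ? 0 : 1;   /* 90 land in .missed */
}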
@@ -3684,7 +3696,7 @@ void i915_perf_fini(struct drm_i915_private *dev_priv)
unregister_sysctl_table(dev_priv->perf.sysctl_header);
- memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops));
+ memset(&dev_priv->perf.ops, 0, sizeof(dev_priv->perf.ops));
dev_priv->perf.initialized = false;
}
diff --git a/drivers/gpu/drm/i915/i915_perf.h b/drivers/gpu/drm/i915/i915_perf.h
new file mode 100644
index 000000000000..a412b16d9ffc
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_perf.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_PERF_H__
+#define __I915_PERF_H__
+
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_file;
+struct drm_i915_private;
+struct intel_context;
+struct intel_engine_cs;
+
+void i915_perf_init(struct drm_i915_private *i915);
+void i915_perf_fini(struct drm_i915_private *i915);
+void i915_perf_register(struct drm_i915_private *i915);
+void i915_perf_unregister(struct drm_i915_private *i915);
+
+int i915_perf_open_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+void i915_oa_init_reg_state(struct intel_engine_cs *engine,
+ struct intel_context *ce,
+ u32 *reg_state);
+
+#endif /* __I915_PERF_H__ */
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index eff86483bec0..8e251e719390 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -8,6 +8,9 @@
#include <linux/pm_runtime.h>
#include "gt/intel_engine.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt_pm.h"
#include "i915_drv.h"
#include "i915_pmu.h"
@@ -74,8 +77,9 @@ static unsigned int event_enabled_bit(struct perf_event *event)
return config_enabled_bit(event->attr.config);
}
-static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
+static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
{
+ struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
u64 enable;
/*
@@ -83,7 +87,7 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
*
* We start with a bitmask of all currently enabled events.
*/
- enable = i915->pmu.enable;
+ enable = pmu->enable;
/*
* Mask out all the ones which do not need the timer, or in
@@ -114,24 +118,26 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
- if (!i915->pmu.base.event_init)
+ struct i915_pmu *pmu = &i915->pmu;
+
+ if (!pmu->base.event_init)
return;
- spin_lock_irq(&i915->pmu.lock);
+ spin_lock_irq(&pmu->lock);
/*
* Signal sampling timer to stop if only engine events are enabled and
* GPU went idle.
*/
- i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
- spin_unlock_irq(&i915->pmu.lock);
+ pmu->timer_enabled = pmu_needs_timer(pmu, false);
+ spin_unlock_irq(&pmu->lock);
}
-static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
+static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
- if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
- i915->pmu.timer_enabled = true;
- i915->pmu.timer_last = ktime_get();
- hrtimer_start_range_ns(&i915->pmu.timer,
+ if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
+ pmu->timer_enabled = true;
+ pmu->timer_last = ktime_get();
+ hrtimer_start_range_ns(&pmu->timer,
ns_to_ktime(PERIOD), 0,
HRTIMER_MODE_REL_PINNED);
}
@@ -139,15 +145,17 @@ static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
- if (!i915->pmu.base.event_init)
+ struct i915_pmu *pmu = &i915->pmu;
+
+ if (!pmu->base.event_init)
return;
- spin_lock_irq(&i915->pmu.lock);
+ spin_lock_irq(&pmu->lock);
/*
* Re-enable sampling timer when GPU goes active.
*/
- __i915_pmu_maybe_start_timer(i915);
- spin_unlock_irq(&i915->pmu.lock);
+ __i915_pmu_maybe_start_timer(pmu);
+ spin_unlock_irq(&pmu->lock);
}
static void
@@ -157,32 +165,30 @@ add_sample(struct i915_pmu_sample *sample, u32 val)
}
static void
-engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
+engines_sample(struct intel_gt *gt, unsigned int period_ns)
{
+ struct drm_i915_private *i915 = gt->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
- unsigned long flags;
-
- if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
- return;
- wakeref = 0;
- if (READ_ONCE(dev_priv->gt.awake))
- wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
- if (!wakeref)
+ if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
return;
- spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, i915, id) {
struct intel_engine_pmu *pmu = &engine->pmu;
+ unsigned long flags;
bool busy;
u32 val;
- val = I915_READ_FW(RING_CTL(engine->mmio_base));
- if (val == 0) /* powerwell off => engine idle */
+ if (!intel_engine_pm_get_if_awake(engine))
continue;
+ spin_lock_irqsave(&engine->uncore->lock, flags);
+
+ val = ENGINE_READ_FW(engine, RING_CTL);
+ if (val == 0) /* powerwell off => engine idle */
+ goto skip;
+
if (val & RING_WAIT)
add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
if (val & RING_WAIT_SEMAPHORE)
@@ -197,15 +203,16 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
*/
busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
if (!busy) {
- val = I915_READ_FW(RING_MI_MODE(engine->mmio_base));
+ val = ENGINE_READ_FW(engine, RING_MI_MODE);
busy = !(val & MODE_IDLE);
}
if (busy)
add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
- }
- spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+skip:
+ spin_unlock_irqrestore(&engine->uncore->lock, flags);
+ intel_engine_pm_put(engine);
+ }
}
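The reworked engines_sample() takes an engine-local wakeref and reads RING_CTL under the engine's own uncore lock, but the classification is unchanged: a zero RING_CTL means the powerwell is off, a waiting engine still counts toward the BUSY sample, and otherwise RING_MI_MODE decides. That heuristic as a pure function; the bit positions are illustrative:

#include <stdbool.h>
#include <stdint.h>

#define RING_WAIT               (1u << 11)      /* illustrative bits */
#define RING_WAIT_SEMAPHORE     (1u << 10)
#define MODE_IDLE               (1u << 9)

/* Classify one sample: powered-off rings are idle, waiting rings are
 * still occupied, otherwise MI_MODE's idle flag decides. */
static bool engine_busy(uint32_t ring_ctl, uint32_t mi_mode)
{
        if (ring_ctl == 0)
                return false;
        if (ring_ctl & (RING_WAIT | RING_WAIT_SEMAPHORE))
                return true;
        return !(mi_mode & MODE_IDLE);
}

int main(void)
{
        return engine_busy(RING_WAIT, 0) && !engine_busy(0, 0) ? 0 : 1;
}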
static void
@@ -215,34 +222,30 @@ add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
}
static void
-frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
+frequency_sample(struct intel_gt *gt, unsigned int period_ns)
{
- if (dev_priv->pmu.enable &
- config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
- u32 val;
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ struct i915_pmu *pmu = &i915->pmu;
- val = dev_priv->gt_pm.rps.cur_freq;
- if (dev_priv->gt.awake) {
- intel_wakeref_t wakeref;
+ if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
+ u32 val;
- with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm,
- wakeref) {
- val = intel_uncore_read_notrace(&dev_priv->uncore,
- GEN6_RPSTAT1);
- val = intel_get_cagf(dev_priv, val);
- }
+ val = i915->gt_pm.rps.cur_freq;
+ if (intel_gt_pm_get_if_awake(gt)) {
+ val = intel_uncore_read_notrace(uncore, GEN6_RPSTAT1);
+ val = intel_get_cagf(i915, val);
+ intel_gt_pm_put(gt);
}
- add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
- intel_gpu_freq(dev_priv, val),
+ add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
+ intel_gpu_freq(i915, val),
period_ns / 1000);
}
- if (dev_priv->pmu.enable &
- config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
- add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ],
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.cur_freq),
+ if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
+ add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
+ intel_gpu_freq(i915, i915->gt_pm.rps.cur_freq),
period_ns / 1000);
}
}
@@ -251,15 +254,17 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
struct drm_i915_private *i915 =
container_of(hrtimer, struct drm_i915_private, pmu.timer);
+ struct i915_pmu *pmu = &i915->pmu;
+ struct intel_gt *gt = &i915->gt;
unsigned int period_ns;
ktime_t now;
- if (!READ_ONCE(i915->pmu.timer_enabled))
+ if (!READ_ONCE(pmu->timer_enabled))
return HRTIMER_NORESTART;
now = ktime_get();
- period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last));
- i915->pmu.timer_last = now;
+ period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
+ pmu->timer_last = now;
/*
* Strictly speaking the passed in period may not be 100% accurate for
@@ -267,8 +272,8 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
* grabbing the forcewake. However the potential error from timer call-
* back delay greatly dominates this so we keep it simple.
*/
- engines_sample(i915, period_ns);
- frequency_sample(i915, period_ns);
+ engines_sample(gt, period_ns);
+ frequency_sample(gt, period_ns);
hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
@@ -421,8 +426,9 @@ static int i915_pmu_event_init(struct perf_event *event)
return 0;
}
-static u64 __get_rc6(struct drm_i915_private *i915)
+static u64 __get_rc6(struct intel_gt *gt)
{
+ struct drm_i915_private *i915 = gt->i915;
u64 val;
val = intel_rc6_residency_ns(i915,
@@ -439,17 +445,19 @@ static u64 __get_rc6(struct drm_i915_private *i915)
return val;
}
-static u64 get_rc6(struct drm_i915_private *i915)
+static u64 get_rc6(struct intel_gt *gt)
{
#if IS_ENABLED(CONFIG_PM)
+ struct drm_i915_private *i915 = gt->i915;
struct intel_runtime_pm *rpm = &i915->runtime_pm;
+ struct i915_pmu *pmu = &i915->pmu;
intel_wakeref_t wakeref;
unsigned long flags;
u64 val;
wakeref = intel_runtime_pm_get_if_in_use(rpm);
if (wakeref) {
- val = __get_rc6(i915);
+ val = __get_rc6(gt);
intel_runtime_pm_put(rpm, wakeref);
/*
@@ -458,16 +466,16 @@ static u64 get_rc6(struct drm_i915_private *i915)
* previously.
*/
- spin_lock_irqsave(&i915->pmu.lock, flags);
+ spin_lock_irqsave(&pmu->lock, flags);
- if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
- i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
- i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
+ if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+ pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
+ pmu->sample[__I915_SAMPLE_RC6].cur = val;
} else {
- val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+ val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
}
- spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ spin_unlock_irqrestore(&pmu->lock, flags);
} else {
struct device *kdev = rpm->kdev;
@@ -478,7 +486,7 @@ static u64 get_rc6(struct drm_i915_private *i915)
* on top of the last known real value, as the approximated RC6
* counter value.
*/
- spin_lock_irqsave(&i915->pmu.lock, flags);
+ spin_lock_irqsave(&pmu->lock, flags);
/*
* After the above branch intel_runtime_pm_get_if_in_use failed
@@ -494,25 +502,25 @@ static u64 get_rc6(struct drm_i915_private *i915)
if (pm_runtime_status_suspended(kdev)) {
val = pm_runtime_suspended_time(kdev);
- if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
- i915->pmu.suspended_time_last = val;
+ if (!pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+ pmu->suspended_time_last = val;
- val -= i915->pmu.suspended_time_last;
- val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+ val -= pmu->suspended_time_last;
+ val += pmu->sample[__I915_SAMPLE_RC6].cur;
- i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
- } else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
- val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+ pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+ } else if (pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+ val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
} else {
- val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+ val = pmu->sample[__I915_SAMPLE_RC6].cur;
}
- spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ spin_unlock_irqrestore(&pmu->lock, flags);
}
return val;
#else
- return __get_rc6(i915);
+ return __get_rc6(gt);
#endif
}
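get_rc6() now takes the gt but keeps its two-mode bookkeeping: when the device is awake it trusts the hardware counter, and when runtime-suspended it grows an estimate from the suspended time so the reported value never goes backwards. A simplified sketch of that invariant; the real code also anchors the estimate to suspended_time_last and the saved real reading:

#include <stdbool.h>
#include <stdint.h>

struct rc6_state {
        uint64_t real;          /* last hardware reading */
        uint64_t estimated;     /* value reported while suspended */
};

static uint64_t rc6_read(struct rc6_state *s, bool awake,
                         uint64_t hw, uint64_t suspended_ns)
{
        if (awake) {
                if (hw >= s->estimated) {
                        s->estimated = 0;
                        s->real = hw;
                        return hw;
                }
                return s->estimated;    /* never report a regression */
        }
        s->estimated = s->real + suspended_ns;
        return s->estimated;
}

int main(void)
{
        struct rc6_state s = { 0, 0 };
        uint64_t awake = rc6_read(&s, true, 1000, 0);
        uint64_t asleep = rc6_read(&s, false, 0, 500);

        return asleep >= awake ? 0 : 1;
}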
@@ -520,6 +528,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = &i915->pmu;
u64 val = 0;
if (is_engine_event(event)) {
@@ -542,19 +551,19 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
switch (event->attr.config) {
case I915_PMU_ACTUAL_FREQUENCY:
val =
- div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
+ div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_REQUESTED_FREQUENCY:
val =
- div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
+ div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_INTERRUPTS:
val = count_interrupts(i915);
break;
case I915_PMU_RC6_RESIDENCY:
- val = get_rc6(i915);
+ val = get_rc6(&i915->gt);
break;
}
}
@@ -582,24 +591,25 @@ static void i915_pmu_enable(struct perf_event *event)
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
unsigned int bit = event_enabled_bit(event);
+ struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
- spin_lock_irqsave(&i915->pmu.lock, flags);
+ spin_lock_irqsave(&pmu->lock, flags);
/*
* Update the bitmask of enabled events and increment
* the event reference counter.
*/
- BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS);
- GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
- GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
- i915->pmu.enable |= BIT_ULL(bit);
- i915->pmu.enable_count[bit]++;
+ BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
+ GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
+ GEM_BUG_ON(pmu->enable_count[bit] == ~0);
+ pmu->enable |= BIT_ULL(bit);
+ pmu->enable_count[bit]++;
/*
* Start the sampling timer if needed and not already enabled.
*/
- __i915_pmu_maybe_start_timer(i915);
+ __i915_pmu_maybe_start_timer(pmu);
/*
* For per-engine events the bitmask and reference counting
@@ -625,7 +635,7 @@ static void i915_pmu_enable(struct perf_event *event)
engine->pmu.enable_count[sample]++;
}
- spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ spin_unlock_irqrestore(&pmu->lock, flags);
/*
* Store the current counter value so we can report the correct delta
@@ -640,9 +650,10 @@ static void i915_pmu_disable(struct perf_event *event)
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
unsigned int bit = event_enabled_bit(event);
+ struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
- spin_lock_irqsave(&i915->pmu.lock, flags);
+ spin_lock_irqsave(&pmu->lock, flags);
if (is_engine_event(event)) {
u8 sample = engine_event_sample(event);
@@ -664,18 +675,18 @@ static void i915_pmu_disable(struct perf_event *event)
engine->pmu.enable &= ~BIT(sample);
}
- GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
- GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
+ GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
+ GEM_BUG_ON(pmu->enable_count[bit] == 0);
/*
* Decrement the reference count and clear the enabled
* bitmask when the last listener on an event goes away.
*/
- if (--i915->pmu.enable_count[bit] == 0) {
- i915->pmu.enable &= ~BIT_ULL(bit);
- i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
+ if (--pmu->enable_count[bit] == 0) {
+ pmu->enable &= ~BIT_ULL(bit);
+ pmu->timer_enabled &= pmu_needs_timer(pmu, true);
}
- spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ spin_unlock_irqrestore(&pmu->lock, flags);
}
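i915_pmu_enable()/i915_pmu_disable() manage the enable bitmask by reference count: a bit stays set while at least one listener holds it, and the sampling timer is re-evaluated when the count drops to zero. The counting itself in a standalone sketch:

#include <assert.h>
#include <stdint.h>

/* Each bit in `enable` stays set while at least one listener holds it. */
struct pmu {
        uint64_t enable;
        unsigned int enable_count[64];
};

static void pmu_enable(struct pmu *pmu, unsigned int bit)
{
        pmu->enable |= 1ull << bit;
        pmu->enable_count[bit]++;
}

static void pmu_disable(struct pmu *pmu, unsigned int bit)
{
        assert(pmu->enable_count[bit]);
        if (--pmu->enable_count[bit] == 0)
                pmu->enable &= ~(1ull << bit);
}

int main(void)
{
        struct pmu pmu = { 0, { 0 } };

        pmu_enable(&pmu, 3);
        pmu_enable(&pmu, 3);
        pmu_disable(&pmu, 3);
        return pmu.enable & (1ull << 3) ? 0 : 1;  /* still enabled */
}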
static void i915_pmu_event_start(struct perf_event *event, int flags)
@@ -824,8 +835,9 @@ add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
}
static struct attribute **
-create_event_attributes(struct drm_i915_private *i915)
+create_event_attributes(struct i915_pmu *pmu)
{
+ struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
static const struct {
u64 config;
const char *name;
@@ -849,7 +861,6 @@ create_event_attributes(struct drm_i915_private *i915)
struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
struct attribute **attr = NULL, **attr_iter;
struct intel_engine_cs *engine;
- enum intel_engine_id id;
unsigned int i;
/* Count how many counters we will be exposing. */
@@ -858,7 +869,7 @@ create_event_attributes(struct drm_i915_private *i915)
count++;
}
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
if (!engine_event_status(engine,
engine_events[i].sample))
@@ -909,7 +920,7 @@ create_event_attributes(struct drm_i915_private *i915)
}
/* Initialize supported engine counters. */
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
char *str;
@@ -926,7 +937,7 @@ create_event_attributes(struct drm_i915_private *i915)
i915_iter =
add_i915_attr(i915_iter, str,
__I915_PMU_ENGINE(engine->uabi_class,
- engine->instance,
+ engine->uabi_instance,
engine_events[i].sample));
str = kasprintf(GFP_KERNEL, "%s-%s.unit",
@@ -939,8 +950,8 @@ create_event_attributes(struct drm_i915_private *i915)
}
}
- i915->pmu.i915_attr = i915_attr;
- i915->pmu.pmu_attr = pmu_attr;
+ pmu->i915_attr = i915_attr;
+ pmu->pmu_attr = pmu_attr;
return attr;
@@ -956,7 +967,7 @@ err_alloc:
return NULL;
}
-static void free_event_attributes(struct drm_i915_private *i915)
+static void free_event_attributes(struct i915_pmu *pmu)
{
struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;
@@ -964,12 +975,12 @@ static void free_event_attributes(struct drm_i915_private *i915)
kfree((*attr_iter)->name);
kfree(i915_pmu_events_attr_group.attrs);
- kfree(i915->pmu.i915_attr);
- kfree(i915->pmu.pmu_attr);
+ kfree(pmu->i915_attr);
+ kfree(pmu->pmu_attr);
i915_pmu_events_attr_group.attrs = NULL;
- i915->pmu.i915_attr = NULL;
- i915->pmu.pmu_attr = NULL;
+ pmu->i915_attr = NULL;
+ pmu->pmu_attr = NULL;
}
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
@@ -1006,7 +1017,7 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
-static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
+static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
enum cpuhp_state slot;
int ret;
@@ -1019,7 +1030,7 @@ static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
return ret;
slot = ret;
- ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
+ ret = cpuhp_state_add_instance(slot, &pmu->node);
if (ret) {
cpuhp_remove_multi_state(slot);
return ret;
@@ -1029,72 +1040,75 @@ static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
return 0;
}
-static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
+static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
WARN_ON(cpuhp_slot == CPUHP_INVALID);
- WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
+ WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &pmu->node));
cpuhp_remove_multi_state(cpuhp_slot);
}
void i915_pmu_register(struct drm_i915_private *i915)
{
+ struct i915_pmu *pmu = &i915->pmu;
int ret;
if (INTEL_GEN(i915) <= 2) {
- DRM_INFO("PMU not supported for this GPU.");
+ dev_info(i915->drm.dev, "PMU not supported for this GPU.");
return;
}
- i915_pmu_events_attr_group.attrs = create_event_attributes(i915);
+ i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
if (!i915_pmu_events_attr_group.attrs) {
ret = -ENOMEM;
goto err;
}
- i915->pmu.base.attr_groups = i915_pmu_attr_groups;
- i915->pmu.base.task_ctx_nr = perf_invalid_context;
- i915->pmu.base.event_init = i915_pmu_event_init;
- i915->pmu.base.add = i915_pmu_event_add;
- i915->pmu.base.del = i915_pmu_event_del;
- i915->pmu.base.start = i915_pmu_event_start;
- i915->pmu.base.stop = i915_pmu_event_stop;
- i915->pmu.base.read = i915_pmu_event_read;
- i915->pmu.base.event_idx = i915_pmu_event_event_idx;
-
- spin_lock_init(&i915->pmu.lock);
- hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- i915->pmu.timer.function = i915_sample;
-
- ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
+ pmu->base.attr_groups = i915_pmu_attr_groups;
+ pmu->base.task_ctx_nr = perf_invalid_context;
+ pmu->base.event_init = i915_pmu_event_init;
+ pmu->base.add = i915_pmu_event_add;
+ pmu->base.del = i915_pmu_event_del;
+ pmu->base.start = i915_pmu_event_start;
+ pmu->base.stop = i915_pmu_event_stop;
+ pmu->base.read = i915_pmu_event_read;
+ pmu->base.event_idx = i915_pmu_event_event_idx;
+
+ spin_lock_init(&pmu->lock);
+ hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ pmu->timer.function = i915_sample;
+
+ ret = perf_pmu_register(&pmu->base, "i915", -1);
if (ret)
goto err;
- ret = i915_pmu_register_cpuhp_state(i915);
+ ret = i915_pmu_register_cpuhp_state(pmu);
if (ret)
goto err_unreg;
return;
err_unreg:
- perf_pmu_unregister(&i915->pmu.base);
+ perf_pmu_unregister(&pmu->base);
err:
- i915->pmu.base.event_init = NULL;
- free_event_attributes(i915);
+ pmu->base.event_init = NULL;
+ free_event_attributes(pmu);
DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
}
void i915_pmu_unregister(struct drm_i915_private *i915)
{
- if (!i915->pmu.base.event_init)
+ struct i915_pmu *pmu = &i915->pmu;
+
+ if (!pmu->base.event_init)
return;
- WARN_ON(i915->pmu.enable);
+ WARN_ON(pmu->enable);
- hrtimer_cancel(&i915->pmu.timer);
+ hrtimer_cancel(&pmu->timer);
- i915_pmu_unregister_cpuhp_state(i915);
+ i915_pmu_unregister_cpuhp_state(pmu);
- perf_pmu_unregister(&i915->pmu.base);
- i915->pmu.base.event_init = NULL;
- free_event_attributes(i915);
+ perf_pmu_unregister(&pmu->base);
+ pmu->base.event_init = NULL;
+ free_event_attributes(pmu);
}
diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
index b02dea17dcab..21037a2e2038 100644
--- a/drivers/gpu/drm/i915/i915_priolist_types.h
+++ b/drivers/gpu/drm/i915/i915_priolist_types.h
@@ -16,18 +16,6 @@ enum {
I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
-
- /*
- * Requests containing performance queries must not be preempted by
- * another context. They get scheduled with their default priority and
- * once they reach the execlist ports we ensure that they stick on the
- * HW until finished by pretending that they have maximum priority,
- * i.e. nothing can have higher priority and force us to usurp the
- * active request.
- */
- I915_PRIORITY_UNPREEMPTABLE = INT_MAX,
-
- I915_PRIORITY_INVALID = INT_MIN
};
#define I915_USER_PRIORITY_SHIFT 2
@@ -39,6 +27,19 @@ enum {
#define I915_PRIORITY_WAIT ((u8)BIT(0))
#define I915_PRIORITY_NOSEMAPHORE ((u8)BIT(1))
+/* Smallest priority value that cannot be bumped. */
+#define I915_PRIORITY_INVALID (INT_MIN | (u8)I915_PRIORITY_MASK)
+
+/*
+ * Requests containing performance queries must not be preempted by
+ * another context. They get scheduled with their default priority and
+ * once they reach the execlist ports we ensure that they stick on the
+ * HW until finished by pretending that they have maximum priority,
+ * i.e. nothing can have higher priority and force us to usurp the
+ * active request.
+ */
+#define I915_PRIORITY_UNPREEMPTABLE INT_MAX
+
#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
struct i915_priolist {
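
[Note: the re-added definitions make the priority encoding explicit: user priority occupies the bits above I915_USER_PRIORITY_SHIFT, while the low mask bits are internal bumps (WAIT, NOSEMAPHORE). ORing INT_MIN with the whole internal mask therefore yields the smallest priority whose bump bits are already saturated, so no later OR can raise it. A standalone sketch of the trick (names illustrative, not the driver's):

	#include <limits.h>
	#include <stdbool.h>

	#define USER_PRIORITY_SHIFT	2
	#define PRIORITY_MASK		((1 << USER_PRIORITY_SHIFT) - 1)
	#define PRIORITY_INVALID	(INT_MIN | PRIORITY_MASK)

	/* Bumps only OR in low bits; an invalid priority never changes. */
	static bool bump_priority(int *prio, int bump)
	{
		if (*prio & bump)	/* already bumped, or invalid */
			return false;
		*prio |= bump;
		return true;
	}

This is also what allows the i915_scheduler.c hunk further down to test `READ_ONCE(rq->sched.attr.priority) & bump` in place of an explicit comparison against I915_PRIORITY_INVALID.]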
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 7b7016171057..ad9240a0817a 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -105,7 +105,6 @@ query_engine_info(struct drm_i915_private *i915,
struct drm_i915_query_engine_info query;
struct drm_i915_engine_info info = { };
struct intel_engine_cs *engine;
- enum intel_engine_id id;
int len, ret;
if (query_item->flags)
@@ -125,9 +124,9 @@ query_engine_info(struct drm_i915_private *i915,
info_ptr = &query_ptr->engines[0];
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
info.engine.engine_class = engine->uabi_class;
- info.engine.engine_instance = engine->instance;
+ info.engine.engine_instance = engine->uabi_instance;
info.capabilities = engine->uabi_capabilities;
if (__copy_to_user(info_ptr, &info, sizeof(info)))
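
[Note: the query path now iterates the uabi engine list and reports `uabi_instance`, so the (class, instance) pairs match the names userspace addresses. The copy-out itself is the usual pattern of streaming fixed-size records into a pre-validated user buffer; roughly (a sketch, not the driver's exact helper):

	#include <linux/uaccess.h>

	struct info_rec {
		u16 class;
		u16 instance;
		u64 capabilities;
	};

	/* Buffer length and access_ok() are validated once up front. */
	static int copy_records(struct info_rec __user *out,
				const struct info_rec *recs, unsigned int count)
	{
		unsigned int i;

		for (i = 0; i < count; i++) {
			if (__copy_to_user(&out[i], &recs[i], sizeof(*recs)))
				return -EFAULT;
		}
		return 0;
	}
]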
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d2b76121d863..2abd199093c5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -251,9 +251,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _MMIO_PIPE2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->pipe_offsets[pipe] - \
INTEL_INFO(dev_priv)->pipe_offsets[PIPE_A] + (reg) + \
DISPLAY_MMIO_BASE(dev_priv))
-#define _MMIO_TRANS2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->trans_offsets[(pipe)] - \
- INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + (reg) + \
- DISPLAY_MMIO_BASE(dev_priv))
+#define _TRANS2(tran, reg) (INTEL_INFO(dev_priv)->trans_offsets[(tran)] - \
+ INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + (reg) + \
+ DISPLAY_MMIO_BASE(dev_priv))
+#define _MMIO_TRANS2(tran, reg) _MMIO(_TRANS2(tran, reg))
#define _CURSOR2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->cursor_offsets[(pipe)] - \
INTEL_INFO(dev_priv)->cursor_offsets[PIPE_A] + (reg) + \
DISPLAY_MMIO_BASE(dev_priv))
@@ -271,30 +272,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
#define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0))
-/* Engine ID */
-
-#define RCS0_HW 0
-#define VCS0_HW 1
-#define BCS0_HW 2
-#define VECS0_HW 3
-#define VCS1_HW 4
-#define VCS2_HW 6
-#define VCS3_HW 7
-#define VECS1_HW 12
-
-/* Engine class */
-
-#define RENDER_CLASS 0
-#define VIDEO_DECODE_CLASS 1
-#define VIDEO_ENHANCEMENT_CLASS 2
-#define COPY_ENGINE_CLASS 3
-#define OTHER_CLASS 4
-#define MAX_ENGINE_CLASS 4
-
-#define OTHER_GUC_INSTANCE 0
-#define OTHER_GTPM_INSTANCE 1
-#define MAX_ENGINE_INSTANCE 3
-
/* PCI config space */
#define MCHBAR_I915 0x44
@@ -1162,27 +1139,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define PUNIT_REG_ISPSSPM0 0x39
#define PUNIT_REG_ISPSSPM1 0x3a
-/*
- * i915_power_well_id:
- *
- * IDs used to look up power wells. Power wells accessed directly bypassing
- * the power domains framework must be assigned a unique ID. The rest of power
- * wells must be assigned DISP_PW_ID_NONE.
- */
-enum i915_power_well_id {
- DISP_PW_ID_NONE,
-
- VLV_DISP_PW_DISP2D,
- BXT_DISP_PW_DPIO_CMN_A,
- VLV_DISP_PW_DPIO_CMN_BC,
- GLK_DISP_PW_DPIO_CMN_C,
- CHV_DISP_PW_DPIO_CMN_D,
- HSW_DISP_PW_GLOBAL,
- SKL_DISP_PW_MISC_IO,
- SKL_DISP_PW_1,
- SKL_DISP_PW_2,
-};
-
#define PUNIT_REG_PWRGT_CTRL 0x60
#define PUNIT_REG_PWRGT_STATUS 0x61
#define PUNIT_PWRGT_MASK(pw_idx) (3 << ((pw_idx) * 2))
@@ -2490,6 +2446,7 @@ enum i915_power_well_id {
#define RENDER_HWS_PGA_GEN7 _MMIO(0x04080)
#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100 * (engine)->hw_id)
#define GEN8_RING_FAULT_REG _MMIO(0x4094)
+#define GEN12_RING_FAULT_REG _MMIO(0xcec4)
#define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
#define RING_FAULT_GTTSEL_MASK (1 << 11)
#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
@@ -2499,6 +2456,7 @@ enum i915_power_well_id {
#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
+#define GEN12_PAT_INDEX(index) _MMIO(0x4800 + (index) * 4)
#define BSD_HWS_PGA_GEN7 _MMIO(0x04180)
#define BLT_HWS_PGA_GEN7 _MMIO(0x04280)
#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380)
@@ -2633,6 +2591,8 @@ enum i915_power_well_id {
#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10)
#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14)
+#define GEN12_FAULT_TLB_DATA0 _MMIO(0xceb8)
+#define GEN12_FAULT_TLB_DATA1 _MMIO(0xcebc)
#define FAULT_VA_HIGH_BITS (0xf << 0)
#define FAULT_GTT_SEL (1 << 4)
@@ -3248,27 +3208,7 @@ enum i915_power_well_id {
#define GMBUS_RATE_1MHZ (3 << 8) /* reserved on Pineview */
#define GMBUS_HOLD_EXT (1 << 7) /* 300ns hold time, rsvd on Pineview */
#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6)
-#define GMBUS_PIN_DISABLED 0
-#define GMBUS_PIN_SSC 1
-#define GMBUS_PIN_VGADDC 2
-#define GMBUS_PIN_PANEL 3
-#define GMBUS_PIN_DPD_CHV 3 /* HDMID_CHV */
-#define GMBUS_PIN_DPC 4 /* HDMIC */
-#define GMBUS_PIN_DPB 5 /* SDVO, HDMIB */
-#define GMBUS_PIN_DPD 6 /* HDMID */
-#define GMBUS_PIN_RESERVED 7 /* 7 reserved */
-#define GMBUS_PIN_1_BXT 1 /* BXT+ (atom) and CNP+ (big core) */
-#define GMBUS_PIN_2_BXT 2
-#define GMBUS_PIN_3_BXT 3
-#define GMBUS_PIN_4_CNP 4
-#define GMBUS_PIN_9_TC1_ICP 9
-#define GMBUS_PIN_10_TC2_ICP 10
-#define GMBUS_PIN_11_TC3_ICP 11
-#define GMBUS_PIN_12_TC4_ICP 12
-#define GMBUS_PIN_13_TC5_TGP 13
-#define GMBUS_PIN_14_TC6_TGP 14
-
-#define GMBUS_NUM_PINS 15 /* including 0 */
+
#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
#define GMBUS_SW_CLR_INT (1 << 31)
#define GMBUS_SW_RDY (1 << 30)
@@ -7268,6 +7208,8 @@ enum {
#define SKL_CSR_DC3_DC5_COUNT _MMIO(0x80030)
#define SKL_CSR_DC5_DC6_COUNT _MMIO(0x8002C)
#define BXT_CSR_DC3_DC5_COUNT _MMIO(0x80038)
+#define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084)
+#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)
/* interrupts */
#define DE_MASTER_IRQ_CONTROL (1 << 31)
@@ -7418,6 +7360,9 @@ enum {
#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
#define BXT_DE_PORT_GMBUS (1 << 1)
#define GEN8_AUX_CHANNEL_A (1 << 0)
+#define TGL_DE_PORT_AUX_DDIC (1 << 2)
+#define TGL_DE_PORT_AUX_DDIB (1 << 1)
+#define TGL_DE_PORT_AUX_DDIA (1 << 0)
#define GEN8_DE_MISC_ISR _MMIO(0x44460)
#define GEN8_DE_MISC_IMR _MMIO(0x44464)
@@ -7461,21 +7406,29 @@ enum {
#define GEN11_DE_HPD_IMR _MMIO(0x44474)
#define GEN11_DE_HPD_IIR _MMIO(0x44478)
#define GEN11_DE_HPD_IER _MMIO(0x4447c)
+#define GEN12_TC6_HOTPLUG (1 << 21)
+#define GEN12_TC5_HOTPLUG (1 << 20)
#define GEN11_TC4_HOTPLUG (1 << 19)
#define GEN11_TC3_HOTPLUG (1 << 18)
#define GEN11_TC2_HOTPLUG (1 << 17)
#define GEN11_TC1_HOTPLUG (1 << 16)
#define GEN11_TC_HOTPLUG(tc_port) (1 << ((tc_port) + 16))
-#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC4_HOTPLUG | \
+#define GEN11_DE_TC_HOTPLUG_MASK (GEN12_TC6_HOTPLUG | \
+ GEN12_TC5_HOTPLUG | \
+ GEN11_TC4_HOTPLUG | \
GEN11_TC3_HOTPLUG | \
GEN11_TC2_HOTPLUG | \
GEN11_TC1_HOTPLUG)
+#define GEN12_TBT6_HOTPLUG (1 << 5)
+#define GEN12_TBT5_HOTPLUG (1 << 4)
#define GEN11_TBT4_HOTPLUG (1 << 3)
#define GEN11_TBT3_HOTPLUG (1 << 2)
#define GEN11_TBT2_HOTPLUG (1 << 1)
#define GEN11_TBT1_HOTPLUG (1 << 0)
#define GEN11_TBT_HOTPLUG(tc_port) (1 << (tc_port))
-#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT4_HOTPLUG | \
+#define GEN11_DE_TBT_HOTPLUG_MASK (GEN12_TBT6_HOTPLUG | \
+ GEN12_TBT5_HOTPLUG | \
+ GEN11_TBT4_HOTPLUG | \
GEN11_TBT3_HOTPLUG | \
GEN11_TBT2_HOTPLUG | \
GEN11_TBT1_HOTPLUG)
@@ -7509,6 +7462,9 @@ enum {
#define GEN11_INTR_ENGINE_CLASS(x) (((x) & GENMASK(18, 16)) >> 16)
#define GEN11_INTR_ENGINE_INSTANCE(x) (((x) & GENMASK(25, 20)) >> 20)
#define GEN11_INTR_ENGINE_INTR(x) ((x) & 0xffff)
+/* irq instances for OTHER_CLASS */
+#define OTHER_GUC_INSTANCE 0
+#define OTHER_GTPM_INSTANCE 1
#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4))
@@ -7861,12 +7817,15 @@ enum {
SDE_FDI_RXB_CPT | \
SDE_FDI_RXA_CPT)
-/* south display engine interrupt: ICP */
+/* south display engine interrupt: ICP/TGP */
+#define SDE_TC6_HOTPLUG_TGP (1 << 29)
+#define SDE_TC5_HOTPLUG_TGP (1 << 28)
#define SDE_TC4_HOTPLUG_ICP (1 << 27)
#define SDE_TC3_HOTPLUG_ICP (1 << 26)
#define SDE_TC2_HOTPLUG_ICP (1 << 25)
#define SDE_TC1_HOTPLUG_ICP (1 << 24)
#define SDE_GMBUS_ICP (1 << 23)
+#define SDE_DDIC_HOTPLUG_TGP (1 << 18)
#define SDE_DDIB_HOTPLUG_ICP (1 << 17)
#define SDE_DDIA_HOTPLUG_ICP (1 << 16)
#define SDE_TC_HOTPLUG_ICP(tc_port) (1 << ((tc_port) + 24))
@@ -7877,6 +7836,11 @@ enum {
SDE_TC3_HOTPLUG_ICP | \
SDE_TC2_HOTPLUG_ICP | \
SDE_TC1_HOTPLUG_ICP)
+#define SDE_DDI_MASK_TGP (SDE_DDIC_HOTPLUG_TGP | \
+ SDE_DDI_MASK_ICP)
+#define SDE_TC_MASK_TGP (SDE_TC6_HOTPLUG_TGP | \
+ SDE_TC5_HOTPLUG_TGP | \
+ SDE_TC_MASK_ICP)
#define SDEISR _MMIO(0xc4000)
#define SDEIMR _MMIO(0xc4004)
@@ -7944,6 +7908,12 @@ enum {
*/
#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
+#define TGP_DDIC_HPD_ENABLE (1 << 11)
+#define TGP_DDIC_HPD_STATUS_MASK (3 << 8)
+#define TGP_DDIC_HPD_NO_DETECT (0 << 8)
+#define TGP_DDIC_HPD_SHORT_DETECT (1 << 8)
+#define TGP_DDIC_HPD_LONG_DETECT (2 << 8)
+#define TGP_DDIC_HPD_SHORT_LONG_DETECT (3 << 8)
#define ICP_DDIB_HPD_ENABLE (1 << 7)
#define ICP_DDIB_HPD_STATUS_MASK (3 << 4)
#define ICP_DDIB_HPD_NO_DETECT (0 << 4)
@@ -8067,6 +8037,18 @@ enum {
#define ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4)
#define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4)
+#define ICP_DDI_HPD_ENABLE_MASK (ICP_DDIB_HPD_ENABLE | \
+ ICP_DDIA_HPD_ENABLE)
+#define ICP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC4) | \
+ ICP_TC_HPD_ENABLE(PORT_TC3) | \
+ ICP_TC_HPD_ENABLE(PORT_TC2) | \
+ ICP_TC_HPD_ENABLE(PORT_TC1))
+#define TGP_DDI_HPD_ENABLE_MASK (TGP_DDIC_HPD_ENABLE | \
+ ICP_DDI_HPD_ENABLE_MASK)
+#define TGP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC6) | \
+ ICP_TC_HPD_ENABLE(PORT_TC5) | \
+ ICP_TC_HPD_ENABLE_MASK)
+
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
#define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
@@ -9390,6 +9372,8 @@ enum skl_power_gate {
#define TGL_TRANS_DDI_PORT_MASK (0xf << TGL_TRANS_DDI_PORT_SHIFT)
#define TRANS_DDI_SELECT_PORT(x) ((x) << TRANS_DDI_PORT_SHIFT)
#define TGL_TRANS_DDI_SELECT_PORT(x) (((x) + 1) << TGL_TRANS_DDI_PORT_SHIFT)
+#define TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val) (((val) & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT)
+#define TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val) ((((val) & TGL_TRANS_DDI_PORT_MASK) >> TGL_TRANS_DDI_PORT_SHIFT) - 1)
#define TRANS_DDI_MODE_SELECT_MASK (7 << 24)
#define TRANS_DDI_MODE_SELECT_HDMI (0 << 24)
#define TRANS_DDI_MODE_SELECT_DVI (1 << 24)
@@ -10979,6 +10963,7 @@ enum skl_power_gate {
#define CALIBRATION_DISABLED (0x0 << 4)
#define CALIBRATION_ENABLED_INITIAL_ONLY (0x2 << 4)
#define CALIBRATION_ENABLED_INITIAL_PERIODIC (0x3 << 4)
+#define BLANKING_PACKET_ENABLE (1 << 2)
#define S3D_ORIENTATION_LANDSCAPE (1 << 1)
#define EOTP_DISABLED (1 << 0)
@@ -11213,6 +11198,8 @@ enum skl_power_gate {
#define PMFLUSH_GAPL3UNBLOCK (1 << 21)
#define PMFLUSHDONE_LNEBLK (1 << 22)
+#define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */
+
/* gamt regs */
#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */
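
[Note: splitting _TRANS2 out of _MMIO_TRANS2 separates the per-transcoder offset arithmetic from the typed-register wrapper: callers that only need the raw offset can use _TRANS2 directly, while MMIO accessors keep receiving an `i915_reg_t`. The shape of the pattern, reduced to a standalone sketch (table contents hypothetical):

	#include <stdint.h>

	typedef struct { uint32_t reg; } reg_t;
	#define MMIO(off)	((reg_t){ .reg = (off) })

	static const uint32_t trans_offsets[] = { 0x60000, 0x61000, 0x62000 };

	/* Raw offset relative to transcoder A, plus the register. */
	#define TRANS2(tran, reg) \
		(trans_offsets[(tran)] - trans_offsets[0] + (reg))

	/* Typed register: the only form the MMIO accessors accept. */
	#define MMIO_TRANS2(tran, reg)	MMIO(TRANS2(tran, reg))
]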
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 8ac7d14ec8c9..f1a0a57fc6fc 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -35,6 +35,7 @@
#include "i915_active.h"
#include "i915_drv.h"
#include "i915_globals.h"
+#include "i915_trace.h"
#include "intel_pm.h"
struct execute_cb {
@@ -164,11 +165,11 @@ static void __notify_execute_cb(struct i915_request *rq)
}
static inline void
-i915_request_remove_from_client(struct i915_request *request)
+remove_from_client(struct i915_request *request)
{
struct drm_i915_file_private *file_priv;
- file_priv = request->file_priv;
+ file_priv = READ_ONCE(request->file_priv);
if (!file_priv)
return;
@@ -180,40 +181,6 @@ i915_request_remove_from_client(struct i915_request *request)
spin_unlock(&file_priv->mm.lock);
}
-static void advance_ring(struct i915_request *request)
-{
- struct intel_ring *ring = request->ring;
- unsigned int tail;
-
- /*
- * We know the GPU must have read the request to have
- * sent us the seqno + interrupt, so use the position
- * of tail of the request to update the last known position
- * of the GPU head.
- *
- * Note this requires that we are always called in request
- * completion order.
- */
- GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
- if (list_is_last(&request->ring_link, &ring->request_list)) {
- /*
- * We may race here with execlists resubmitting this request
- * as we retire it. The resubmission will move the ring->tail
- * forwards (to request->wa_tail). We either read the
- * current value that was written to hw, or the value that
- * is just about to be. Either works, if we miss the last two
- * noops - they are safe to be replayed on a reset.
- */
- tail = READ_ONCE(request->tail);
- list_del(&ring->active_link);
- } else {
- tail = request->postfix;
- }
- list_del_init(&request->ring_link);
-
- ring->head = tail;
-}
-
static void free_capture_list(struct i915_request *request)
{
struct i915_capture_list *capture;
@@ -231,7 +198,7 @@ static bool i915_request_retire(struct i915_request *rq)
{
struct i915_active_request *active, *next;
- lockdep_assert_held(&rq->i915->drm.struct_mutex);
+ lockdep_assert_held(&rq->timeline->mutex);
if (!i915_request_completed(rq))
return false;
@@ -243,7 +210,17 @@ static bool i915_request_retire(struct i915_request *rq)
GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
trace_i915_request_retire(rq);
- advance_ring(rq);
+ /*
+ * We know the GPU must have read the request to have
+ * sent us the seqno + interrupt, so use the position
+ * of tail of the request to update the last known position
+ * of the GPU head.
+ *
+ * Note this requires that we are always called in request
+ * completion order.
+ */
+ GEM_BUG_ON(!list_is_first(&rq->link, &rq->timeline->requests));
+ rq->ring->head = rq->postfix;
/*
* Walk through the active list, calling retire on each. This allows
@@ -305,12 +282,12 @@ static bool i915_request_retire(struct i915_request *rq)
local_irq_enable();
+ remove_from_client(rq);
+ list_del(&rq->link);
+
intel_context_exit(rq->hw_context);
intel_context_unpin(rq->hw_context);
- i915_request_remove_from_client(rq);
- list_del(&rq->link);
-
free_capture_list(rq);
i915_sched_node_fini(&rq->sched);
i915_request_put(rq);
@@ -320,7 +297,7 @@ static bool i915_request_retire(struct i915_request *rq)
void i915_request_retire_upto(struct i915_request *rq)
{
- struct intel_ring *ring = rq->ring;
+ struct intel_timeline * const tl = rq->timeline;
struct i915_request *tmp;
GEM_TRACE("%s fence %llx:%lld, current %d\n",
@@ -328,15 +305,11 @@ void i915_request_retire_upto(struct i915_request *rq)
rq->fence.context, rq->fence.seqno,
hwsp_seqno(rq));
- lockdep_assert_held(&rq->i915->drm.struct_mutex);
+ lockdep_assert_held(&tl->mutex);
GEM_BUG_ON(!i915_request_completed(rq));
- if (list_empty(&rq->ring_link))
- return;
-
do {
- tmp = list_first_entry(&ring->request_list,
- typeof(*tmp), ring_link);
+ tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
} while (i915_request_retire(tmp) && tmp != rq);
}
@@ -523,6 +496,10 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
switch (state) {
case FENCE_COMPLETE:
trace_i915_request_submit(request);
+
+ if (unlikely(fence->error))
+ i915_request_skip(request, fence->error);
+
/*
* We need to serialize use of the submit_request() callback
* with its hotplugging performed during an emergency
@@ -563,29 +540,28 @@ semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
-static void ring_retire_requests(struct intel_ring *ring)
+static void retire_requests(struct intel_timeline *tl)
{
struct i915_request *rq, *rn;
- list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link)
+ list_for_each_entry_safe(rq, rn, &tl->requests, link)
if (!i915_request_retire(rq))
break;
}
static noinline struct i915_request *
-request_alloc_slow(struct intel_context *ce, gfp_t gfp)
+request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
{
- struct intel_ring *ring = ce->ring;
struct i915_request *rq;
- if (list_empty(&ring->request_list))
+ if (list_empty(&tl->requests))
goto out;
if (!gfpflags_allow_blocking(gfp))
goto out;
/* Move our oldest request to the slab-cache (if not in use!) */
- rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
+ rq = list_first_entry(&tl->requests, typeof(*rq), link);
i915_request_retire(rq);
rq = kmem_cache_alloc(global.slab_requests,
@@ -594,11 +570,11 @@ request_alloc_slow(struct intel_context *ce, gfp_t gfp)
return rq;
/* Ratelimit ourselves to prevent oom from malicious clients */
- rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
+ rq = list_last_entry(&tl->requests, typeof(*rq), link);
cond_synchronize_rcu(rq->rcustate);
/* Retire our old requests in the hope that we free some */
- ring_retire_requests(ring);
+ retire_requests(tl);
out:
return kmem_cache_alloc(global.slab_requests, gfp);
@@ -607,7 +583,7 @@ out:
struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
{
- struct intel_timeline *tl = ce->ring->timeline;
+ struct intel_timeline *tl = ce->timeline;
struct i915_request *rq;
u32 seqno;
int ret;
@@ -649,7 +625,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq = kmem_cache_alloc(global.slab_requests,
gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) {
- rq = request_alloc_slow(ce, gfp);
+ rq = request_alloc_slow(tl, gfp);
if (!rq) {
ret = -ENOMEM;
goto err_unreserve;
@@ -741,15 +717,15 @@ struct i915_request *
i915_request_create(struct intel_context *ce)
{
struct i915_request *rq;
- int err;
+ struct intel_timeline *tl;
- err = intel_context_timeline_lock(ce);
- if (err)
- return ERR_PTR(err);
+ tl = intel_context_timeline_lock(ce);
+ if (IS_ERR(tl))
+ return ERR_CAST(tl);
/* Move our oldest request to the slab-cache (if not in use!) */
- rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
- if (!list_is_last(&rq->ring_link, &ce->ring->request_list))
+ rq = list_first_entry(&tl->requests, typeof(*rq), link);
+ if (!list_is_last(&rq->link, &tl->requests))
i915_request_retire(rq);
intel_context_enter(ce);
@@ -759,22 +735,22 @@ i915_request_create(struct intel_context *ce)
goto err_unlock;
/* Check that we do not interrupt ourselves with a new request */
- rq->cookie = lockdep_pin_lock(&ce->ring->timeline->mutex);
+ rq->cookie = lockdep_pin_lock(&tl->mutex);
return rq;
err_unlock:
- intel_context_timeline_unlock(ce);
+ intel_context_timeline_unlock(tl);
return rq;
}
static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{
- if (list_is_first(&signal->ring_link, &signal->ring->request_list))
+ if (list_is_first(&signal->link, &signal->timeline->requests))
return 0;
- signal = list_prev_entry(signal, ring_link);
+ signal = list_prev_entry(signal, link);
if (intel_timeline_sync_is_later(rq->timeline, &signal->fence))
return 0;
@@ -939,7 +915,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
continue;
/* Squash repeated waits to the same timelines */
- if (fence->context != rq->i915->mm.unordered_timeline &&
+ if (fence->context &&
intel_timeline_sync_is_later(rq->timeline, fence))
continue;
@@ -953,7 +929,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
return ret;
/* Record the latest fence used against each timeline */
- if (fence->context != rq->i915->mm.unordered_timeline)
+ if (fence->context)
intel_timeline_sync_set(rq->timeline, fence);
} while (--nchild);
@@ -1038,7 +1014,7 @@ i915_request_await_object(struct i915_request *to,
struct dma_fence **shared;
unsigned int count, i;
- ret = reservation_object_get_fences_rcu(obj->base.resv,
+ ret = dma_resv_get_fences_rcu(obj->base.resv,
&excl, &count, &shared);
if (ret)
return ret;
@@ -1055,7 +1031,7 @@ i915_request_await_object(struct i915_request *to,
dma_fence_put(shared[i]);
kfree(shared);
} else {
- excl = reservation_object_get_excl_rcu(obj->base.resv);
+ excl = dma_resv_get_excl_rcu(obj->base.resv);
}
if (excl) {
@@ -1076,6 +1052,9 @@ void i915_request_skip(struct i915_request *rq, int error)
GEM_BUG_ON(!IS_ERR_VALUE((long)error));
dma_fence_set_error(&rq->fence, error);
+ if (rq->infix == rq->postfix)
+ return;
+
/*
* As this request likely depends on state from the lost
* context, clear out all the user operations leaving the
@@ -1087,6 +1066,7 @@ void i915_request_skip(struct i915_request *rq, int error)
head = 0;
}
memset(vaddr + head, 0, rq->postfix - head);
+ rq->infix = rq->postfix;
}
static struct i915_request *
@@ -1115,7 +1095,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
* precludes optimising to use semaphores serialisation of a single
* timeline across engines.
*/
- prev = rcu_dereference_protected(timeline->last_request.request, 1);
+ prev = rcu_dereference_protected(timeline->last_request.request,
+ lockdep_is_held(&timeline->mutex));
if (prev && !i915_request_completed(prev)) {
if (is_power_of_2(prev->engine->mask | rq->engine->mask))
i915_sw_fence_await_sw_fence(&rq->submit,
@@ -1154,7 +1135,6 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct intel_ring *ring = rq->ring;
- struct i915_request *prev;
u32 *cs;
GEM_TRACE("%s fence %llx:%lld\n",
@@ -1167,6 +1147,7 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
*/
GEM_BUG_ON(rq->reserved_space > ring->space);
rq->reserved_space = 0;
+ rq->emitted_jiffies = jiffies;
/*
* Record the position of the start of the breadcrumb so that
@@ -1178,13 +1159,12 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
GEM_BUG_ON(IS_ERR(cs));
rq->postfix = intel_ring_offset(rq, cs);
- prev = __i915_request_add_to_timeline(rq);
-
- list_add_tail(&rq->ring_link, &ring->request_list);
- if (list_is_first(&rq->ring_link, &ring->request_list))
- list_add(&ring->active_link, &rq->i915->gt.active_rings);
- rq->emitted_jiffies = jiffies;
+ return __i915_request_add_to_timeline(rq);
+}
+void __i915_request_queue(struct i915_request *rq,
+ const struct i915_sched_attr *attr)
+{
/*
* Let the backend know a new request has arrived that may need
* to adjust the existing execution schedule due to a high priority
@@ -1196,57 +1176,54 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
* decide whether to preempt the entire chain so that it is ready to
* run at the earliest possible convenience.
*/
- local_bh_disable();
i915_sw_fence_commit(&rq->semaphore);
- rcu_read_lock(); /* RCU serialisation for set-wedged protection */
- if (engine->schedule) {
- struct i915_sched_attr attr = rq->gem_context->sched;
-
- /*
- * Boost actual workloads past semaphores!
- *
- * With semaphores we spin on one engine waiting for another,
- * simply to reduce the latency of starting our work when
- * the signaler completes. However, if there is any other
- * work that we could be doing on this engine instead, that
- * is better utilisation and will reduce the overall duration
- * of the current work. To avoid PI boosting a semaphore
- * far in the distance past over useful work, we keep a history
- * of any semaphore use along our dependency chain.
- */
- if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
- attr.priority |= I915_PRIORITY_NOSEMAPHORE;
-
- /*
- * Boost priorities to new clients (new request flows).
- *
- * Allow interactive/synchronous clients to jump ahead of
- * the bulk clients. (FQ_CODEL)
- */
- if (list_empty(&rq->sched.signalers_list))
- attr.priority |= I915_PRIORITY_WAIT;
-
- engine->schedule(rq, &attr);
- }
- rcu_read_unlock();
+ if (attr && rq->engine->schedule)
+ rq->engine->schedule(rq, attr);
i915_sw_fence_commit(&rq->submit);
- local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
-
- return prev;
}
void i915_request_add(struct i915_request *rq)
{
+ struct i915_sched_attr attr = rq->gem_context->sched;
+ struct intel_timeline * const tl = rq->timeline;
struct i915_request *prev;
- lockdep_assert_held(&rq->timeline->mutex);
- lockdep_unpin_lock(&rq->timeline->mutex, rq->cookie);
+ lockdep_assert_held(&tl->mutex);
+ lockdep_unpin_lock(&tl->mutex, rq->cookie);
trace_i915_request_add(rq);
prev = __i915_request_commit(rq);
/*
+ * Boost actual workloads past semaphores!
+ *
+ * With semaphores we spin on one engine waiting for another,
+ * simply to reduce the latency of starting our work when
+ * the signaler completes. However, if there is any other
+ * work that we could be doing on this engine instead, that
+ * is better utilisation and will reduce the overall duration
+ * of the current work. To avoid PI boosting a semaphore
+ * far in the distance past over useful work, we keep a history
+ * of any semaphore use along our dependency chain.
+ */
+ if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
+ attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+
+ /*
+ * Boost priorities to new clients (new request flows).
+ *
+ * Allow interactive/synchronous clients to jump ahead of
+ * the bulk clients. (FQ_CODEL)
+ */
+ if (list_empty(&rq->sched.signalers_list))
+ attr.priority |= I915_PRIORITY_WAIT;
+
+ local_bh_disable();
+ __i915_request_queue(rq, &attr);
+ local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
+
+ /*
* In typical scenarios, we do not expect the previous request on
* the timeline to be still tracked by timeline->last_request if it
* has been completed. If the completed request is still here, that
@@ -1263,10 +1240,10 @@ void i915_request_add(struct i915_request *rq)
* work on behalf of others -- but instead we should benefit from
* improved resource management. (Well, that's the theory at least.)
*/
- if (prev && i915_request_completed(prev))
+ if (prev && i915_request_completed(prev) && prev->timeline == tl)
i915_request_retire_upto(prev);
- mutex_unlock(&rq->timeline->mutex);
+ mutex_unlock(&tl->mutex);
}
static unsigned long local_clock_us(unsigned int *cpu)
@@ -1486,18 +1463,43 @@ out:
bool i915_retire_requests(struct drm_i915_private *i915)
{
- struct intel_ring *ring, *tmp;
+ struct intel_gt_timelines *timelines = &i915->gt.timelines;
+ struct intel_timeline *tl, *tn;
+ LIST_HEAD(free);
+
+ spin_lock(&timelines->lock);
+ list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
+ if (!mutex_trylock(&tl->mutex))
+ continue;
+
+ intel_timeline_get(tl);
+ GEM_BUG_ON(!tl->active_count);
+ tl->active_count++; /* pin the list element */
+ spin_unlock(&timelines->lock);
+
+ retire_requests(tl);
- lockdep_assert_held(&i915->drm.struct_mutex);
+ spin_lock(&timelines->lock);
- list_for_each_entry_safe(ring, tmp,
- &i915->gt.active_rings, active_link) {
- intel_ring_get(ring); /* last rq holds reference! */
- ring_retire_requests(ring);
- intel_ring_put(ring);
+ /* Resume iteration after dropping lock */
+ list_safe_reset_next(tl, tn, link);
+ if (!--tl->active_count)
+ list_del(&tl->link);
+
+ mutex_unlock(&tl->mutex);
+
+ /* Defer the final release to after the spinlock */
+ if (refcount_dec_and_test(&tl->kref.refcount)) {
+ GEM_BUG_ON(tl->active_count);
+ list_add(&tl->link, &free);
+ }
}
+ spin_unlock(&timelines->lock);
+
+ list_for_each_entry_safe(tl, tn, &free, link)
+ __intel_timeline_free(&tl->kref);
- return !list_empty(&i915->gt.active_rings);
+ return !list_empty(&timelines->active_list);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
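
[Note: the new i915_retire_requests() above shows a recurring pattern for walking a spinlock-protected list whose elements need a sleeping lock: trylock the element, pin it (the extra active_count) so it cannot be unlinked, drop the spinlock, do the work, then retake the spinlock and resume iteration with list_safe_reset_next(). A condensed sketch of that dance (the deferred free-list is omitted; `struct tl` is hypothetical):

	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/spinlock.h>

	struct tl {
		struct list_head link;
		struct mutex mutex;
		unsigned int active_count;
	};

	static void walk_active(spinlock_t *lock, struct list_head *head)
	{
		struct tl *tl, *tn;

		spin_lock(lock);
		list_for_each_entry_safe(tl, tn, head, link) {
			if (!mutex_trylock(&tl->mutex))
				continue;	/* busy elsewhere, skip */

			tl->active_count++;	/* pin the list element */
			spin_unlock(lock);

			/* ... sleeping work under tl->mutex ... */

			spin_lock(lock);
			/* tn may be stale after dropping the lock */
			list_safe_reset_next(tl, tn, link);
			if (!--tl->active_count)
				list_del(&tl->link);
			mutex_unlock(&tl->mutex);
		}
		spin_unlock(lock);
	}
]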
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 313df3c37158..8ac6e1226a56 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -223,9 +223,6 @@ struct i915_request {
/** timeline->request entry for this request */
struct list_head link;
- /** ring->request_list entry for this request */
- struct list_head ring_link;
-
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_link;
@@ -251,6 +248,8 @@ struct i915_request * __must_check
i915_request_create(struct intel_context *ce);
struct i915_request *__i915_request_commit(struct i915_request *request);
+void __i915_request_queue(struct i915_request *rq,
+ const struct i915_sched_attr *attr);
void i915_request_retire_upto(struct i915_request *rq);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 0bd452e851d8..7b84ebca2901 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -349,8 +349,7 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
unsigned long flags;
GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
-
- if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
+ if (READ_ONCE(rq->sched.attr.priority) & bump)
return;
spin_lock_irqsave(&schedule_lock, flags);
diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h
index acdf6eb9e262..4d88205de51b 100644
--- a/drivers/gpu/drm/i915/i915_selftest.h
+++ b/drivers/gpu/drm/i915/i915_selftest.h
@@ -24,6 +24,8 @@
#ifndef __I915_SELFTEST_H__
#define __I915_SELFTEST_H__
+#include <linux/types.h>
+
struct pci_dev;
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a08d7d16621b..8508a01ad8b9 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -29,8 +29,9 @@
#include "display/intel_fbc.h"
#include "display/intel_gmbus.h"
+#include "i915_drv.h"
#include "i915_reg.h"
-#include "intel_drv.h"
+#include "i915_suspend.h"
static void i915_save_display(struct drm_i915_private *dev_priv)
{
diff --git a/drivers/gpu/drm/i915/i915_suspend.h b/drivers/gpu/drm/i915/i915_suspend.h
new file mode 100644
index 000000000000..3a36fb4ecc05
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_suspend.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_SUSPEND_H__
+#define __I915_SUSPEND_H__
+
+struct drm_i915_private;
+
+int i915_save_state(struct drm_i915_private *i915);
+int i915_restore_state(struct drm_i915_private *i915);
+
+#endif /* __I915_SUSPEND_H__ */
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 5387aafd3424..6a88db291252 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -7,7 +7,7 @@
#include <linux/slab.h>
#include <linux/dma-fence.h>
#include <linux/irq_work.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include "i915_sw_fence.h"
#include "i915_selftest.h"
@@ -157,8 +157,11 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
LIST_HEAD(extra);
do {
- list_for_each_entry_safe(pos, next, &x->head, entry)
- pos->func(pos, TASK_NORMAL, 0, &extra);
+ list_for_each_entry_safe(pos, next, &x->head, entry) {
+ pos->func(pos,
+ TASK_NORMAL, fence->error,
+ &extra);
+ }
if (list_empty(&extra))
break;
@@ -219,6 +222,8 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence,
__init_waitqueue_head(&fence->wait, name, key);
atomic_set(&fence->pending, 1);
+ fence->error = 0;
+
fence->flags = (unsigned long)fn;
}
@@ -230,6 +235,8 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence)
static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key)
{
+ i915_sw_fence_set_error_once(wq->private, flags);
+
list_del(&wq->entry);
__i915_sw_fence_complete(wq->private, key);
@@ -302,8 +309,10 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
debug_fence_assert(fence);
might_sleep_if(gfpflags_allow_blocking(gfp));
- if (i915_sw_fence_done(signaler))
+ if (i915_sw_fence_done(signaler)) {
+ i915_sw_fence_set_error_once(fence, signaler->error);
return 0;
+ }
debug_fence_assert(signaler);
@@ -319,6 +328,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
return -ENOMEM;
i915_sw_fence_wait(signaler);
+ i915_sw_fence_set_error_once(fence, signaler->error);
return 0;
}
@@ -337,7 +347,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
__add_wait_queue_entry_tail(&signaler->wait, wq);
pending = 1;
} else {
- i915_sw_fence_wake(wq, 0, 0, NULL);
+ i915_sw_fence_wake(wq, 0, signaler->error, NULL);
pending = 0;
}
spin_unlock_irqrestore(&signaler->wait.lock, flags);
@@ -372,6 +382,7 @@ static void dma_i915_sw_fence_wake(struct dma_fence *dma,
{
struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+ i915_sw_fence_set_error_once(cb->fence, dma->error);
i915_sw_fence_complete(cb->fence);
kfree(cb);
}
@@ -391,6 +402,7 @@ static void timer_i915_sw_fence_wake(struct timer_list *t)
cb->dma->seqno,
i915_sw_fence_debug_hint(fence));
+ i915_sw_fence_set_error_once(fence, -ETIMEDOUT);
i915_sw_fence_complete(fence);
}
@@ -480,6 +492,7 @@ static void __dma_i915_sw_fence_wake(struct dma_fence *dma,
{
struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+ i915_sw_fence_set_error_once(cb->fence, dma->error);
i915_sw_fence_complete(cb->fence);
}
@@ -501,7 +514,7 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
if (ret == 0) {
ret = 1;
} else {
- i915_sw_fence_complete(fence);
+ __dma_i915_sw_fence_wake(dma, &cb->base);
if (ret == -ENOENT) /* fence already signaled */
ret = 0;
}
@@ -510,7 +523,7 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
}
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
- struct reservation_object *resv,
+ struct dma_resv *resv,
const struct dma_fence_ops *exclude,
bool write,
unsigned long timeout,
@@ -526,7 +539,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct dma_fence **shared;
unsigned int count, i;
- ret = reservation_object_get_fences_rcu(resv,
+ ret = dma_resv_get_fences_rcu(resv,
&excl, &count, &shared);
if (ret)
return ret;
@@ -551,7 +564,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
dma_fence_put(shared[i]);
kfree(shared);
} else {
- excl = reservation_object_get_excl_rcu(resv);
+ excl = dma_resv_get_excl_rcu(resv);
}
if (ret >= 0 && excl && excl->ops != exclude) {
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 9cb5c3b307a6..ab7d58bd0b9d 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -16,12 +16,13 @@
#include <linux/wait.h>
struct completion;
-struct reservation_object;
+struct dma_resv;
struct i915_sw_fence {
wait_queue_head_t wait;
unsigned long flags;
atomic_t pending;
+ int error;
};
#define I915_SW_FENCE_CHECKED_BIT 0 /* used internally for DAG checking */
@@ -82,7 +83,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
gfp_t gfp);
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
- struct reservation_object *resv,
+ struct dma_resv *resv,
const struct dma_fence_ops *exclude,
bool write,
unsigned long timeout,
@@ -106,4 +107,10 @@ static inline void i915_sw_fence_wait(struct i915_sw_fence *fence)
wait_event(fence->wait, i915_sw_fence_done(fence));
}
+static inline void
+i915_sw_fence_set_error_once(struct i915_sw_fence *fence, int error)
+{
+ cmpxchg(&fence->error, 0, error);
+}
+
#endif /* _I915_SW_FENCE_H_ */
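
[Note: the new `error` field is written with cmpxchg(&fence->error, 0, error) so only the first error ever sticks: concurrent callers race harmlessly, because exactly one compare-and-swap observes the initial zero. A userspace analogue with C11 atomics (names hypothetical):

	#include <stdatomic.h>

	struct fence { atomic_int error; };

	/* First non-zero error wins; all later reports are ignored. */
	static void set_error_once(struct fence *f, int error)
	{
		int expected = 0;

		atomic_compare_exchange_strong(&f->error, &expected, error);
	}
]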
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c
new file mode 100644
index 000000000000..07552cd544f2
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_sw_fence_work.h"
+
+static void fence_work(struct work_struct *work)
+{
+ struct dma_fence_work *f = container_of(work, typeof(*f), work);
+ int err;
+
+ err = f->ops->work(f);
+ if (err)
+ dma_fence_set_error(&f->dma, err);
+ dma_fence_signal(&f->dma);
+ dma_fence_put(&f->dma);
+}
+
+static int __i915_sw_fence_call
+fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ struct dma_fence_work *f = container_of(fence, typeof(*f), chain);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ if (fence->error)
+ dma_fence_set_error(&f->dma, fence->error);
+
+ if (!f->dma.error) {
+ dma_fence_get(&f->dma);
+ queue_work(system_unbound_wq, &f->work);
+ } else {
+ dma_fence_signal(&f->dma);
+ }
+ break;
+
+ case FENCE_FREE:
+ dma_fence_put(&f->dma);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static const char *get_driver_name(struct dma_fence *fence)
+{
+ return "dma-fence";
+}
+
+static const char *get_timeline_name(struct dma_fence *fence)
+{
+ struct dma_fence_work *f = container_of(fence, typeof(*f), dma);
+
+ return f->ops->name ?: "work";
+}
+
+static void fence_release(struct dma_fence *fence)
+{
+ struct dma_fence_work *f = container_of(fence, typeof(*f), dma);
+
+ if (f->ops->release)
+ f->ops->release(f);
+
+ i915_sw_fence_fini(&f->chain);
+
+ BUILD_BUG_ON(offsetof(typeof(*f), dma));
+ dma_fence_free(&f->dma);
+}
+
+static const struct dma_fence_ops fence_ops = {
+ .get_driver_name = get_driver_name,
+ .get_timeline_name = get_timeline_name,
+ .release = fence_release,
+};
+
+void dma_fence_work_init(struct dma_fence_work *f,
+ const struct dma_fence_work_ops *ops)
+{
+ spin_lock_init(&f->lock);
+ dma_fence_init(&f->dma, &fence_ops, &f->lock, 0, 0);
+ i915_sw_fence_init(&f->chain, fence_notify);
+ INIT_WORK(&f->work, fence_work);
+
+ f->ops = ops;
+}
+
+int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal)
+{
+ if (!signal)
+ return 0;
+
+ return __i915_sw_fence_await_dma_fence(&f->chain, signal, &f->cb);
+}
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.h b/drivers/gpu/drm/i915/i915_sw_fence_work.h
new file mode 100644
index 000000000000..3a22b287e201
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef I915_SW_FENCE_WORK_H
+#define I915_SW_FENCE_WORK_H
+
+#include <linux/dma-fence.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "i915_sw_fence.h"
+
+struct dma_fence_work;
+
+struct dma_fence_work_ops {
+ const char *name;
+ int (*work)(struct dma_fence_work *f);
+ void (*release)(struct dma_fence_work *f);
+};
+
+struct dma_fence_work {
+ struct dma_fence dma;
+ spinlock_t lock;
+
+ struct i915_sw_fence chain;
+ struct i915_sw_dma_fence_cb cb;
+
+ struct work_struct work;
+ const struct dma_fence_work_ops *ops;
+};
+
+void dma_fence_work_init(struct dma_fence_work *f,
+ const struct dma_fence_work_ops *ops);
+int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal);
+
+static inline void dma_fence_work_commit(struct dma_fence_work *f)
+{
+ i915_sw_fence_commit(&f->chain);
+}
+
+#endif /* I915_SW_FENCE_WORK_H */
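
[Note: the new dma_fence_work couples an i915_sw_fence on the input side (dependencies) to a dma_fence on the output side, running ops->work() on system_unbound_wq once every chained fence has signalled; a non-zero return propagates as the output fence's error. A hypothetical consumer would look roughly like this (all names here are illustrative, not an in-tree user):

	#include <linux/err.h>
	#include <linux/slab.h>

	#include "i915_sw_fence_work.h"

	struct clear_work {
		struct dma_fence_work base;	/* first member: freed as one */
		/* ... payload ... */
	};

	static int clear_work_fn(struct dma_fence_work *f)
	{
		/* Process context, all dependencies signalled. */
		return 0;	/* non-zero becomes the fence error */
	}

	static const struct dma_fence_work_ops clear_ops = {
		.name = "clear",
		.work = clear_work_fn,
	};

	static struct dma_fence *queue_clear(struct dma_fence *prereq)
	{
		struct clear_work *w;
		int err;

		w = kzalloc(sizeof(*w), GFP_KERNEL);
		if (!w)
			return ERR_PTR(-ENOMEM);

		dma_fence_work_init(&w->base, &clear_ops);
		err = dma_fence_work_chain(&w->base, prereq);
		if (err < 0)
			dma_fence_set_error(&w->base.dma, err);

		dma_fence_work_commit(&w->base);	/* arm the sw_fence */
		return &w->base.dma;
	}
]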
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index ecac1c386109..d8a3b180c084 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -31,7 +31,7 @@
#include <linux/sysfs.h>
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "i915_sysfs.h"
#include "intel_pm.h"
#include "intel_sideband.h"
diff --git a/drivers/gpu/drm/i915/i915_sysfs.h b/drivers/gpu/drm/i915/i915_sysfs.h
new file mode 100644
index 000000000000..41afd4366416
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sysfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_SYSFS_H__
+#define __I915_SYSFS_H__
+
+struct drm_i915_private;
+
+void i915_setup_sysfs(struct drm_i915_private *i915);
+void i915_teardown_sysfs(struct drm_i915_private *i915);
+
+#endif /* __I915_SYSFS_H__ */
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index da18b8d6b80c..24f2944da09d 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -8,11 +8,11 @@
#include <drm/drm_drv.h>
+#include "display/intel_display_types.h"
#include "gt/intel_engine.h"
#include "i915_drv.h"
#include "i915_irq.h"
-#include "intel_drv.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
@@ -677,7 +677,7 @@ TRACE_EVENT(i915_request_queue,
__entry->dev = rq->i915->drm.primary->index;
__entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
- __entry->instance = rq->engine->instance;
+ __entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->flags = flags;
@@ -706,7 +706,7 @@ DECLARE_EVENT_CLASS(i915_request,
__entry->dev = rq->i915->drm.primary->index;
__entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
- __entry->instance = rq->engine->instance;
+ __entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
),
@@ -751,7 +751,7 @@ TRACE_EVENT(i915_request_in,
__entry->dev = rq->i915->drm.primary->index;
__entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
- __entry->instance = rq->engine->instance;
+ __entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->prio = rq->sched.attr.priority;
@@ -782,7 +782,7 @@ TRACE_EVENT(i915_request_out,
__entry->dev = rq->i915->drm.primary->index;
__entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
- __entry->instance = rq->engine->instance;
+ __entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->completed = i915_request_completed(rq);
@@ -847,7 +847,7 @@ TRACE_EVENT(i915_request_wait_begin,
__entry->dev = rq->i915->drm.primary->index;
__entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
- __entry->instance = rq->engine->instance;
+ __entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->flags = flags;
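
[Note: these hunks switch the traced instance number to `uabi_instance` so perf/ftrace output matches the engine naming userspace sees. Each event follows the standard TRACE_EVENT shape; a minimal sketch of one such event (not the driver's actual definition):

	#include <linux/tracepoint.h>

	TRACE_EVENT(my_request,
		TP_PROTO(u32 class, u32 instance, u64 seqno),
		TP_ARGS(class, instance, seqno),

		TP_STRUCT__entry(
			__field(u32, class)
			__field(u32, instance)
			__field(u64, seqno)
		),

		TP_fast_assign(
			__entry->class = class;
			__entry->instance = instance;
			__entry->seqno = seqno;
		),

		TP_printk("engine=%u:%u, seqno=%llu",
			  __entry->class, __entry->instance, __entry->seqno)
	);
]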
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
new file mode 100644
index 000000000000..16acdf7bdbe6
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <drm/drm_drv.h>
+
+#include "i915_drv.h"
+#include "i915_utils.h"
+
+#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
+#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
+ "providing the dmesg log by booting with drm.debug=0xf"
+
+void
+__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+ const char *fmt, ...)
+{
+ static bool shown_bug_once;
+ struct device *kdev = dev_priv->drm.dev;
+ bool is_error = level[1] <= KERN_ERR[1];
+ bool is_debug = level[1] == KERN_DEBUG[1];
+ struct va_format vaf;
+ va_list args;
+
+ if (is_debug && !(drm_debug & DRM_UT_DRIVER))
+ return;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (is_error)
+ dev_printk(level, kdev, "%pV", &vaf);
+ else
+ dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
+ __builtin_return_address(0), &vaf);
+
+ va_end(args);
+
+ if (is_error && !shown_bug_once) {
+ /*
+ * Ask the user to file a bug report for the error, except
+ * if they may have caused the bug by fiddling with unsafe
+ * module parameters.
+ */
+ if (!test_taint(TAINT_USER))
+ dev_notice(kdev, "%s", FDO_BUG_MSG);
+ shown_bug_once = true;
+ }
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+static unsigned int i915_probe_fail_count;
+
+int __i915_inject_load_error(struct drm_i915_private *i915, int err,
+ const char *func, int line)
+{
+ if (i915_probe_fail_count >= i915_modparams.inject_load_failure)
+ return 0;
+
+ if (++i915_probe_fail_count < i915_modparams.inject_load_failure)
+ return 0;
+
+ __i915_printk(i915, KERN_INFO,
+ "Injecting failure %d at checkpoint %u [%s:%d]\n",
+ err, i915_modparams.inject_load_failure, func, line);
+ i915_modparams.inject_load_failure = 0;
+ return err;
+}
+
+bool i915_error_injected(void)
+{
+ return i915_probe_fail_count && !i915_modparams.inject_load_failure;
+}
+
+#endif
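
[Note: the injection helpers above implement counted checkpoints: every call during probe increments a counter, and the Nth checkpoint (selected by the `i915.inject_load_failure` module parameter) returns the requested error, after which the parameter is cleared. Call sites wrap each probe phase like this sketch (phase name hypothetical):

	static int probe_phase(struct drm_i915_private *i915)
	{
		int err;

		/* Synthetic CI failure point: -ENODEV on the selected
		 * checkpoint, 0 otherwise.
		 */
		err = i915_inject_probe_failure(i915);
		if (err)
			return err;

		/* ... real probe work ... */
		return 0;
	}

The i915_probe_error() macro in the header below then logs such expected failures at KERN_DEBUG rather than KERN_ERR, keeping injected-failure runs quiet.]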
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 4920ff9aba62..562f756da421 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -31,6 +31,8 @@
#include <linux/types.h>
#include <linux/workqueue.h>
+struct drm_i915_private;
+
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
#if 0
@@ -49,6 +51,34 @@
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
__stringify(x), (long)(x))
+void __printf(3, 4)
+__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+ const char *fmt, ...);
+
+#define i915_report_error(dev_priv, fmt, ...) \
+ __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+
+int __i915_inject_load_error(struct drm_i915_private *i915, int err,
+ const char *func, int line);
+#define i915_inject_load_error(_i915, _err) \
+ __i915_inject_load_error((_i915), (_err), __func__, __LINE__)
+bool i915_error_injected(void);
+
+#else
+
+#define i915_inject_load_error(_i915, _err) 0
+#define i915_error_injected() false
+
+#endif
+
+#define i915_inject_probe_failure(i915) i915_inject_load_error((i915), -ENODEV)
+
+#define i915_probe_error(i915, fmt, ...) \
+ __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
+ fmt, ##__VA_ARGS__)
+
#if defined(GCC_VERSION) && GCC_VERSION >= 70000
#define add_overflows_t(T, A, B) \
__builtin_add_overflow_p((A), (B), (T)0)
@@ -131,17 +161,15 @@ __check_struct_size(size_t base, size_t arr, size_t count, size_t *size)
((typeof(ptr))((unsigned long)(ptr) | __bits)); \
})
-#define ptr_count_dec(p_ptr) do { \
- typeof(p_ptr) __p = (p_ptr); \
- unsigned long __v = (unsigned long)(*__p); \
- *__p = (typeof(*p_ptr))(--__v); \
-} while (0)
+#define ptr_dec(ptr) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (typeof(ptr))(__v - 1); \
+})
-#define ptr_count_inc(p_ptr) do { \
- typeof(p_ptr) __p = (p_ptr); \
- unsigned long __v = (unsigned long)(*__p); \
- *__p = (typeof(*p_ptr))(++__v); \
-} while (0)
+#define ptr_inc(ptr) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (typeof(ptr))(__v + 1); \
+})
#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
@@ -382,4 +410,15 @@ static inline const char *enableddisabled(bool v)
return v ? "enabled" : "disabled";
}
+static inline void add_taint_for_CI(unsigned int taint)
+{
+ /*
+ * The system is "ok", just about surviving for the user, but
+ * CI results are now unreliable as the HW is very suspect.
+ * CI checks the taint state after every test and will reboot
+ * the machine if the kernel is tainted.
+ */
+ add_taint(taint, LOCKDEP_STILL_OK);
+}
+
#endif /* !__I915_UTILS_H */
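
[Note: the old ptr_count_inc/dec mutated a pointer through a pointer-to-pointer; the replacements are pure expressions that return the adjusted value, which composes better (for example inside a cmpxchg loop on a tagged pointer). The idea relies on alignment: with 8-byte-aligned allocations the low three bits of the word are free to carry a small counter. A standalone sketch:

	#include <stdint.h>

	#define ptr_inc(ptr) ((__typeof__(ptr))((uintptr_t)(ptr) + 1))
	#define ptr_dec(ptr) ((__typeof__(ptr))((uintptr_t)(ptr) - 1))

	/* Usage: pack a use-count into the low bits of an aligned pointer.
	 *
	 *	slot = ptr_inc(slot);	 take a reference
	 *	slot = ptr_dec(slot);	 drop it again
	 *
	 * The real pointer is recovered by masking the low bits off,
	 * as ptr_mask_bits()/page_mask_bits() do in the hunk above.
	 */
]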
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index dbd1fa3c7d90..39bebf16edbe 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -21,7 +21,6 @@
* SOFTWARE.
*/
-#include "intel_drv.h"
#include "i915_vgpu.h"
/**
@@ -120,6 +119,9 @@ static struct _balloon_info_ bl_info;
static void vgt_deballoon_space(struct i915_ggtt *ggtt,
struct drm_mm_node *node)
{
+ if (!drm_mm_node_allocated(node))
+ return;
+
DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
node->start,
node->start + node->size,
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index eb16a1a93bbc..e0e677b2a3a9 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "i915_globals.h"
+#include "i915_trace.h"
#include "i915_vma.h"
static struct i915_global_vma {
@@ -86,8 +87,7 @@ static inline struct i915_vma *active_to_vma(struct i915_active *ref)
static int __i915_vma_active(struct i915_active *ref)
{
- i915_vma_get(active_to_vma(ref));
- return 0;
+ return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}
static void __i915_vma_retire(struct i915_active *ref)
@@ -119,7 +119,6 @@ vma_create(struct drm_i915_gem_object *obj,
i915_active_init(vm->i915, &vma->active,
__i915_vma_active, __i915_vma_retire);
- INIT_ACTIVE_REQUEST(&vma->last_fence);
/* Declare ourselves safe for use inside shrinkers */
if (IS_ENABLED(CONFIG_LOCKDEP)) {
@@ -801,8 +800,6 @@ static void __i915_vma_destroy(struct i915_vma *vma)
GEM_BUG_ON(vma->node.allocated);
GEM_BUG_ON(vma->fence);
- GEM_BUG_ON(i915_active_request_isset(&vma->last_fence));
-
mutex_lock(&vma->vm->mutex);
list_del(&vma->vm_link);
mutex_unlock(&vma->vm->mutex);
@@ -867,7 +864,7 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
u64 vma_offset;
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vma->vm->mutex);
if (!i915_vma_has_userfault(vma))
return;
@@ -886,23 +883,6 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
list_del(&vma->obj->userfault_link);
}
-static void export_fence(struct i915_vma *vma,
- struct i915_request *rq,
- unsigned int flags)
-{
- struct reservation_object *resv = vma->resv;
-
- /*
- * Ignore errors from failing to allocate the new fence, we can't
- * handle an error right now. Worst case should be missed
- * synchronisation leading to rendering corruption.
- */
- if (flags & EXEC_OBJECT_WRITE)
- reservation_object_add_excl_fence(resv, &rq->fence);
- else if (reservation_object_reserve_shared(resv, 1) == 0)
- reservation_object_add_shared_fence(resv, &rq->fence);
-}
-
int i915_vma_move_to_active(struct i915_vma *vma,
struct i915_request *rq,
unsigned int flags)
@@ -922,27 +902,30 @@ int i915_vma_move_to_active(struct i915_vma *vma,
* add the active reference first and queue for it to be dropped
* *last*.
*/
- err = i915_active_ref(&vma->active, rq->fence.context, rq);
+ err = i915_active_ref(&vma->active, rq->timeline, rq);
if (unlikely(err))
return err;
- obj->write_domain = 0;
if (flags & EXEC_OBJECT_WRITE) {
- obj->write_domain = I915_GEM_DOMAIN_RENDER;
-
- if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
- __i915_active_request_set(&obj->frontbuffer_write, rq);
+ if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
+ i915_active_ref(&obj->frontbuffer->write,
+ rq->timeline,
+ rq);
+ dma_resv_add_excl_fence(vma->resv, &rq->fence);
+ obj->write_domain = I915_GEM_DOMAIN_RENDER;
obj->read_domains = 0;
+ } else {
+ err = dma_resv_reserve_shared(vma->resv, 1);
+ if (unlikely(err))
+ return err;
+
+ dma_resv_add_shared_fence(vma->resv, &rq->fence);
+ obj->write_domain = 0;
}
obj->read_domains |= I915_GEM_GPU_DOMAINS;
obj->mm.dirty = true;
- if (flags & EXEC_OBJECT_NEEDS_FENCE)
- __i915_active_request_set(&vma->last_fence, rq);
-
- export_fence(vma, rq, flags);
-
GEM_BUG_ON(!i915_vma_is_active(vma));
return 0;
}
@@ -973,14 +956,7 @@ int i915_vma_unbind(struct i915_vma *vma)
* before we are finished).
*/
__i915_vma_pin(vma);
-
ret = i915_active_wait(&vma->active);
- if (ret)
- goto unpin;
-
- ret = i915_active_request_retire(&vma->last_fence,
- &vma->vm->i915->drm.struct_mutex);
-unpin:
__i915_vma_unpin(vma);
if (ret)
return ret;
@@ -1006,12 +982,16 @@ unpin:
GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
/* release the fence reg _after_ flushing */
- ret = i915_vma_put_fence(vma);
+ mutex_lock(&vma->vm->mutex);
+ ret = i915_vma_revoke_fence(vma);
+ mutex_unlock(&vma->vm->mutex);
if (ret)
return ret;
/* Force a pagefault for domain tracking on next user access */
+ mutex_lock(&vma->vm->mutex);
i915_vma_revoke_mmap(vma);
+ mutex_unlock(&vma->vm->mutex);
__i915_vma_iounmap(vma);
vma->flags &= ~I915_VMA_CAN_FENCE;
@@ -1030,6 +1010,22 @@ unpin:
return 0;
}
+struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
+{
+ i915_gem_object_make_unshrinkable(vma->obj);
+ return vma;
+}
+
+void i915_vma_make_shrinkable(struct i915_vma *vma)
+{
+ i915_gem_object_make_shrinkable(vma->obj);
+}
+
+void i915_vma_make_purgeable(struct i915_vma *vma)
+{
+ i915_gem_object_make_purgeable(vma->obj);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif
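
[Note: i915_vma_move_to_active() now publishes the request fence directly into the object's dma_resv in place of the old export_fence() helper, and it checks the shared-slot reservation instead of silently dropping the fence on allocation failure. The core pattern, independent of i915 (the caller must hold the dma_resv lock):

	#include <linux/dma-fence.h>
	#include <linux/dma-resv.h>

	static int publish_fence(struct dma_resv *resv,
				 struct dma_fence *f, bool write)
	{
		int err;

		if (write) {
			/* Exclusive fence: supersedes the shared fences. */
			dma_resv_add_excl_fence(resv, f);
		} else {
			/* A shared slot may need allocation; can fail. */
			err = dma_resv_reserve_shared(resv, 1);
			if (err)
				return err;
			dma_resv_add_shared_fence(resv, f);
		}
		return 0;
	}
]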
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 4b769db649bf..889fc7cb910a 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -55,7 +55,7 @@ struct i915_vma {
struct i915_address_space *vm;
const struct i915_vma_ops *ops;
struct i915_fence_reg *fence;
- struct reservation_object *resv; /** Alias of obj->resv */
+ struct dma_resv *resv; /** Alias of obj->resv */
struct sg_table *pages;
void __iomem *iomap;
void *private; /* owned by creator */
@@ -111,7 +111,6 @@ struct i915_vma {
#define I915_VMA_GGTT_WRITE BIT(14)
struct i915_active active;
- struct i915_active_request last_fence;
/**
* Support different GGTT views into the same object.
@@ -232,6 +231,14 @@ static inline struct i915_vma *i915_vma_get(struct i915_vma *vma)
return vma;
}
+static inline struct i915_vma *i915_vma_tryget(struct i915_vma *vma)
+{
+ if (likely(kref_get_unless_zero(&vma->obj->base.refcount)))
+ return vma;
+
+ return NULL;
+}
+
static inline void i915_vma_put(struct i915_vma *vma)
{
i915_gem_object_put(vma->obj);
@@ -299,16 +306,16 @@ void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);
-#define assert_vma_held(vma) reservation_object_assert_held((vma)->resv)
+#define assert_vma_held(vma) dma_resv_assert_held((vma)->resv)
static inline void i915_vma_lock(struct i915_vma *vma)
{
- reservation_object_lock(vma->resv, NULL);
+ dma_resv_lock(vma->resv, NULL);
}
static inline void i915_vma_unlock(struct i915_vma *vma)
{
- reservation_object_unlock(vma->resv);
+ dma_resv_unlock(vma->resv);
}
int __i915_vma_do_pin(struct i915_vma *vma,
@@ -414,13 +421,13 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
*
* True if the vma has a fence, false otherwise.
*/
-int i915_vma_pin_fence(struct i915_vma *vma);
-int __must_check i915_vma_put_fence(struct i915_vma *vma);
+int __must_check i915_vma_pin_fence(struct i915_vma *vma);
+int __must_check i915_vma_revoke_fence(struct i915_vma *vma);
static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
{
- GEM_BUG_ON(vma->fence->pin_count <= 0);
- vma->fence->pin_count--;
+ GEM_BUG_ON(atomic_read(&vma->fence->pin_count) <= 0);
+ atomic_dec(&vma->fence->pin_count);
}
/**
@@ -459,4 +466,8 @@ void i915_vma_parked(struct drm_i915_private *i915);
struct i915_vma *i915_vma_alloc(void);
void i915_vma_free(struct i915_vma *vma);
+struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
+void i915_vma_make_shrinkable(struct i915_vma *vma);
+void i915_vma_make_purgeable(struct i915_vma *vma);
+
#endif
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 6ef74531588a..546577e39b4e 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -39,6 +39,11 @@
#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
+#define TGL_CSR_PATH "i915/tgl_dmc_ver2_04.bin"
+#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 4)
+#define TGL_CSR_MAX_FW_SIZE 0x6000
+MODULE_FIRMWARE(TGL_CSR_PATH);
+
#define ICL_CSR_PATH "i915/icl_dmc_ver1_07.bin"
#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
#define ICL_CSR_MAX_FW_SIZE 0x6000
@@ -674,6 +679,8 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
intel_csr_runtime_pm_get(dev_priv);
if (INTEL_GEN(dev_priv) >= 12) {
+ csr->fw_path = TGL_CSR_PATH;
+ csr->required_version = TGL_CSR_VERSION_REQUIRED;
/* Allow loading fw via parameter using the last known size */
csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
} else if (IS_GEN(dev_priv, 11)) {
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index f99c9fd497b2..d0ed44d33484 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -716,7 +716,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
}
return freq;
- } else if (INTEL_GEN(dev_priv) <= 11) {
+ } else if (INTEL_GEN(dev_priv) <= 12) {
u32 ctc_reg = I915_READ(CTC_MODE);
u32 freq = 0;
@@ -1022,8 +1022,9 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
/*
* In Gen11, only even-numbered logical VDBOXes are
* hooked up to an SFC (Scaler & Format Converter) unit.
+ * In TGL, each VDBOX has access to an SFC.
*/
- if (logical_vdbox++ % 2 == 0)
+ if (IS_TIGERLAKE(dev_priv) || logical_vdbox++ % 2 == 0)
RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
}
DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
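
An illustrative recap of the SFC rule changed above (a sketch; vdbox_enabled() stands in for the real fuse check): before Tiger Lake only even-numbered logical VDBOXes reach a Scaler & Format Converter, while on Tiger Lake every enabled VDBOX does.

    u32 vdbox_sfc_access = 0;
    unsigned int i, logical_vdbox = 0;

    for (i = 0; i < I915_MAX_VCS; i++) {
        if (!vdbox_enabled(i)) /* hypothetical fuse check */
            continue;
        if (IS_TIGERLAKE(dev_priv) || logical_vdbox++ % 2 == 0)
            vdbox_sfc_access |= BIT(i);
    }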
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 4f58e8d71b67..92e0c2e0954c 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -112,6 +112,7 @@ enum intel_ppgtt_type {
func(gpu_reset_clobbers_display); \
func(has_reset_engine); \
func(has_fpga_dbg); \
+ func(has_global_mocs); \
func(has_gt_uc); \
func(has_l3_dpf); \
func(has_llc); \
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index c66b2d8a6219..2b6c016387c2 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -95,7 +95,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
{
int ret;
- if (i915_inject_probe_failure())
+ if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
if (!i915_modparams.enable_gvt) {
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
new file mode 100644
index 000000000000..fa864d8f2b73
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2019 Intel Corporation.
+ */
+
+#include "i915_drv.h"
+#include "intel_pch.h"
+
+/* Map PCH device id to PCH type, or PCH_NONE if unknown. */
+static enum intel_pch
+intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
+{
+ switch (id) {
+ case INTEL_PCH_IBX_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+ WARN_ON(!IS_GEN(dev_priv, 5));
+ return PCH_IBX;
+ case INTEL_PCH_CPT_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+ WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
+ return PCH_CPT;
+ case INTEL_PCH_PPT_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found PantherPoint PCH\n");
+ WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
+ /* PantherPoint is CPT compatible */
+ return PCH_CPT;
+ case INTEL_PCH_LPT_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+ return PCH_LPT;
+ case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+ return PCH_LPT;
+ case INTEL_PCH_WPT_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+ /* WildcatPoint is LPT compatible */
+ return PCH_LPT;
+ case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+ /* WildcatPoint is LPT compatible */
+ return PCH_LPT;
+ case INTEL_PCH_SPT_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+ WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+ return PCH_SPT;
+ case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+ WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+ return PCH_SPT;
+ case INTEL_PCH_KBP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
+ WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
+ /* KBP is SPT compatible */
+ return PCH_SPT;
+ case INTEL_PCH_CNP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
+ WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+ return PCH_CNP;
+ case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
+ WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+ return PCH_CNP;
+ case INTEL_PCH_CMP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
+ WARN_ON(!IS_COFFEELAKE(dev_priv));
+ /* CometPoint is CNP compatible */
+ return PCH_CNP;
+ case INTEL_PCH_ICP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Ice Lake PCH\n");
+ WARN_ON(!IS_ICELAKE(dev_priv));
+ return PCH_ICP;
+ case INTEL_PCH_MCC_DEVICE_ID_TYPE:
+ case INTEL_PCH_MCC2_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n");
+ WARN_ON(!IS_ELKHARTLAKE(dev_priv));
+ return PCH_MCC;
+ case INTEL_PCH_TGP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Tiger Lake LP PCH\n");
+ WARN_ON(!IS_TIGERLAKE(dev_priv));
+ return PCH_TGP;
+ default:
+ return PCH_NONE;
+ }
+}
+
+static bool intel_is_virt_pch(unsigned short id,
+ unsigned short svendor, unsigned short sdevice)
+{
+ return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
+ id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
+ (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
+ svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
+ sdevice == PCI_SUBDEVICE_ID_QEMU));
+}
+
+static unsigned short
+intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
+{
+ unsigned short id = 0;
+
+ /*
+ * In a virtualized passthrough environment we can end up in a
+ * setup where the ISA bridge cannot be passed through. In this
+ * case, a south bridge is emulated and we have to make an
+ * educated guess as to which PCH is really there.
+ */
+
+ if (IS_TIGERLAKE(dev_priv))
+ id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
+ else if (IS_ELKHARTLAKE(dev_priv))
+ id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
+ else if (IS_ICELAKE(dev_priv))
+ id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
+ else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+ else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
+ id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
+ else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+ id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
+ else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+ id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
+ else if (IS_GEN(dev_priv, 5))
+ id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
+
+ if (id)
+ DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
+ else
+ DRM_DEBUG_KMS("Assuming no PCH\n");
+
+ return id;
+}
+
+void intel_detect_pch(struct drm_i915_private *dev_priv)
+{
+ struct pci_dev *pch = NULL;
+
+ /*
+ * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
+ * make graphics device passthrough easy for the VMM, which then only
+ * needs to expose the ISA bridge to let the driver know the real
+ * hardware underneath. This is a requirement from the virtualization
+ * team.
+ *
+ * In some virtualized environments (e.g. XEN), there can be an
+ * irrelevant ISA bridge in the system. To work reliably, we should
+ * scan through all the ISA bridge devices and check for the first
+ * match, instead of only checking the first one.
+ */
+ while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
+ unsigned short id;
+ enum intel_pch pch_type;
+
+ if (pch->vendor != PCI_VENDOR_ID_INTEL)
+ continue;
+
+ id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+
+ pch_type = intel_pch_type(dev_priv, id);
+ if (pch_type != PCH_NONE) {
+ dev_priv->pch_type = pch_type;
+ dev_priv->pch_id = id;
+ break;
+ } else if (intel_is_virt_pch(id, pch->subsystem_vendor,
+ pch->subsystem_device)) {
+ id = intel_virt_detect_pch(dev_priv);
+ pch_type = intel_pch_type(dev_priv, id);
+
+ /* Sanity check virtual PCH id */
+ if (WARN_ON(id && pch_type == PCH_NONE))
+ id = 0;
+
+ dev_priv->pch_type = pch_type;
+ dev_priv->pch_id = id;
+ break;
+ }
+ }
+
+ /*
+ * Use PCH_NOP (PCH but no South Display) for PCH platforms without
+ * display.
+ */
+ if (pch && !HAS_DISPLAY(dev_priv)) {
+ DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
+ dev_priv->pch_type = PCH_NOP;
+ dev_priv->pch_id = 0;
+ }
+
+ if (!pch)
+ DRM_DEBUG_KMS("No PCH found.\n");
+
+ pci_dev_put(pch);
+}
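
A worked example of the id masking above (illustrative device id): only the upper bits of the ISA bridge's PCI device id select the PCH type, so e.g. an SPT-H part with device id 0xa148 matches INTEL_PCH_SPT_DEVICE_ID_TYPE:

    unsigned short id = 0xa148 & INTEL_PCH_DEVICE_ID_MASK; /* 0xa148 & 0xff80 == 0xa100 */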
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
new file mode 100644
index 000000000000..e6a2d65f19c6
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2019 Intel Corporation.
+ */
+
+#ifndef __INTEL_PCH__
+#define __INTEL_PCH__
+
+struct drm_i915_private;
+
+/*
+ * Sorted by south display engine compatibility.
+ * If a new PCH comes with a south display engine that is not
+ * inherited from the latest item, please do not add it to the
+ * end. Instead, add it right after its "parent" PCH.
+ */
+enum intel_pch {
+ PCH_NOP = -1, /* PCH without south display */
+ PCH_NONE = 0, /* No PCH present */
+ PCH_IBX, /* Ibexpeak PCH */
+ PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
+ PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
+ PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
+ PCH_CNP, /* Cannon/Comet Lake PCH */
+ PCH_ICP, /* Ice Lake PCH */
+ PCH_MCC, /* Mule Creek Canyon PCH */
+ PCH_TGP, /* Tiger Lake PCH */
+};
+
+#define INTEL_PCH_DEVICE_ID_MASK 0xff80
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80
+#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
+#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
+#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
+#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
+#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
+#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
+#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
+#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
+#define INTEL_PCH_MCC2_DEVICE_ID_TYPE 0x3880
+#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
+#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
+#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
+#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
+
+#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
+#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
+#define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
+#define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP)
+#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
+#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
+#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
+#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
+#define HAS_PCH_LPT_LP(dev_priv) \
+ (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
+ INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
+#define HAS_PCH_LPT_H(dev_priv) \
+ (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
+ INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_DEVICE_ID_TYPE)
+#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
+#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
+#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
+#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
+
+void intel_detect_pch(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_PCH__ */
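
An illustrative consumer of the relocated detection and macros (a sketch; the actual call sites are unchanged by this patch):

    intel_detect_pch(dev_priv);

    if (HAS_PCH_NOP(dev_priv)) {
        /* PCH present but display is fused off: skip the south display */
    } else if (HAS_PCH_SPLIT(dev_priv)) {
        /* PCH with a south display engine: program the FDI/PCH paths */
    }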
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 30399b245f07..75ee027abb80 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -34,12 +34,13 @@
#include <drm/drm_plane_helper.h>
#include "display/intel_atomic.h"
+#include "display/intel_display_types.h"
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"
#include "i915_drv.h"
#include "i915_irq.h"
-#include "intel_drv.h"
+#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
@@ -9168,9 +9169,6 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
{
- /* The GTT cache must be disabled if the system is using 2M pages. */
- bool can_use_gtt_cache = !HAS_PAGE_SIZES(dev_priv,
- I915_GTT_PAGE_SIZE_2M);
enum pipe pipe;
/* WaSwitchSolVfFArbitrationPriority:bdw */
@@ -9203,9 +9201,6 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
/* WaProgramL3SqcReg1Default:bdw */
gen8_set_l3sqc_credits(dev_priv, 30, 2);
- /* WaGttCachingOffByDefault:bdw */
- I915_WRITE(HSW_GTT_CACHE_EN, can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
-
/* WaKVMNotificationOnConfigChange:bdw */
I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
| KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
@@ -9470,12 +9465,6 @@ static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
* LSQC Setting Recommendations.
*/
gen8_set_l3sqc_credits(dev_priv, 38, 2);
-
- /*
- * GTT cache may not work with big pages, so if those
- * are ever enabled GTT cache may need to be disabled.
- */
- I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}
static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -9608,7 +9597,9 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
*/
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_GEN(dev_priv, 11))
+ if (IS_GEN(dev_priv, 12))
+ dev_priv->display.init_clock_gating = nop_init_clock_gating;
+ else if (IS_GEN(dev_priv, 11))
dev_priv->display.init_clock_gating = icl_init_clock_gating;
else if (IS_CANNONLAKE(dev_priv))
dev_priv->display.init_clock_gating = cnl_init_clock_gating;
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index b2a05850ea42..2fd3c097e1f5 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -32,6 +32,7 @@
#include <drm/drm_print.h>
#include "i915_drv.h"
+#include "i915_trace.h"
/**
* DOC: runtime pm
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index a115625e980c..e06b35b844a0 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -24,10 +24,8 @@
#include <asm/iosf_mbi.h>
-#include "intel_sideband.h"
-
#include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_sideband.h"
/*
* IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 475ab3d4d91d..9e583f13a9e4 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -25,8 +25,8 @@
#include <asm/iosf_mbi.h>
#include "i915_drv.h"
+#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "intel_drv.h"
#include "intel_pm.h"
#define FORCEWAKE_ACK_TIMEOUT_MS 50
@@ -34,6 +34,32 @@
#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
+void
+intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
+{
+ spin_lock_init(&mmio_debug->lock);
+ mmio_debug->unclaimed_mmio_check = 1;
+}
+
+static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
+{
+ lockdep_assert_held(&mmio_debug->lock);
+
+ /* Save and disable mmio debugging for the user bypass */
+ if (!mmio_debug->suspend_count++) {
+ mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
+ mmio_debug->unclaimed_mmio_check = 0;
+ }
+}
+
+static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
+{
+ lockdep_assert_held(&mmio_debug->lock);
+
+ if (!--mmio_debug->suspend_count)
+ mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
+}
+
static const char * const forcewake_domain_names[] = {
"render",
"blitter",
@@ -476,6 +502,11 @@ check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
bool ret = false;
+ lockdep_assert_held(&uncore->debug->lock);
+
+ if (uncore->debug->suspend_count)
+ return false;
+
if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
ret |= fpga_check_for_unclaimed_mmio(uncore);
@@ -608,17 +639,11 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
spin_lock_irq(&uncore->lock);
- if (!uncore->user_forcewake.count++) {
+ if (!uncore->user_forcewake_count++) {
intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
-
- /* Save and disable mmio debugging for the user bypass */
- uncore->user_forcewake.saved_mmio_check =
- uncore->unclaimed_mmio_check;
- uncore->user_forcewake.saved_mmio_debug =
- i915_modparams.mmio_debug;
-
- uncore->unclaimed_mmio_check = 0;
- i915_modparams.mmio_debug = 0;
+ spin_lock(&uncore->debug->lock);
+ mmio_debug_suspend(uncore->debug);
+ spin_unlock(&uncore->debug->lock);
}
spin_unlock_irq(&uncore->lock);
}
@@ -633,15 +658,14 @@ void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
spin_lock_irq(&uncore->lock);
- if (!--uncore->user_forcewake.count) {
- if (intel_uncore_unclaimed_mmio(uncore))
+ if (!--uncore->user_forcewake_count) {
+ spin_lock(&uncore->debug->lock);
+ mmio_debug_resume(uncore->debug);
+
+ if (check_for_unclaimed_mmio(uncore))
dev_info(uncore->i915->drm.dev,
"Invalid mmio detected during user access\n");
-
- uncore->unclaimed_mmio_check =
- uncore->user_forcewake.saved_mmio_check;
- i915_modparams.mmio_debug =
- uncore->user_forcewake.saved_mmio_debug;
+ spin_unlock(&uncore->debug->lock);
intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
}
@@ -1088,7 +1112,16 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
if (likely(!i915_modparams.mmio_debug))
return;
+ /* interrupts are disabled and re-enabled around uncore->lock usage */
+ lockdep_assert_held(&uncore->lock);
+
+ if (before)
+ spin_lock(&uncore->debug->lock);
+
__unclaimed_reg_debug(uncore, reg, read, before);
+
+ if (!before)
+ spin_unlock(&uncore->debug->lock);
}
#define GEN2_READ_HEADER(x) \
@@ -1331,7 +1364,7 @@ static int __fw_domain_init(struct intel_uncore *uncore,
GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
GEM_BUG_ON(uncore->fw_domain[domain_id]);
- if (i915_inject_probe_failure())
+ if (i915_inject_probe_failure(uncore->i915))
return -ENOMEM;
d = kzalloc(sizeof(*d), GFP_KERNEL);
@@ -1607,6 +1640,7 @@ void intel_uncore_init_early(struct intel_uncore *uncore,
spin_lock_init(&uncore->lock);
uncore->i915 = i915;
uncore->rpm = &i915->runtime_pm;
+ uncore->debug = &i915->mmio_debug;
}
static void uncore_raw_init(struct intel_uncore *uncore)
@@ -1632,7 +1666,6 @@ static int uncore_forcewake_init(struct intel_uncore *uncore)
ret = intel_uncore_fw_domains_init(uncore);
if (ret)
return ret;
-
forcewake_early_sanitize(uncore, 0);
if (IS_GEN_RANGE(i915, 6, 7)) {
@@ -1681,8 +1714,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
uncore->flags |= UNCORE_HAS_FORCEWAKE;
- uncore->unclaimed_mmio_check = 1;
-
if (!intel_uncore_has_forcewake(uncore)) {
uncore_raw_init(uncore);
} else {
@@ -1707,7 +1738,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
uncore->flags |= UNCORE_HAS_FIFO;
/* clear out unclaimed reg detection bit */
- if (check_for_unclaimed_mmio(uncore))
+ if (intel_uncore_unclaimed_mmio(uncore))
DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
return 0;
@@ -1776,7 +1807,7 @@ static const struct reg_whitelist {
} reg_read_whitelist[] = { {
.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
- .gen_mask = INTEL_GEN_MASK(4, 11),
+ .gen_mask = INTEL_GEN_MASK(4, 12),
.size = 8
} };
@@ -1860,7 +1891,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
* wish to wait without holding forcewake for the duration (i.e. you expect
* the wait to be slow).
*
- * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
+ * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
*/
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
i915_reg_t reg,
@@ -1908,7 +1939,7 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore,
*
* Otherwise, the wait will time out after @timeout_ms milliseconds.
*
- * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
+ * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
*/
int __intel_wait_for_register(struct intel_uncore *uncore,
i915_reg_t reg,
@@ -1952,7 +1983,13 @@ int __intel_wait_for_register(struct intel_uncore *uncore,
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
- return check_for_unclaimed_mmio(uncore);
+ bool ret;
+
+ spin_lock_irq(&uncore->debug->lock);
+ ret = check_for_unclaimed_mmio(uncore);
+ spin_unlock_irq(&uncore->debug->lock);
+
+ return ret;
}
bool
@@ -1960,24 +1997,24 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
bool ret = false;
- spin_lock_irq(&uncore->lock);
+ spin_lock_irq(&uncore->debug->lock);
- if (unlikely(uncore->unclaimed_mmio_check <= 0))
+ if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
goto out;
- if (unlikely(intel_uncore_unclaimed_mmio(uncore))) {
+ if (unlikely(check_for_unclaimed_mmio(uncore))) {
if (!i915_modparams.mmio_debug) {
DRM_DEBUG("Unclaimed register detected, "
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
i915_modparams.mmio_debug++;
}
- uncore->unclaimed_mmio_check--;
+ uncore->debug->unclaimed_mmio_check--;
ret = true;
}
out:
- spin_unlock_irq(&uncore->lock);
+ spin_unlock_irq(&uncore->debug->lock);
return ret;
}
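
A sketch of the bracket the reworked user-forcewake path expects (illustrative; this mirrors how the debugfs forcewake user interface uses it):

    intel_uncore_forcewake_user_get(uncore);  /* holds fw, suspends mmio debug */

    /* ... registers are poked directly on the user's behalf ... */

    intel_uncore_forcewake_user_put(uncore);  /* resumes mmio debug and reports
                                               * any unclaimed access seen
                                               * during the bypass */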
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 2f6ffa309669..414fc2cb0459 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -36,6 +36,13 @@ struct drm_i915_private;
struct intel_runtime_pm;
struct intel_uncore;
+struct intel_uncore_mmio_debug {
+ spinlock_t lock; /* lock is also taken in irq contexts. */
+ int unclaimed_mmio_check;
+ int saved_mmio_check;
+ u32 suspend_count;
+};
+
enum forcewake_domain_id {
FW_DOMAIN_ID_RENDER = 0,
FW_DOMAIN_ID_BLITTER,
@@ -137,14 +144,9 @@ struct intel_uncore {
u32 __iomem *reg_ack;
} *fw_domain[FW_DOMAIN_ID_COUNT];
- struct {
- unsigned int count;
-
- int saved_mmio_check;
- int saved_mmio_debug;
- } user_forcewake;
+ unsigned int user_forcewake_count;
- int unclaimed_mmio_check;
+ struct intel_uncore_mmio_debug *debug;
};
/* Iterate over initialised fw domains */
@@ -179,6 +181,8 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore)
return uncore->flags & UNCORE_HAS_FIFO;
}
+void
+intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
void intel_uncore_init_early(struct intel_uncore *uncore,
struct drm_i915_private *i915);
int intel_uncore_init_mmio(struct intel_uncore *uncore);
@@ -393,6 +397,18 @@ static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore,
intel_uncore_write_fw(uncore, reg, val);
}
+static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
+ i915_reg_t reg, u32 val,
+ u32 mask, u32 expected_val)
+{
+ u32 reg_val;
+
+ intel_uncore_write(uncore, reg, val);
+ reg_val = intel_uncore_read(uncore, reg);
+
+ return (reg_val & mask) != expected_val ? -EINVAL : 0;
+}
+
#define raw_reg_read(base, reg) \
readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \
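
The new intel_uncore_write_and_verify() generalizes the write_and_verify() helper removed from intel_wopcm.c below. An illustrative use against a self-locking register (the value and mask shown are assumptions for the sketch):

    err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE,
                                        wopcm->guc.size | GUC_WOPCM_SIZE_LOCKED,
                                        GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED,
                                        wopcm->guc.size | GUC_WOPCM_SIZE_LOCKED);
    if (err)
        return err; /* the register latched an unexpected value */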
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 06bd8b215cc2..868cc78048d0 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -4,25 +4,25 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/wait_bit.h>
+
#include "intel_runtime_pm.h"
#include "intel_wakeref.h"
-static void rpm_get(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
+static void rpm_get(struct intel_wakeref *wf)
{
- wf->wakeref = intel_runtime_pm_get(rpm);
+ wf->wakeref = intel_runtime_pm_get(wf->rpm);
}
-static void rpm_put(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
+static void rpm_put(struct intel_wakeref *wf)
{
intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);
- intel_runtime_pm_put(rpm, wakeref);
+ intel_runtime_pm_put(wf->rpm, wakeref);
INTEL_WAKEREF_BUG_ON(!wakeref);
}
-int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf))
+int __intel_wakeref_get_first(struct intel_wakeref *wf)
{
/*
* Treat get/put as different subclasses, as we may need to run
@@ -34,11 +34,11 @@ int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
if (!atomic_read(&wf->count)) {
int err;
- rpm_get(rpm, wf);
+ rpm_get(wf);
- err = fn(wf);
+ err = wf->ops->get(wf);
if (unlikely(err)) {
- rpm_put(rpm, wf);
+ rpm_put(wf);
mutex_unlock(&wf->mutex);
return err;
}
@@ -52,27 +52,65 @@ int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
return 0;
}
-int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf))
+static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
- int err;
+ if (!atomic_dec_and_test(&wf->count))
+ goto unlock;
+
+ /* ops->put() must reschedule its own release on error/deferral */
+ if (likely(!wf->ops->put(wf))) {
+ rpm_put(wf);
+ wake_up_var(&wf->wakeref);
+ }
- err = fn(wf);
- if (likely(!err))
- rpm_put(rpm, wf);
- else
- atomic_inc(&wf->count);
+unlock:
mutex_unlock(&wf->mutex);
+}
+
+void __intel_wakeref_put_last(struct intel_wakeref *wf)
+{
+ INTEL_WAKEREF_BUG_ON(work_pending(&wf->work));
+
+ /* Assume we are not in process context and so cannot sleep. */
+ if (wf->ops->flags & INTEL_WAKEREF_PUT_ASYNC ||
+ !mutex_trylock(&wf->mutex)) {
+ schedule_work(&wf->work);
+ return;
+ }
+
+ ____intel_wakeref_put_last(wf);
+}
+
+static void __intel_wakeref_put_work(struct work_struct *wrk)
+{
+ struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work);
- return err;
+ if (atomic_add_unless(&wf->count, -1, 1))
+ return;
+
+ mutex_lock(&wf->mutex);
+ ____intel_wakeref_put_last(wf);
}
-void __intel_wakeref_init(struct intel_wakeref *wf, struct lock_class_key *key)
+void __intel_wakeref_init(struct intel_wakeref *wf,
+ struct intel_runtime_pm *rpm,
+ const struct intel_wakeref_ops *ops,
+ struct lock_class_key *key)
{
+ wf->rpm = rpm;
+ wf->ops = ops;
+
__mutex_init(&wf->mutex, "wakeref", key);
atomic_set(&wf->count, 0);
wf->wakeref = 0;
+
+ INIT_WORK(&wf->work, __intel_wakeref_put_work);
+}
+
+int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
+{
+ return wait_var_event_killable(&wf->wakeref,
+ !intel_wakeref_is_active(wf));
}
static void wakeref_auto_timeout(struct timer_list *t)
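
An illustrative pairing for the asynchronous release path above: a final put from atomic context defers ops->put() to wf->work, and intel_wakeref_wait_for_idle() lets a caller flush that deferred release (a sketch):

    intel_wakeref_put(wf);                 /* may schedule wf->work */

    err = intel_wakeref_wait_for_idle(wf); /* sleeps until the deferred release
                                            * has run, or returns an error if
                                            * the wait is killed */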
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 1d6f5986e4e5..5f0c972a80fb 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -8,10 +8,12 @@
#define INTEL_WAKEREF_H
#include <linux/atomic.h>
+#include <linux/bits.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
+#include <linux/workqueue.h>
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
@@ -20,29 +22,42 @@
#endif
struct intel_runtime_pm;
+struct intel_wakeref;
typedef depot_stack_handle_t intel_wakeref_t;
+struct intel_wakeref_ops {
+ int (*get)(struct intel_wakeref *wf);
+ int (*put)(struct intel_wakeref *wf);
+
+ unsigned long flags;
+#define INTEL_WAKEREF_PUT_ASYNC BIT(0)
+};
+
struct intel_wakeref {
atomic_t count;
struct mutex mutex;
+
intel_wakeref_t wakeref;
+
+ struct intel_runtime_pm *rpm;
+ const struct intel_wakeref_ops *ops;
+
+ struct work_struct work;
};
void __intel_wakeref_init(struct intel_wakeref *wf,
+ struct intel_runtime_pm *rpm,
+ const struct intel_wakeref_ops *ops,
struct lock_class_key *key);
-#define intel_wakeref_init(wf) do { \
+#define intel_wakeref_init(wf, rpm, ops) do { \
static struct lock_class_key __key; \
\
- __intel_wakeref_init((wf), &__key); \
+ __intel_wakeref_init((wf), (rpm), (ops), &__key); \
} while (0)
-int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf));
-int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf));
+int __intel_wakeref_get_first(struct intel_wakeref *wf);
+void __intel_wakeref_put_last(struct intel_wakeref *wf);
/**
* intel_wakeref_get: Acquire the wakeref
@@ -61,12 +76,10 @@ int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
* code otherwise.
*/
static inline int
-intel_wakeref_get(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf))
+intel_wakeref_get(struct intel_wakeref *wf)
{
if (unlikely(!atomic_inc_not_zero(&wf->count)))
- return __intel_wakeref_get_first(rpm, wf, fn);
+ return __intel_wakeref_get_first(wf);
return 0;
}
@@ -102,16 +115,12 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
* Returns: 0 if the wakeref was released successfully, or a negative error
* code otherwise.
*/
-static inline int
-intel_wakeref_put(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf))
+static inline void
+intel_wakeref_put(struct intel_wakeref *wf)
{
INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
- if (atomic_dec_and_mutex_lock(&wf->count, &wf->mutex))
- return __intel_wakeref_put_last(rpm, wf, fn);
-
- return 0;
+ if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
+ __intel_wakeref_put_last(wf);
}
/**
@@ -154,6 +163,30 @@ intel_wakeref_is_active(const struct intel_wakeref *wf)
return READ_ONCE(wf->wakeref);
}
+/**
+ * __intel_wakeref_defer_park: Defer the current park callback
+ * @wf: the wakeref
+ */
+static inline void
+__intel_wakeref_defer_park(struct intel_wakeref *wf)
+{
+ INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
+ atomic_set_release(&wf->count, 1);
+}
+
+/**
+ * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
+ * @wf: the wakeref
+ *
+ * Wait for the earlier asynchronous release of the wakeref. Note
+ * this will wait for any third party as well, so make sure you only wait
+ * when you have control over the wakeref and trust no one else is acquiring
+ * it.
+ *
+ * Return: 0 on success, error code if killed.
+ */
+int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
+
struct intel_wakeref_auto {
struct intel_runtime_pm *rpm;
struct timer_list timer;
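
A minimal sketch of wiring a component into the reworked interface (illustrative; "foo" is hypothetical). The ops are supplied once at init time instead of at every get/put call:

    static int foo_get(struct intel_wakeref *wf) { return 0; /* power up */ }
    static int foo_put(struct intel_wakeref *wf) { return 0; /* power down */ }

    static const struct intel_wakeref_ops foo_wakeref_ops = {
        .get = foo_get,
        .put = foo_put,
        .flags = INTEL_WAKEREF_PUT_ASYNC, /* final put may run in atomic ctx */
    };

    intel_wakeref_init(&foo->wakeref, &i915->runtime_pm, &foo_wakeref_ops);

    if (intel_wakeref_get(&foo->wakeref) == 0) {
        /* ... component is awake ... */
        intel_wakeref_put(&foo->wakeref);
    }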
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index 0e86a9e85b49..2bb9f9f9a50a 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2017-2018 Intel Corporation
+ * Copyright © 2017-2019 Intel Corporation
*/
#include "intel_wopcm.h"
@@ -64,6 +63,11 @@
#define GEN9_GUC_FW_RESERVED SZ_128K
#define GEN9_GUC_WOPCM_OFFSET (GUC_WOPCM_RESERVED + GEN9_GUC_FW_RESERVED)
+static inline struct drm_i915_private *wopcm_to_i915(struct intel_wopcm *wopcm)
+{
+ return container_of(wopcm, struct drm_i915_private, wopcm);
+}
+
/**
* intel_wopcm_init_early() - Early initialization of the WOPCM.
* @wopcm: pointer to intel_wopcm.
@@ -82,7 +86,7 @@ void intel_wopcm_init_early(struct intel_wopcm *wopcm)
else
wopcm->size = GEN9_WOPCM_SIZE;
- DRM_DEBUG_DRIVER("WOPCM size: %uKiB\n", wopcm->size / 1024);
+ DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "WOPCM: %uK\n", wopcm->size / SZ_1K);
}
static inline u32 context_reserved_size(struct drm_i915_private *i915)
@@ -95,7 +99,8 @@ static inline u32 context_reserved_size(struct drm_i915_private *i915)
return 0;
}
-static inline int gen9_check_dword_gap(u32 guc_wopcm_base, u32 guc_wopcm_size)
+static inline bool gen9_check_dword_gap(struct drm_i915_private *i915,
+ u32 guc_wopcm_base, u32 guc_wopcm_size)
{
u32 offset;
@@ -107,16 +112,18 @@ static inline int gen9_check_dword_gap(u32 guc_wopcm_base, u32 guc_wopcm_size)
offset = guc_wopcm_base + GEN9_GUC_WOPCM_OFFSET;
if (offset > guc_wopcm_size ||
(guc_wopcm_size - offset) < sizeof(u32)) {
- DRM_ERROR("GuC WOPCM size %uKiB is too small. %uKiB needed.\n",
- guc_wopcm_size / 1024,
- (u32)(offset + sizeof(u32)) / 1024);
- return -E2BIG;
+ dev_err(i915->drm.dev,
+ "WOPCM: invalid GuC region size: %uK < %uK\n",
+ guc_wopcm_size / SZ_1K,
+ (u32)(offset + sizeof(u32)) / SZ_1K);
+ return false;
}
- return 0;
+ return true;
}
-static inline int gen9_check_huc_fw_fits(u32 guc_wopcm_size, u32 huc_fw_size)
+static inline bool gen9_check_huc_fw_fits(struct drm_i915_private *i915,
+ u32 guc_wopcm_size, u32 huc_fw_size)
{
/*
* On Gen9 & CNL A0, hardware requires the total available GuC WOPCM
@@ -124,29 +131,81 @@ static inline int gen9_check_huc_fw_fits(u32 guc_wopcm_size, u32 huc_fw_size)
* firmware uploading would fail.
*/
if (huc_fw_size > guc_wopcm_size - GUC_WOPCM_RESERVED) {
- DRM_ERROR("HuC FW (%uKiB) won't fit in GuC WOPCM (%uKiB).\n",
- huc_fw_size / 1024,
- (guc_wopcm_size - GUC_WOPCM_RESERVED) / 1024);
- return -E2BIG;
+ dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+ intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
+ (guc_wopcm_size - GUC_WOPCM_RESERVED) / SZ_1K,
+ huc_fw_size / SZ_1K);
+ return false;
}
- return 0;
+ return true;
}
-static inline int check_hw_restriction(struct drm_i915_private *i915,
- u32 guc_wopcm_base, u32 guc_wopcm_size,
- u32 huc_fw_size)
+static inline bool check_hw_restrictions(struct drm_i915_private *i915,
+ u32 guc_wopcm_base, u32 guc_wopcm_size,
+ u32 huc_fw_size)
{
- int err = 0;
+ if (IS_GEN(i915, 9) && !gen9_check_dword_gap(i915, guc_wopcm_base,
+ guc_wopcm_size))
+ return false;
- if (IS_GEN(i915, 9))
- err = gen9_check_dword_gap(guc_wopcm_base, guc_wopcm_size);
+ if ((IS_GEN(i915, 9) ||
+ IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)) &&
+ !gen9_check_huc_fw_fits(i915, guc_wopcm_size, huc_fw_size))
+ return false;
- if (!err &&
- (IS_GEN(i915, 9) || IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)))
- err = gen9_check_huc_fw_fits(guc_wopcm_size, huc_fw_size);
+ return true;
+}
- return err;
+static inline bool __check_layout(struct drm_i915_private *i915, u32 wopcm_size,
+ u32 guc_wopcm_base, u32 guc_wopcm_size,
+ u32 guc_fw_size, u32 huc_fw_size)
+{
+ const u32 ctx_rsvd = context_reserved_size(i915);
+ u32 size;
+
+ size = wopcm_size - ctx_rsvd;
+ if (unlikely(range_overflows(guc_wopcm_base, guc_wopcm_size, size))) {
+ dev_err(i915->drm.dev,
+ "WOPCM: invalid GuC region layout: %uK + %uK > %uK\n",
+ guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K,
+ size / SZ_1K);
+ return false;
+ }
+
+ size = guc_fw_size + GUC_WOPCM_RESERVED + GUC_WOPCM_STACK_RESERVED;
+ if (unlikely(guc_wopcm_size < size)) {
+ dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+ intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC),
+ guc_wopcm_size / SZ_1K, size / SZ_1K);
+ return false;
+ }
+
+ size = huc_fw_size + WOPCM_RESERVED_SIZE;
+ if (unlikely(guc_wopcm_base < size)) {
+ dev_err(i915->drm.dev, "WOPCM: no space for %s: %uK < %uK\n",
+ intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
+ guc_wopcm_base / SZ_1K, size / SZ_1K);
+ return false;
+ }
+
+ return check_hw_restrictions(i915, guc_wopcm_base, guc_wopcm_size,
+ huc_fw_size);
+}
+
+static bool __wopcm_regs_locked(struct intel_uncore *uncore,
+ u32 *guc_wopcm_base, u32 *guc_wopcm_size)
+{
+ u32 reg_base = intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET);
+ u32 reg_size = intel_uncore_read(uncore, GUC_WOPCM_SIZE);
+
+ if (!(reg_size & GUC_WOPCM_SIZE_LOCKED) ||
+ !(reg_base & GUC_WOPCM_OFFSET_VALID))
+ return false;
+
+ *guc_wopcm_base = reg_base & GUC_WOPCM_OFFSET_MASK;
+ *guc_wopcm_size = reg_size & GUC_WOPCM_SIZE_MASK;
+ return true;
}
/**
@@ -156,139 +215,66 @@ static inline int check_hw_restriction(struct drm_i915_private *i915,
* This function will partition WOPCM space based on GuC and HuC firmware sizes
* and will allocate max remaining for use by GuC. This function will also
* enforce platform dependent hardware restrictions on GuC WOPCM offset and
- * size. It will fail the WOPCM init if any of these checks were failed, so that
- * the following GuC firmware uploading would be aborted.
- *
- * Return: 0 on success, non-zero error code on failure.
+ * size. It will fail the WOPCM init if any of these checks fail, so that the
+ * subsequent WOPCM register setup and GuC firmware upload are aborted.
*/
-int intel_wopcm_init(struct intel_wopcm *wopcm)
+void intel_wopcm_init(struct intel_wopcm *wopcm)
{
struct drm_i915_private *i915 = wopcm_to_i915(wopcm);
- u32 guc_fw_size = intel_uc_fw_get_upload_size(&i915->gt.uc.guc.fw);
- u32 huc_fw_size = intel_uc_fw_get_upload_size(&i915->gt.uc.huc.fw);
+ struct intel_gt *gt = &i915->gt;
+ u32 guc_fw_size = intel_uc_fw_get_upload_size(&gt->uc.guc.fw);
+ u32 huc_fw_size = intel_uc_fw_get_upload_size(&gt->uc.huc.fw);
u32 ctx_rsvd = context_reserved_size(i915);
u32 guc_wopcm_base;
u32 guc_wopcm_size;
- u32 guc_wopcm_rsvd;
- int err;
- if (!USES_GUC(i915))
- return 0;
+ if (!guc_fw_size)
+ return;
GEM_BUG_ON(!wopcm->size);
+ GEM_BUG_ON(wopcm->guc.base);
+ GEM_BUG_ON(wopcm->guc.size);
+ GEM_BUG_ON(guc_fw_size >= wopcm->size);
+ GEM_BUG_ON(huc_fw_size >= wopcm->size);
+ GEM_BUG_ON(ctx_rsvd + WOPCM_RESERVED_SIZE >= wopcm->size);
- if (i915_inject_probe_failure())
- return -E2BIG;
+ if (i915_inject_probe_failure(i915))
+ return;
- if (guc_fw_size >= wopcm->size) {
- DRM_ERROR("GuC FW (%uKiB) is too big to fit in WOPCM.",
- guc_fw_size / 1024);
- return -E2BIG;
+ if (__wopcm_regs_locked(gt->uncore, &guc_wopcm_base, &guc_wopcm_size)) {
+ DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
+ "GuC WOPCM is already locked [%uK, %uK)\n",
+ guc_wopcm_base / SZ_1K,
+ guc_wopcm_size / SZ_1K);
+ goto check;
}
- if (huc_fw_size >= wopcm->size) {
- DRM_ERROR("HuC FW (%uKiB) is too big to fit in WOPCM.",
- huc_fw_size / 1024);
- return -E2BIG;
- }
+ /*
+ * The aligned value of guc_wopcm_base determines the WOPCM space
+ * available for the HuC firmware and the mandatory reserved area.
+ */
+ guc_wopcm_base = huc_fw_size + WOPCM_RESERVED_SIZE;
+ guc_wopcm_base = ALIGN(guc_wopcm_base, GUC_WOPCM_OFFSET_ALIGNMENT);
- guc_wopcm_base = ALIGN(huc_fw_size + WOPCM_RESERVED_SIZE,
- GUC_WOPCM_OFFSET_ALIGNMENT);
- if ((guc_wopcm_base + ctx_rsvd) >= wopcm->size) {
- DRM_ERROR("GuC WOPCM base (%uKiB) is too big.\n",
- guc_wopcm_base / 1024);
- return -E2BIG;
- }
+ /*
+ * Clamp guc_wopcm_base now so the following math stays correct. The
+ * formal check of the whole WOPCM layout is done below.
+ */
+ guc_wopcm_base = min(guc_wopcm_base, wopcm->size - ctx_rsvd);
- guc_wopcm_size = wopcm->size - guc_wopcm_base - ctx_rsvd;
+ /* The aligned remainder of the usable WOPCM space can be assigned to GuC. */
+ guc_wopcm_size = wopcm->size - ctx_rsvd - guc_wopcm_base;
guc_wopcm_size &= GUC_WOPCM_SIZE_MASK;
- DRM_DEBUG_DRIVER("Calculated GuC WOPCM Region: [%uKiB, %uKiB)\n",
- guc_wopcm_base / 1024, guc_wopcm_size / 1024);
+ DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "Calculated GuC WOPCM [%uK, %uK)\n",
+ guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
- guc_wopcm_rsvd = GUC_WOPCM_RESERVED + GUC_WOPCM_STACK_RESERVED;
- if ((guc_fw_size + guc_wopcm_rsvd) > guc_wopcm_size) {
- DRM_ERROR("Need %uKiB WOPCM for GuC, %uKiB available.\n",
- (guc_fw_size + guc_wopcm_rsvd) / 1024,
- guc_wopcm_size / 1024);
- return -E2BIG;
+check:
+ if (__check_layout(i915, wopcm->size, guc_wopcm_base, guc_wopcm_size,
+ guc_fw_size, huc_fw_size)) {
+ wopcm->guc.base = guc_wopcm_base;
+ wopcm->guc.size = guc_wopcm_size;
+ GEM_BUG_ON(!wopcm->guc.base);
+ GEM_BUG_ON(!wopcm->guc.size);
}
-
- err = check_hw_restriction(i915, guc_wopcm_base, guc_wopcm_size,
- huc_fw_size);
- if (err)
- return err;
-
- wopcm->guc.base = guc_wopcm_base;
- wopcm->guc.size = guc_wopcm_size;
-
- return 0;
-}
-
-static int
-write_and_verify(struct intel_gt *gt,
- i915_reg_t reg, u32 val, u32 mask, u32 locked_bit)
-{
- struct intel_uncore *uncore = gt->uncore;
- u32 reg_val;
-
- GEM_BUG_ON(val & ~mask);
-
- intel_uncore_write(uncore, reg, val);
-
- reg_val = intel_uncore_read(uncore, reg);
-
- return (reg_val & mask) != (val | locked_bit) ? -EIO : 0;
-}
-
-/**
- * intel_wopcm_init_hw() - Setup GuC WOPCM registers.
- * @wopcm: pointer to intel_wopcm.
- * @gt: pointer to the containing GT
- *
- * Setup the GuC WOPCM size and offset registers with the calculated values. It
- * will verify the register values to make sure the registers are locked with
- * correct values.
- *
- * Return: 0 on success. -EIO if registers were locked with incorrect values.
- */
-int intel_wopcm_init_hw(struct intel_wopcm *wopcm, struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = wopcm_to_i915(wopcm);
- struct intel_uncore *uncore = gt->uncore;
- u32 huc_agent;
- u32 mask;
- int err;
-
- if (!USES_GUC(i915))
- return 0;
-
- GEM_BUG_ON(!HAS_GT_UC(i915));
- GEM_BUG_ON(!wopcm->guc.size);
- GEM_BUG_ON(!wopcm->guc.base);
-
- err = write_and_verify(gt, GUC_WOPCM_SIZE, wopcm->guc.size,
- GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED,
- GUC_WOPCM_SIZE_LOCKED);
- if (err)
- goto err_out;
-
- huc_agent = USES_HUC(i915) ? HUC_LOADING_AGENT_GUC : 0;
- mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
- err = write_and_verify(gt, DMA_GUC_WOPCM_OFFSET,
- wopcm->guc.base | huc_agent, mask,
- GUC_WOPCM_OFFSET_VALID);
- if (err)
- goto err_out;
-
- return 0;
-
-err_out:
- DRM_ERROR("Failed to init WOPCM registers:\n");
- DRM_ERROR("DMA_GUC_WOPCM_OFFSET=%#x\n",
- intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
- DRM_ERROR("GUC_WOPCM_SIZE=%#x\n",
- intel_uncore_read(uncore, GUC_WOPCM_SIZE));
-
- return err;
}
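
A symbolic recap of the partitioning and the constraints enforced above (no new behaviour, just the rules from the code collected in one place):

    /* Partitioning, when the registers are not already locked: */
    guc_wopcm_base = ALIGN(huc_fw_size + WOPCM_RESERVED_SIZE,
                           GUC_WOPCM_OFFSET_ALIGNMENT);
    guc_wopcm_base = min(guc_wopcm_base, wopcm->size - ctx_rsvd);
    guc_wopcm_size = (wopcm->size - ctx_rsvd - guc_wopcm_base) &
                     GUC_WOPCM_SIZE_MASK;

    /*
     * __check_layout() then requires all of:
     *   guc_wopcm_base + guc_wopcm_size <= wopcm->size - ctx_rsvd
     *   guc_wopcm_size >= guc_fw_size + GUC_WOPCM_RESERVED +
     *                     GUC_WOPCM_STACK_RESERVED
     *   guc_wopcm_base >= huc_fw_size + WOPCM_RESERVED_SIZE
     * plus the Gen9 / CNL-A0 hardware restrictions.
     */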
diff --git a/drivers/gpu/drm/i915/intel_wopcm.h b/drivers/gpu/drm/i915/intel_wopcm.h
index 56aaed4d64ff..17d6aa86008a 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.h
+++ b/drivers/gpu/drm/i915/intel_wopcm.h
@@ -9,8 +9,6 @@
#include <linux/types.h>
-struct intel_gt;
-
/**
* struct intel_wopcm - Overall WOPCM info and WOPCM regions.
* @size: Size of overall WOPCM.
@@ -27,6 +25,21 @@ struct intel_wopcm {
};
/**
+ * intel_wopcm_guc_base()
+ * @wopcm: intel_wopcm structure
+ *
+ * Returns the base of the WOPCM shadowed region.
+ *
+ * Returns:
+ * 0 if GuC is not present or not in use.
+ * Otherwise, the GuC WOPCM base.
+ */
+static inline u32 intel_wopcm_guc_base(struct intel_wopcm *wopcm)
+{
+ return wopcm->guc.base;
+}
+
+/**
* intel_wopcm_guc_size()
* @wopcm: intel_wopcm structure
*
@@ -42,7 +55,6 @@ static inline u32 intel_wopcm_guc_size(struct intel_wopcm *wopcm)
}
void intel_wopcm_init_early(struct intel_wopcm *wopcm);
-int intel_wopcm_init(struct intel_wopcm *wopcm);
-int intel_wopcm_init_hw(struct intel_wopcm *wopcm, struct intel_gt *gt);
+void intel_wopcm_init(struct intel_wopcm *wopcm);
#endif
diff --git a/drivers/gpu/drm/i915/oa/Makefile b/drivers/gpu/drm/i915/oa/Makefile
index e69de29bb2d1..df028e2b0d64 100644
--- a/drivers/gpu/drm/i915/oa/Makefile
+++ b/drivers/gpu/drm/i915/oa/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: MIT
+
+# For building individual subdir files on the command line
+subdir-ccflags-y += -I$(srctree)/$(src)/..
+
+# Extra header tests
+header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bdw.c b/drivers/gpu/drm/i915/oa/i915_oa_bdw.c
index 4acdb94555b7..14da5c3b569d 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_bdw.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_bdw.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -66,26 +65,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"d6de6f55-e526-4f79-a6a6-d7315c09044e",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "d6de6f55-e526-4f79-a6a6-d7315c09044e";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "d6de6f55-e526-4f79-a6a6-d7315c09044e";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bdw.h b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h
index b5ed68882588..0cee3334f0a6 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_bdw.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -10,6 +9,8 @@
#ifndef __I915_OA_BDW_H__
#define __I915_OA_BDW_H__
+struct drm_i915_private;
+
void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bxt.c b/drivers/gpu/drm/i915/oa/i915_oa_bxt.c
index a44195c39923..3e785bafcf99 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_bxt.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_bxt.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -64,26 +63,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"5ee72f5c-092f-421e-8b70-225f7c3e9612",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "5ee72f5c-092f-421e-8b70-225f7c3e9612";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "5ee72f5c-092f-421e-8b70-225f7c3e9612";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bxt.h b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h
index 43c3e4ab030a..0bdf391323ec 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_bxt.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -10,6 +9,8 @@
#ifndef __I915_OA_BXT_H__
#define __I915_OA_BXT_H__
+struct drm_i915_private;
+
void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c
index 7f60d51b8761..0ea86f70a06c 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"74fb4902-d3d3-4237-9e90-cbdc68d0a446",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "74fb4902-d3d3-4237-9e90-cbdc68d0a446";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "74fb4902-d3d3-4237-9e90-cbdc68d0a446";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h
index 1b4b563bc585..6b862280ab78 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -10,6 +9,8 @@
#ifndef __I915_OA_CFLGT2_H__
#define __I915_OA_CFLGT2_H__
+struct drm_i915_private;
+
void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c
index a92c38e3a0ce..fc632dd890bf 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"577e8e2c-3fa0-4875-8743-3538d585e3b0",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "577e8e2c-3fa0-4875-8743-3538d585e3b0";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "577e8e2c-3fa0-4875-8743-3538d585e3b0";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h
index 500565e055cd..4ca9d8f89b2f 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -10,6 +9,8 @@
#ifndef __I915_OA_CFLGT3_H__
#define __I915_OA_CFLGT3_H__
+struct drm_i915_private;
+
void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_chv.c b/drivers/gpu/drm/i915/oa/i915_oa_chv.c
index 71ec889a0114..6cd4e9921a8a 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_chv.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_chv.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"4a534b07-cba3-414d-8d60-874830e883aa",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "4a534b07-cba3-414d-8d60-874830e883aa";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "4a534b07-cba3-414d-8d60-874830e883aa";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_chv.h b/drivers/gpu/drm/i915/oa/i915_oa_chv.h
index ad85d6a6a573..3cac7bbc9c71 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_chv.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_chv.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -10,6 +9,8 @@
#ifndef __I915_OA_CHV_H__
#define __I915_OA_CHV_H__
+struct drm_i915_private;
+
void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cnl.c b/drivers/gpu/drm/i915/oa/i915_oa_cnl.c
index 5c23d883d6c9..1041e8914993 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_cnl.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cnl.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -77,26 +76,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"db41edd4-d8e7-4730-ad11-b9a2d6833503",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "db41edd4-d8e7-4730-ad11-b9a2d6833503";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "db41edd4-d8e7-4730-ad11-b9a2d6833503";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cnl.h b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h
index 9faaca38b587..db379f5fcbb9 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_cnl.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -10,6 +9,8 @@
#ifndef __I915_OA_CNL_H__
#define __I915_OA_CNL_H__
+struct drm_i915_private;
+
void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_glk.c b/drivers/gpu/drm/i915/oa/i915_oa_glk.c
index 4bdda66df7d2..bd15ebe9aeeb 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_glk.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_glk.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -64,26 +63,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"dd3fd789-e783-4204-8cd0-b671bbccb0cf",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "dd3fd789-e783-4204-8cd0-b671bbccb0cf";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "dd3fd789-e783-4204-8cd0-b671bbccb0cf";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_glk.h b/drivers/gpu/drm/i915/oa/i915_oa_glk.h
index cc13a1e9fd3e..779f343efd11 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_glk.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_glk.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -10,6 +9,8 @@
#ifndef __I915_OA_GLK_H__
#define __I915_OA_GLK_H__
+struct drm_i915_private;
+
void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_hsw.c b/drivers/gpu/drm/i915/oa/i915_oa_hsw.c
index cc6526fdd2bd..133721a8619f 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_hsw.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_hsw.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -94,26 +93,26 @@ show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *b
void
i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"403d8832-1a27-4aa6-a64e-f5389ce7b212",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_render_basic;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_render_basic);
+ dev_priv->perf.test_config.mux_regs = mux_config_render_basic;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_render_basic);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_render_basic;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_render_basic);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_render_basic;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_render_basic);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_render_basic;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_render_basic);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_render_basic;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_render_basic);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "403d8832-1a27-4aa6-a64e-f5389ce7b212";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "403d8832-1a27-4aa6-a64e-f5389ce7b212";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_render_basic_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_render_basic_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_hsw.h b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h
index f0ddcc79c761..ba97f732f136 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_hsw.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -10,6 +9,8 @@
#ifndef __I915_OA_HSW_H__
#define __I915_OA_HSW_H__
+struct drm_i915_private;
+
void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_icl.c b/drivers/gpu/drm/i915/oa/i915_oa_icl.c
index baa51427a543..2d92041b754f 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_icl.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_icl.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -74,26 +73,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"a291665e-244b-4b76-9b9a-01de9d3c8068",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "a291665e-244b-4b76-9b9a-01de9d3c8068";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "a291665e-244b-4b76-9b9a-01de9d3c8068";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_icl.h b/drivers/gpu/drm/i915/oa/i915_oa_icl.h
index e501651d385b..5c64112d720e 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_icl.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_icl.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -10,6 +9,8 @@
#ifndef __I915_OA_ICL_H__
#define __I915_OA_ICL_H__
+struct drm_i915_private;
+
void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c
index 168e49ab0d4d..1c3a67c9cfe0 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"baa3c7e4-52b6-4b85-801e-465a94b746dd",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "baa3c7e4-52b6-4b85-801e-465a94b746dd";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "baa3c7e4-52b6-4b85-801e-465a94b746dd";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h
index dc460e6e0fae..810532fa6b63 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -10,6 +9,8 @@
#ifndef __I915_OA_KBLGT2_H__
#define __I915_OA_KBLGT2_H__
+struct drm_i915_private;
+
void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c
index 6ffa553c388e..ebbe5a9c9fdc 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"f1792f32-6db2-4b50-b4b2-557128f1688d",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "f1792f32-6db2-4b50-b4b2-557128f1688d";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "f1792f32-6db2-4b50-b4b2-557128f1688d";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h
index 5926992b735a..13d70456fabd 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -10,6 +9,8 @@
#ifndef __I915_OA_KBLGT3_H__
#define __I915_OA_KBLGT3_H__
+struct drm_i915_private;
+
void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c
index 7ce6ee851d43..1bc359ed34e8 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -64,26 +63,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"1651949f-0ac0-4cb1-a06f-dafd74a407d1",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "1651949f-0ac0-4cb1-a06f-dafd74a407d1";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "1651949f-0ac0-4cb1-a06f-dafd74a407d1";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h
index 353db35b36c1..fda70c51a6ec 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -10,6 +9,8 @@
#ifndef __I915_OA_SKLGT2_H__
#define __I915_OA_SKLGT2_H__
+struct drm_i915_private;
+
void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c
index 086ca2631e1c..6e352f881310 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"2b985803-d3c9-4629-8a4f-634bfecba0e8",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "2b985803-d3c9-4629-8a4f-634bfecba0e8";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "2b985803-d3c9-4629-8a4f-634bfecba0e8";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h
index 52f94c674b62..df74eba5799e 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -10,6 +9,8 @@
#ifndef __I915_OA_SKLGT3_H__
#define __I915_OA_SKLGT3_H__
+struct drm_i915_private;
+
void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c
index b291a6eb8a87..8f345115a306 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: MIT
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -65,26 +64,26 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv)
{
- strlcpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.test_config.uuid,
"882fa433-1f4a-4a67-a962-c741888fe5f5",
- sizeof(dev_priv->perf.oa.test_config.uuid));
- dev_priv->perf.oa.test_config.id = 1;
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
- dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
- dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
- dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
- dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
- dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
- dev_priv->perf.oa.test_config.sysfs_metric.name = "882fa433-1f4a-4a67-a962-c741888fe5f5";
- dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+ dev_priv->perf.test_config.sysfs_metric.name = "882fa433-1f4a-4a67-a962-c741888fe5f5";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
- dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
- dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
- dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h
index 8e364820cc63..378ab7ab78d5 100644
--- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
+ * Copyright © 2018-2019 Intel Corporation
*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
@@ -10,6 +9,8 @@
#ifndef __I915_OA_SKLGT4_H__
#define __I915_OA_SKLGT4_H__
+struct drm_i915_private;
+
void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index e5cd5d47e380..77d844ac8b71 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -110,8 +110,7 @@ __live_active_setup(struct drm_i915_private *i915)
submit,
GFP_KERNEL);
if (err >= 0)
- err = i915_active_ref(&active->base,
- rq->fence.context, rq);
+ err = i915_active_ref(&active->base, rq->timeline, rq);
i915_request_add(rq);
if (err) {
pr_err("Failed to track active ref!\n");
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
new file mode 100644
index 000000000000..23f784eae1e7
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c
@@ -0,0 +1,720 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "../i915_selftest.h"
+#include "i915_random.h"
+
+#define SZ_8G (1ULL << 33)
+
+static void __igt_dump_block(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block,
+ bool buddy)
+{
+ pr_err("block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%s buddy=%s\n",
+ block->header,
+ i915_buddy_block_state(block),
+ i915_buddy_block_order(block),
+ i915_buddy_block_offset(block),
+ i915_buddy_block_size(mm, block),
+ yesno(!block->parent),
+ yesno(buddy));
+}
+
+static void igt_dump_block(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ struct i915_buddy_block *buddy;
+
+ __igt_dump_block(mm, block, false);
+
+ buddy = get_buddy(block);
+ if (buddy)
+ __igt_dump_block(mm, buddy, true);
+}
+
+static int igt_check_block(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ struct i915_buddy_block *buddy;
+ unsigned int block_state;
+ u64 block_size;
+ u64 offset;
+ int err = 0;
+
+ block_state = i915_buddy_block_state(block);
+
+ if (block_state != I915_BUDDY_ALLOCATED &&
+ block_state != I915_BUDDY_FREE &&
+ block_state != I915_BUDDY_SPLIT) {
+ pr_err("block state mismatch\n");
+ err = -EINVAL;
+ }
+
+ block_size = i915_buddy_block_size(mm, block);
+ offset = i915_buddy_block_offset(block);
+
+ if (block_size < mm->chunk_size) {
+ pr_err("block size smaller than min size\n");
+ err = -EINVAL;
+ }
+
+ if (!is_power_of_2(block_size)) {
+ pr_err("block size not power of two\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(block_size, mm->chunk_size)) {
+ pr_err("block size not aligned to min size\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(offset, mm->chunk_size)) {
+ pr_err("block offset not aligned to min size\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(offset, block_size)) {
+ pr_err("block offset not aligned to block size\n");
+ err = -EINVAL;
+ }
+
+ buddy = get_buddy(block);
+
+ if (!buddy && block->parent) {
+ pr_err("buddy has gone fishing\n");
+ err = -EINVAL;
+ }
+
+ if (buddy) {
+ if (i915_buddy_block_offset(buddy) != (offset ^ block_size)) {
+ pr_err("buddy has wrong offset\n");
+ err = -EINVAL;
+ }
+
+ if (i915_buddy_block_size(mm, buddy) != block_size) {
+ pr_err("buddy size mismatch\n");
+ err = -EINVAL;
+ }
+
+ if (i915_buddy_block_state(buddy) == block_state &&
+ block_state == I915_BUDDY_FREE) {
+ pr_err("block and its buddy are free\n");
+ err = -EINVAL;
+ }
+ }
+
+ return err;
+}
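The offset ^ block_size test above encodes the defining buddy-allocator invariant: a block of power-of-two size S at an S-aligned offset O has its buddy at O ^ S, the same address with the S bit flipped. A tiny self-contained check of that arithmetic (userspace C, illustrative values only):

	#include <assert.h>

	int main(void)
	{
		unsigned long long offset = 0x4000, size = 0x1000;

		assert((offset ^ size) == 0x5000); /* buddy of 0x4000 */
		assert(((offset ^ size) ^ size) == offset); /* symmetric */
		return 0;
	}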
+
+static int igt_check_blocks(struct i915_buddy_mm *mm,
+ struct list_head *blocks,
+ u64 expected_size,
+ bool is_contiguous)
+{
+ struct i915_buddy_block *block;
+ struct i915_buddy_block *prev;
+ u64 total;
+ int err = 0;
+
+ block = NULL;
+ prev = NULL;
+ total = 0;
+
+ list_for_each_entry(block, blocks, link) {
+ err = igt_check_block(mm, block);
+
+ if (!i915_buddy_block_is_allocated(block)) {
+			pr_err("block not allocated\n");
+ err = -EINVAL;
+ }
+
+ if (is_contiguous && prev) {
+ u64 prev_block_size;
+ u64 prev_offset;
+ u64 offset;
+
+ prev_offset = i915_buddy_block_offset(prev);
+ prev_block_size = i915_buddy_block_size(mm, prev);
+ offset = i915_buddy_block_offset(block);
+
+ if (offset != (prev_offset + prev_block_size)) {
+ pr_err("block offset mismatch\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (err)
+ break;
+
+ total += i915_buddy_block_size(mm, block);
+ prev = block;
+ }
+
+ if (!err) {
+ if (total != expected_size) {
+ pr_err("size mismatch, expected=%llx, found=%llx\n",
+ expected_size, total);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ if (prev) {
+ pr_err("prev block, dump:\n");
+ igt_dump_block(mm, prev);
+ }
+
+ if (block) {
+ pr_err("bad block, dump:\n");
+ igt_dump_block(mm, block);
+ }
+
+ return err;
+}
+
+static int igt_check_mm(struct i915_buddy_mm *mm)
+{
+ struct i915_buddy_block *root;
+ struct i915_buddy_block *prev;
+ unsigned int i;
+ u64 total;
+ int err = 0;
+
+ if (!mm->n_roots) {
+ pr_err("n_roots is zero\n");
+ return -EINVAL;
+ }
+
+ if (mm->n_roots != hweight64(mm->size)) {
+ pr_err("n_roots mismatch, n_roots=%u, expected=%lu\n",
+ mm->n_roots, hweight64(mm->size));
+ return -EINVAL;
+ }
+
+ root = NULL;
+ prev = NULL;
+ total = 0;
+
+ for (i = 0; i < mm->n_roots; ++i) {
+ struct i915_buddy_block *block;
+ unsigned int order;
+
+ root = mm->roots[i];
+ if (!root) {
+ pr_err("root(%u) is NULL\n", i);
+ err = -EINVAL;
+ break;
+ }
+
+ err = igt_check_block(mm, root);
+
+ if (!i915_buddy_block_is_free(root)) {
+ pr_err("root not free\n");
+ err = -EINVAL;
+ }
+
+ order = i915_buddy_block_order(root);
+
+ if (!i) {
+ if (order != mm->max_order) {
+ pr_err("max order root missing\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (prev) {
+ u64 prev_block_size;
+ u64 prev_offset;
+ u64 offset;
+
+ prev_offset = i915_buddy_block_offset(prev);
+ prev_block_size = i915_buddy_block_size(mm, prev);
+ offset = i915_buddy_block_offset(root);
+
+ if (offset != (prev_offset + prev_block_size)) {
+ pr_err("root offset mismatch\n");
+ err = -EINVAL;
+ }
+ }
+
+ block = list_first_entry_or_null(&mm->free_list[order],
+ struct i915_buddy_block,
+ link);
+ if (block != root) {
+ pr_err("root mismatch at order=%u\n", order);
+ err = -EINVAL;
+ }
+
+ if (err)
+ break;
+
+ prev = root;
+ total += i915_buddy_block_size(mm, root);
+ }
+
+ if (!err) {
+ if (total != mm->size) {
+ pr_err("expected mm size=%llx, found=%llx\n", mm->size,
+ total);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ if (prev) {
+ pr_err("prev root(%u), dump:\n", i - 1);
+ igt_dump_block(mm, prev);
+ }
+
+ if (root) {
+ pr_err("bad root(%u), dump:\n", i);
+ igt_dump_block(mm, root);
+ }
+
+ return err;
+}
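The n_roots == hweight64(mm->size) check holds because a mm whose size is not a power of two is carved into one power-of-two root per set bit of the size, laid out largest-first and back to back; the prev/offset loop then confirms the roots are contiguous. A worked example (illustrative numbers, assuming a 4KiB chunk):

	/* size = 0x180000 = 0x100000 + 0x80000, so hweight64(size) == 2:
	 *   root[0]: 0x100000 bytes (order 8) at offset 0x000000
	 *   root[1]: 0x080000 bytes (order 7) at offset 0x100000
	 * root[1] starts exactly at prev_offset + prev_block_size,
	 * which is what the offset check in the loop asserts.
	 */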
+
+static void igt_mm_config(u64 *size, u64 *chunk_size)
+{
+ I915_RND_STATE(prng);
+ u64 s, ms;
+
+ /* Nothing fancy, just try to get an interesting bit pattern */
+
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+
+ s = i915_prandom_u64_state(&prng) & (SZ_8G - 1);
+ ms = BIT_ULL(12 + (prandom_u32_state(&prng) % ilog2(s >> 12)));
+ s = max(s & -ms, ms);
+
+ *chunk_size = ms;
+ *size = s;
+}
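The bit-twiddling above is denser than it looks: because ms is a power of two, s & -ms rounds s down to a multiple of ms, and the max() keeps at least one chunk when that rounding reaches zero. A worked example (illustrative values; note the expression implicitly assumes the random s is at least 2^13, so that ilog2(s >> 12) is nonzero):

	/* s       = 0x123456789             random draw, below SZ_8G
	 * s >> 12 = 0x123456, ilog2() = 20  chunk exponent range 12..31
	 * say r % 20 == 4               ->  ms = BIT_ULL(16) = 0x10000
	 * s & -ms = 0x123450000             rounded down to 64KiB
	 */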
+
+static int igt_buddy_alloc_smoke(void *arg)
+{
+ struct i915_buddy_mm mm;
+ int max_order;
+ u64 chunk_size;
+ u64 mm_size;
+ int err;
+
+ igt_mm_config(&mm_size, &chunk_size);
+
+ pr_info("buddy_init with size=%llx, chunk_size=%llx\n", mm_size, chunk_size);
+
+ err = i915_buddy_init(&mm, mm_size, chunk_size);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+
+ for (max_order = mm.max_order; max_order >= 0; max_order--) {
+ struct i915_buddy_block *block;
+ int order;
+ LIST_HEAD(blocks);
+ u64 total;
+
+ err = igt_check_mm(&mm);
+ if (err) {
+ pr_err("pre-mm check failed, abort\n");
+ break;
+ }
+
+ pr_info("filling from max_order=%u\n", max_order);
+
+ order = max_order;
+ total = 0;
+
+ do {
+retry:
+ block = i915_buddy_alloc(&mm, order);
+ if (IS_ERR(block)) {
+ err = PTR_ERR(block);
+ if (err == -ENOMEM) {
+ pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+ } else {
+ if (order--) {
+ err = 0;
+ goto retry;
+ }
+
+ pr_err("buddy_alloc with order=%d failed(%d)\n",
+ order, err);
+ }
+
+ break;
+ }
+
+ list_add_tail(&block->link, &blocks);
+
+ if (i915_buddy_block_order(block) != order) {
+ pr_err("buddy_alloc order mismatch\n");
+ err = -EINVAL;
+ break;
+ }
+
+ total += i915_buddy_block_size(&mm, block);
+ } while (total < mm.size);
+
+ if (!err)
+ err = igt_check_blocks(&mm, &blocks, total, false);
+
+ i915_buddy_free_list(&mm, &blocks);
+
+ if (!err) {
+ err = igt_check_mm(&mm);
+ if (err)
+ pr_err("post-mm check failed\n");
+ }
+
+ if (err)
+ break;
+ }
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ i915_buddy_fini(&mm);
+
+ return err;
+}
+
+static int igt_buddy_alloc_pessimistic(void *arg)
+{
+ const unsigned int max_order = 16;
+ struct i915_buddy_block *block, *bn;
+ struct i915_buddy_mm mm;
+ unsigned int order;
+ LIST_HEAD(blocks);
+ int err;
+
+ /*
+ * Create a pot-sized mm, then allocate one of each possible
+ * order within. This should leave the mm with exactly one
+ * page left.
+ */
+
+ err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+ GEM_BUG_ON(mm.max_order != max_order);
+
+ for (order = 0; order < max_order; order++) {
+ block = i915_buddy_alloc(&mm, order);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+ err = PTR_ERR(block);
+ goto err;
+ }
+
+ list_add_tail(&block->link, &blocks);
+ }
+
+ /* And now the last remaining block available */
+ block = i915_buddy_alloc(&mm, 0);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc hit -ENOMEM on final alloc\n");
+ err = PTR_ERR(block);
+ goto err;
+ }
+ list_add_tail(&block->link, &blocks);
+
+ /* Should be completely full! */
+ for (order = max_order; order--; ) {
+ block = i915_buddy_alloc(&mm, order);
+ if (!IS_ERR(block)) {
+			pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!\n",
+ order);
+ list_add_tail(&block->link, &blocks);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+ block = list_last_entry(&blocks, typeof(*block), link);
+ list_del(&block->link);
+ i915_buddy_free(&mm, block);
+
+ /* As we free in increasing size, we make available larger blocks */
+ order = 1;
+ list_for_each_entry_safe(block, bn, &blocks, link) {
+ list_del(&block->link);
+ i915_buddy_free(&mm, block);
+
+ block = i915_buddy_alloc(&mm, order);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
+ order);
+ err = PTR_ERR(block);
+ goto err;
+ }
+ i915_buddy_free(&mm, block);
+ order++;
+ }
+
+ /* To confirm, now the whole mm should be available */
+ block = i915_buddy_alloc(&mm, max_order);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
+ max_order);
+ err = PTR_ERR(block);
+ goto err;
+ }
+ i915_buddy_free(&mm, block);
+
+err:
+ i915_buddy_free_list(&mm, &blocks);
+ i915_buddy_fini(&mm);
+ return err;
+}
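The realloc loop above leans on buddy coalescing: when a block and its buddy are both free they merge into one block of the next order up, so freeing the small blocks in increasing size steadily makes each larger order allocatable again. Sketched as a sequence (illustrative, order-0 block = PAGE_SIZE):

	/* free(order 0 @ 0x0000)  buddy @ 0x1000 still busy: no merge
	 * free(order 0 @ 0x1000)  both halves free -> order 1 @ 0x0000
	 * alloc(order 1)          now succeeds with the merged block
	 */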
+
+static int igt_buddy_alloc_optimistic(void *arg)
+{
+ const int max_order = 16;
+ struct i915_buddy_block *block;
+ struct i915_buddy_mm mm;
+ LIST_HEAD(blocks);
+ int order;
+ int err;
+
+ /*
+ * Create a mm with one block of each order available, and
+ * try to allocate them all.
+ */
+
+ err = i915_buddy_init(&mm,
+ PAGE_SIZE * ((1 << (max_order + 1)) - 1),
+ PAGE_SIZE);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+ GEM_BUG_ON(mm.max_order != max_order);
+
+ for (order = 0; order <= max_order; order++) {
+ block = i915_buddy_alloc(&mm, order);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+ err = PTR_ERR(block);
+ goto err;
+ }
+
+ list_add_tail(&block->link, &blocks);
+ }
+
+ /* Should be completely full! */
+ block = i915_buddy_alloc(&mm, 0);
+ if (!IS_ERR(block)) {
+		pr_info("buddy_alloc unexpectedly succeeded, it should be full!\n");
+ list_add_tail(&block->link, &blocks);
+ err = -EINVAL;
+ goto err;
+ }
+
+err:
+ i915_buddy_free_list(&mm, &blocks);
+ i915_buddy_fini(&mm);
+ return err;
+}
+
+static int igt_buddy_alloc_pathological(void *arg)
+{
+ const int max_order = 16;
+ struct i915_buddy_block *block;
+ struct i915_buddy_mm mm;
+ LIST_HEAD(blocks);
+ LIST_HEAD(holes);
+ int order, top;
+ int err;
+
+ /*
+ * Create a pot-sized mm, then allocate one of each possible
+ * order within. This should leave the mm with exactly one
+ * page left. Free the largest block, then whittle down again.
+ * Eventually we will have a fully 50% fragmented mm.
+ */
+
+ err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+ GEM_BUG_ON(mm.max_order != max_order);
+
+ for (top = max_order; top; top--) {
+ /* Make room by freeing the largest allocated block */
+ block = list_first_entry_or_null(&blocks, typeof(*block), link);
+ if (block) {
+ list_del(&block->link);
+ i915_buddy_free(&mm, block);
+ }
+
+ for (order = top; order--; ) {
+ block = i915_buddy_alloc(&mm, order);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
+ order, top);
+ err = PTR_ERR(block);
+ goto err;
+ }
+ list_add_tail(&block->link, &blocks);
+ }
+
+ /* There should be one final page for this sub-allocation */
+ block = i915_buddy_alloc(&mm, 0);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc hit -ENOMEM for hole\n");
+ err = PTR_ERR(block);
+ goto err;
+ }
+ list_add_tail(&block->link, &holes);
+
+ block = i915_buddy_alloc(&mm, top);
+ if (!IS_ERR(block)) {
+			pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!\n",
+ top, max_order);
+ list_add_tail(&block->link, &blocks);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+ i915_buddy_free_list(&mm, &holes);
+
+ /* Nothing larger than blocks of chunk_size now available */
+ for (order = 1; order <= max_order; order++) {
+ block = i915_buddy_alloc(&mm, order);
+ if (!IS_ERR(block)) {
+			pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!\n",
+ order);
+ list_add_tail(&block->link, &blocks);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+err:
+ list_splice_tail(&holes, &blocks);
+ i915_buddy_free_list(&mm, &blocks);
+ i915_buddy_fini(&mm);
+ return err;
+}
+
+static int igt_buddy_alloc_range(void *arg)
+{
+ struct i915_buddy_mm mm;
+ unsigned long page_num;
+ LIST_HEAD(blocks);
+ u64 chunk_size;
+ u64 offset;
+ u64 size;
+ u64 rem;
+ int err;
+
+ igt_mm_config(&size, &chunk_size);
+
+ pr_info("buddy_init with size=%llx, chunk_size=%llx\n", size, chunk_size);
+
+ err = i915_buddy_init(&mm, size, chunk_size);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+
+ err = igt_check_mm(&mm);
+ if (err) {
+ pr_err("pre-mm check failed, abort, abort, abort!\n");
+ goto err_fini;
+ }
+
+ rem = mm.size;
+ offset = 0;
+
+ for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
+ struct i915_buddy_block *block;
+ LIST_HEAD(tmp);
+
+ size = min(page_num * mm.chunk_size, rem);
+
+ err = i915_buddy_alloc_range(&mm, &tmp, offset, size);
+ if (err) {
+ if (err == -ENOMEM) {
+ pr_info("alloc_range hit -ENOMEM with size=%llx\n",
+ size);
+ } else {
+ pr_err("alloc_range with offset=%llx, size=%llx failed(%d)\n",
+ offset, size, err);
+ }
+
+ break;
+ }
+
+ block = list_first_entry_or_null(&tmp,
+ struct i915_buddy_block,
+ link);
+ if (!block) {
+ pr_err("alloc_range has no blocks\n");
+ err = -EINVAL;
+ break;
+ }
+
+ if (i915_buddy_block_offset(block) != offset) {
+ pr_err("alloc_range start offset mismatch, found=%llx, expected=%llx\n",
+ i915_buddy_block_offset(block), offset);
+ err = -EINVAL;
+ }
+
+ if (!err)
+ err = igt_check_blocks(&mm, &tmp, size, true);
+
+ list_splice_tail(&tmp, &blocks);
+
+ if (err)
+ break;
+
+ offset += size;
+
+ rem -= size;
+ if (!rem)
+ break;
+ }
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ i915_buddy_free_list(&mm, &blocks);
+
+ if (!err) {
+ err = igt_check_mm(&mm);
+ if (err)
+ pr_err("post-mm check failed\n");
+ }
+
+err_fini:
+ i915_buddy_fini(&mm);
+
+ return err;
+}
+
+int i915_buddy_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_buddy_alloc_pessimistic),
+ SUBTEST(igt_buddy_alloc_optimistic),
+ SUBTEST(igt_buddy_alloc_pathological),
+ SUBTEST(igt_buddy_alloc_smoke),
+ SUBTEST(igt_buddy_alloc_range),
+ };
+
+ return i915_subtests(tests, NULL);
+}
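For context, a table like the one above is not called directly: registering i915_buddy_mock_selftests in i915_mock_selftests.h (see the hunk further below) is what lets the harness run it, typically by loading the module with mock selftests enabled, e.g. something like i915.mock_selftests=-1 to run them all; the exact parameter spelling belongs to the existing selftest harness, not to this patch.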
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index b6449d0a8c17..cb30c669b1b7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -48,26 +48,29 @@ static int populate_ggtt(struct drm_i915_private *i915,
{
unsigned long unbound, bound, count;
struct drm_i915_gem_object *obj;
- u64 size;
count = 0;
- for (size = 0;
- size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
- size += I915_GTT_PAGE_SIZE) {
+ do {
struct i915_vma *vma;
obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
- quirk_add(obj, objects);
-
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
- if (IS_ERR(vma))
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ if (vma == ERR_PTR(-ENOSPC))
+ break;
+
return PTR_ERR(vma);
+ }
+ quirk_add(obj, objects);
count++;
- }
+ } while (1);
+ pr_debug("Filled GGTT with %lu pages [%llu total]\n",
+ count, i915->ggtt.vm.total / PAGE_SIZE);
bound = 0;
unbound = 0;
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 2b31a4ee0b4c..1ccf0f731ac0 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -12,7 +12,9 @@
selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
selftest(uncore, intel_uncore_live_selftests)
selftest(workarounds, intel_workarounds_live_selftests)
-selftest(timelines, intel_timeline_live_selftests)
+selftest(gt_engines, intel_engine_live_selftests)
+selftest(gt_timelines, intel_timeline_live_selftests)
+selftest(gt_contexts, intel_context_live_selftests)
selftest(requests, i915_request_live_selftests)
selftest(active, i915_active_live_selftests)
selftest(objects, i915_gem_object_live_selftests)
@@ -24,7 +26,7 @@ selftest(gtt, i915_gem_gtt_live_selftests)
selftest(gem, i915_gem_live_selftests)
selftest(evict, i915_gem_evict_live_selftests)
selftest(hugepages, i915_gem_huge_page_live_selftests)
-selftest(contexts, i915_gem_context_live_selftests)
+selftest(gem_contexts, i915_gem_context_live_selftests)
selftest(blt, i915_gem_object_blt_live_selftests)
selftest(client, i915_gem_client_blt_live_selftests)
selftest(reset, intel_reset_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index b55da4d9ccba..b88084fe3269 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -25,3 +25,4 @@ selftest(evict, i915_gem_evict_mock_selftests)
selftest(gtt, i915_gem_gtt_mock_selftests)
selftest(hugepages, i915_gem_huge_page_mock_selftests)
selftest(contexts, i915_gem_context_mock_selftests)
+selftest(buddy, i915_buddy_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 86c299663934..b3688543ed7d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -46,9 +46,7 @@ static int igt_add_request(void *arg)
/* Basic preliminary test to create a request and let it loose! */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS0],
- i915->kernel_context,
- HZ / 10);
+ request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10);
if (!request)
goto out_unlock;
@@ -70,7 +68,7 @@ static int igt_wait_request(void *arg)
/* Submit a request, then wait upon it */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS0], i915->kernel_context, T);
+ request = mock_request(i915->engine[RCS0]->kernel_context, T);
if (!request) {
err = -ENOMEM;
goto out_unlock;
@@ -143,7 +141,7 @@ static int igt_fence_wait(void *arg)
/* Submit a request, treat it as a fence and wait upon it */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS0], i915->kernel_context, T);
+ request = mock_request(i915->engine[RCS0]->kernel_context, T);
if (!request) {
err = -ENOMEM;
goto out_locked;
@@ -196,11 +194,15 @@ static int igt_request_rewind(void *arg)
struct drm_i915_private *i915 = arg;
struct i915_request *request, *vip;
struct i915_gem_context *ctx[2];
+ struct intel_context *ce;
int err = -EINVAL;
mutex_lock(&i915->drm.struct_mutex);
ctx[0] = mock_context(i915, "A");
- request = mock_request(i915->engine[RCS0], ctx[0], 2 * HZ);
+ ce = i915_gem_context_get_engine(ctx[0], RCS0);
+ GEM_BUG_ON(IS_ERR(ce));
+ request = mock_request(ce, 2 * HZ);
+ intel_context_put(ce);
if (!request) {
err = -ENOMEM;
goto err_context_0;
@@ -210,7 +212,10 @@ static int igt_request_rewind(void *arg)
i915_request_add(request);
ctx[1] = mock_context(i915, "B");
- vip = mock_request(i915->engine[RCS0], ctx[1], 0);
+ ce = i915_gem_context_get_engine(ctx[1], RCS0);
+ GEM_BUG_ON(IS_ERR(ce));
+ vip = mock_request(ce, 0);
+ intel_context_put(ce);
if (!vip) {
err = -ENOMEM;
goto err_context_1;
@@ -259,22 +264,19 @@ struct smoketest {
struct i915_gem_context **contexts;
atomic_long_t num_waits, num_fences;
int ncontexts, max_batch;
- struct i915_request *(*request_alloc)(struct i915_gem_context *,
- struct intel_engine_cs *);
+ struct i915_request *(*request_alloc)(struct intel_context *ce);
};
static struct i915_request *
-__mock_request_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+__mock_request_alloc(struct intel_context *ce)
{
- return mock_request(engine, ctx, 0);
+ return mock_request(ce, 0);
}
static struct i915_request *
-__live_request_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+__live_request_alloc(struct intel_context *ce)
{
- return igt_request_alloc(ctx, engine);
+ return intel_context_create_request(ce);
}
static int __igt_breadcrumbs_smoketest(void *arg)
@@ -333,10 +335,14 @@ static int __igt_breadcrumbs_smoketest(void *arg)
struct i915_gem_context *ctx =
t->contexts[order[n] % t->ncontexts];
struct i915_request *rq;
+ struct intel_context *ce;
mutex_lock(BKL);
- rq = t->request_alloc(ctx, t->engine);
+ ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx);
+ GEM_BUG_ON(IS_ERR(ce));
+ rq = t->request_alloc(ce);
+ intel_context_put(ce);
if (IS_ERR(rq)) {
mutex_unlock(BKL);
err = PTR_ERR(rq);
@@ -870,7 +876,9 @@ static int live_all_engines(void *arg)
request[id]->batch = batch;
i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_request_await_object(request[id], batch->obj, 0);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, request[id], 0);
i915_vma_unlock(batch);
GEM_BUG_ON(err);
@@ -986,7 +994,9 @@ static int live_sequential_engines(void *arg)
request[id]->batch = batch;
i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_request_await_object(request[id], batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, request[id], 0);
i915_vma_unlock(batch);
GEM_BUG_ON(err);
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index db9c645bbdfe..438ea0eaa416 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -185,7 +185,7 @@ int i915_live_selftests(struct pci_dev *pdev)
if (!i915_selftest.live)
return 0;
- err = run_selftests(live, to_i915(pci_get_drvdata(pdev)));
+ err = run_selftests(live, pdev_to_i915(pdev));
if (err) {
i915_selftest.live = err;
return err;
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 89b6552a6497..11f04ad48e68 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -9,25 +9,24 @@
#include "igt_spinner.h"
-int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
+int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{
unsigned int mode;
void *vaddr;
int err;
- GEM_BUG_ON(INTEL_GEN(i915) < 8);
+ GEM_BUG_ON(INTEL_GEN(gt->i915) < 8);
memset(spin, 0, sizeof(*spin));
- spin->i915 = i915;
- spin->gt = &i915->gt;
+ spin->gt = gt;
- spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(spin->hws)) {
err = PTR_ERR(spin->hws);
goto err;
}
- spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(spin->obj)) {
err = PTR_ERR(spin->obj);
goto err_hws;
@@ -41,7 +40,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
}
spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
- mode = i915_coherent_map_type(i915);
+ mode = i915_coherent_map_type(gt->i915);
vaddr = i915_gem_object_pin_map(spin->obj, mode);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
@@ -79,7 +78,10 @@ static int move_to_active(struct i915_vma *vma,
int err;
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, flags);
+ err = i915_request_await_object(rq, vma->obj,
+ flags & EXEC_OBJECT_WRITE);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, flags);
i915_vma_unlock(vma);
return err;
@@ -87,22 +89,22 @@ static int move_to_active(struct i915_vma *vma,
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+ struct intel_context *ce,
u32 arbitration_command)
{
+ struct intel_engine_cs *engine = ce->engine;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
u32 *batch;
int err;
- spin->gt = engine->gt;
+ GEM_BUG_ON(spin->gt != ce->vm->gt);
- vma = i915_vma_instance(spin->obj, ctx->vm, NULL);
+ vma = i915_vma_instance(spin->obj, ce->vm, NULL);
if (IS_ERR(vma))
return ERR_CAST(vma);
- hws = i915_vma_instance(spin->hws, ctx->vm, NULL);
+ hws = i915_vma_instance(spin->hws, ce->vm, NULL);
if (IS_ERR(hws))
return ERR_CAST(hws);
@@ -114,7 +116,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
if (err)
goto unpin_vma;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto unpin_hws;
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
index 1bfc39efa773..ec62c9ef320b 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.h
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -17,7 +17,6 @@
struct intel_gt;
struct igt_spinner {
- struct drm_i915_private *i915;
struct intel_gt *gt;
struct drm_i915_gem_object *hws;
struct drm_i915_gem_object *obj;
@@ -25,13 +24,12 @@ struct igt_spinner {
void *seqno;
};
-int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915);
+int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt);
void igt_spinner_fini(struct igt_spinner *spin);
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+ struct intel_context *ce,
u32 arbitration_command);
void igt_spinner_end(struct igt_spinner *spin);
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
index b976c12817c5..080b90b63d16 100644
--- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -40,6 +40,7 @@ void __onstack_fence_init(struct i915_sw_fence *fence,
__init_waitqueue_head(&fence->wait, name, key);
atomic_set(&fence->pending, 1);
+ fence->error = 0;
fence->flags = (unsigned long)nop_fence_notify;
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index fd4cc4809eb8..01a89c071bf5 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -213,6 +213,7 @@ struct drm_i915_private *mock_gem_device(void)
if (mock_engine_init(i915->engine[RCS0]))
goto err_context;
+ intel_engines_driver_register(i915);
mutex_unlock(&i915->drm.struct_mutex);
WARN_ON(i915_gemfs_init(i915));
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
index 9390fc09984b..09f747228dff 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.c
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -28,14 +28,12 @@
#include "mock_request.h"
struct i915_request *
-mock_request(struct intel_engine_cs *engine,
- struct i915_gem_context *context,
- unsigned long delay)
+mock_request(struct intel_context *ce, unsigned long delay)
{
struct i915_request *request;
/* NB the i915->requests slab cache is enlarged to fit mock_request */
- request = igt_request_alloc(context, engine);
+ request = intel_context_create_request(ce);
if (IS_ERR(request))
return NULL;
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.h b/drivers/gpu/drm/i915/selftests/mock_request.h
index 4acf0211df20..8907b60c290d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.h
+++ b/drivers/gpu/drm/i915/selftests/mock_request.h
@@ -30,9 +30,7 @@
#include "../i915_request.h"
struct i915_request *
-mock_request(struct intel_engine_cs *engine,
- struct i915_gem_context *context,
- unsigned long delay);
+mock_request(struct intel_context *ce, unsigned long delay);
bool mock_cancel_request(struct i915_request *request);
diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile
index ab6c83caceb7..21cdcc2faabc 100644
--- a/drivers/gpu/drm/imx/Makefile
+++ b/drivers/gpu/drm/imx/Makefile
@@ -8,5 +8,4 @@ obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o
obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
-obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o
obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 5a3ad6fc8ea7..f22cfbf9353e 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -16,7 +16,7 @@
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drm_encoder_slave.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_of.h>
#include "imx-drm.h"
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index db461b6a257f..695f307f36b2 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -124,14 +124,11 @@ static void imx_ldb_ch_set_bus_format(struct imx_ldb_channel *imx_ldb_ch,
static int imx_ldb_connector_get_modes(struct drm_connector *connector)
{
struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector);
- int num_modes = 0;
+ int num_modes;
- if (imx_ldb_ch->panel && imx_ldb_ch->panel->funcs &&
- imx_ldb_ch->panel->funcs->get_modes) {
- num_modes = imx_ldb_ch->panel->funcs->get_modes(imx_ldb_ch->panel);
- if (num_modes > 0)
- return num_modes;
- }
+ num_modes = drm_panel_get_modes(imx_ldb_ch->panel);
+ if (num_modes > 0)
+ return num_modes;
if (!imx_ldb_ch->edid && imx_ldb_ch->ddc)
imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc);
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 2e51b2fade75..e7ce17503ae1 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -47,14 +47,11 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
{
struct imx_parallel_display *imxpd = con_to_imxpd(connector);
struct device_node *np = imxpd->dev->of_node;
- int num_modes = 0;
+ int num_modes;
- if (imxpd->panel && imxpd->panel->funcs &&
- imxpd->panel->funcs->get_modes) {
- num_modes = imxpd->panel->funcs->get_modes(imxpd->panel);
- if (num_modes > 0)
- return num_modes;
- }
+ num_modes = drm_panel_get_modes(imxpd->panel);
+ if (num_modes > 0)
+ return num_modes;
if (imxpd->edid) {
drm_connector_update_edid_property(connector, imxpd->edid);
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index fd1a024703d2..ff3d9acc24fc 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -136,7 +136,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
int err = 0;
if (!write) {
- err = reservation_object_reserve_shared(bo->gem.resv, 1);
+ err = dma_resv_reserve_shared(bo->gem.resv, 1);
if (err)
return err;
}
@@ -296,9 +296,9 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
- reservation_object_add_excl_fence(bos[i]->gem.resv, fence);
+ dma_resv_add_excl_fence(bos[i]->gem.resv, fence);
else
- reservation_object_add_shared_fence(bos[i]->gem.resv, fence);
+ dma_resv_add_shared_fence(bos[i]->gem.resv, fence);
}
lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
@@ -341,7 +341,7 @@ int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
timeout = drm_timeout_abs_to_jiffies(timeout_ns);
- ret = drm_gem_reservation_object_wait(file, handle, write, timeout);
+ ret = drm_gem_dma_resv_wait(file, handle, write, timeout);
if (ret == 0)
ret = timeout ? -ETIMEDOUT : -EBUSY;
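
The lima hunks above are part of the tree-wide reservation_object -> dma_resv rename in this series; a summary of the mappings touched here, derived only from the hunks themselves:

/*
 * #include <linux/reservation.h>        -> #include <linux/dma-resv.h>
 * reservation_object_reserve_shared()   -> dma_resv_reserve_shared()
 * reservation_object_add_excl_fence()   -> dma_resv_add_excl_fence()
 * reservation_object_add_shared_fence() -> dma_resv_add_shared_fence()
 * drm_gem_reservation_object_wait()     -> drm_gem_dma_resv_wait()
 */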
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
index ae40b080ae47..3f230a28a2dc 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
@@ -4,7 +4,7 @@
*/
#include <linux/dma-buf.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_fb_helper.h>
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 3320a74e67fa..57ae1c13d1e6 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -265,11 +265,11 @@ static void meson_crtc_enable_vd1(struct meson_drm *priv)
static void meson_g12a_crtc_enable_vd1(struct meson_drm *priv)
{
- writel_relaxed(((1 << 16) | /* post bld premult*/
- (1 << 8) | /* post src */
- (1 << 4) | /* pre bld premult*/
- (1 << 0)),
- priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
+ writel_relaxed(VD_BLEND_PREBLD_SRC_VD1 |
+ VD_BLEND_PREBLD_PREMULT_EN |
+ VD_BLEND_POSTBLD_SRC_VD1 |
+ VD_BLEND_POSTBLD_PREMULT_EN,
+ priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
}
void meson_crtc_irq(struct meson_drm *priv)
@@ -487,7 +487,12 @@ void meson_crtc_irq(struct meson_drm *priv)
writel_relaxed(priv->viu.vd1_range_map_cr,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_RANGE_MAP_CR));
- writel_relaxed(0x78404,
+ writel_relaxed(VPP_VSC_BANK_LENGTH(4) |
+ VPP_HSC_BANK_LENGTH(4) |
+ VPP_SC_VD_EN_ENABLE |
+ VPP_SC_TOP_EN_ENABLE |
+ VPP_SC_HSC_EN_ENABLE |
+ VPP_SC_VSC_EN_ENABLE,
priv->io_base + _REG(VPP_SC_MISC));
writel_relaxed(priv->viu.vpp_pic_in_height,
priv->io_base + _REG(VPP_PIC_IN_HEIGHT));
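
A quick host-side check (not part of the patch) that the named bits above reproduce the old 0x78404 literal; the macro definitions are copied from the meson_registers.h hunks later in this series:

#include <assert.h>

#define BIT(n)                 (1u << (n))
#define VPP_SC_VD_EN_ENABLE    BIT(15)
#define VPP_SC_TOP_EN_ENABLE   BIT(16)
#define VPP_SC_HSC_EN_ENABLE   BIT(17)
#define VPP_SC_VSC_EN_ENABLE   BIT(18)
#define VPP_VSC_BANK_LENGTH(l) ((l) & 0x7)
#define VPP_HSC_BANK_LENGTH(l) (((l) & 0x7) << 8)

static_assert((VPP_VSC_BANK_LENGTH(4) | VPP_HSC_BANK_LENGTH(4) |
               VPP_SC_VD_EN_ENABLE | VPP_SC_TOP_EN_ENABLE |
               VPP_SC_HSC_EN_ENABLE | VPP_SC_VSC_EN_ENABLE) == 0x78404,
              "matches the old VPP_SC_MISC value");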
@@ -570,7 +575,7 @@ int meson_crtc_create(struct meson_drm *priv)
return ret;
}
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
meson_crtc->enable_osd1 = meson_g12a_crtc_enable_osd1;
meson_crtc->enable_vd1 = meson_g12a_crtc_enable_vd1;
meson_crtc->viu_offset = MESON_G12A_VIU_OFFSET;
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 42af49afdd75..a24f8dec5adc 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -140,10 +140,28 @@ static struct regmap_config meson_regmap_config = {
static void meson_vpu_init(struct meson_drm *priv)
{
- writel_relaxed(0x210000, priv->io_base + _REG(VPU_RDARB_MODE_L1C1));
- writel_relaxed(0x10000, priv->io_base + _REG(VPU_RDARB_MODE_L1C2));
- writel_relaxed(0x900000, priv->io_base + _REG(VPU_RDARB_MODE_L2C1));
- writel_relaxed(0x20000, priv->io_base + _REG(VPU_WRARB_MODE_L2C1));
+ u32 value;
+
+ /*
+ * Slave dc0 and dc5 connected to master port 1.
+ * By default other slaves are connected to master port 0.
+ */
+ value = VPU_RDARB_SLAVE_TO_MASTER_PORT(0, 1) |
+ VPU_RDARB_SLAVE_TO_MASTER_PORT(5, 1);
+ writel_relaxed(value, priv->io_base + _REG(VPU_RDARB_MODE_L1C1));
+
+ /* Slave dc0 connected to master port 1 */
+ value = VPU_RDARB_SLAVE_TO_MASTER_PORT(0, 1);
+ writel_relaxed(value, priv->io_base + _REG(VPU_RDARB_MODE_L1C2));
+
+ /* Slave dc4 and dc7 connected to master port 1 */
+ value = VPU_RDARB_SLAVE_TO_MASTER_PORT(4, 1) |
+ VPU_RDARB_SLAVE_TO_MASTER_PORT(7, 1);
+ writel_relaxed(value, priv->io_base + _REG(VPU_RDARB_MODE_L2C1));
+
+ /* Slave dc1 connected to master port 1 */
+ value = VPU_RDARB_SLAVE_TO_MASTER_PORT(1, 1);
+ writel_relaxed(value, priv->io_base + _REG(VPU_WRARB_MODE_L2C1));
}
static void meson_remove_framebuffers(void)
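
The comments above describe the slave-to-master port mapping; a compile-time sketch (not part of the patch) confirming that the macro, as defined in the meson_registers.h hunk below, reproduces each of the old magic values:

#include <assert.h>

#define VPU_RDARB_SLAVE_TO_MASTER_PORT(dc, port) (port << (16 + dc))

static_assert((VPU_RDARB_SLAVE_TO_MASTER_PORT(0, 1) |
               VPU_RDARB_SLAVE_TO_MASTER_PORT(5, 1)) == 0x210000,
              "RDARB L1C1: old literal");
static_assert(VPU_RDARB_SLAVE_TO_MASTER_PORT(0, 1) == 0x10000,
              "RDARB L1C2: old literal");
static_assert((VPU_RDARB_SLAVE_TO_MASTER_PORT(4, 1) |
               VPU_RDARB_SLAVE_TO_MASTER_PORT(7, 1)) == 0x900000,
              "RDARB L2C1: old literal");
static_assert(VPU_RDARB_SLAVE_TO_MASTER_PORT(1, 1) == 0x20000,
              "WRARB L2C1: old literal");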
@@ -191,6 +209,8 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
priv->drm = drm;
priv->dev = dev;
+ priv->compat = (enum vpu_compatible)of_device_get_match_data(priv->dev);
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu");
regs = devm_ioremap_resource(dev, res);
if (IS_ERR(regs)) {
@@ -435,10 +455,14 @@ static int meson_drv_probe(struct platform_device *pdev)
};
static const struct of_device_id dt_match[] = {
- { .compatible = "amlogic,meson-gxbb-vpu" },
- { .compatible = "amlogic,meson-gxl-vpu" },
- { .compatible = "amlogic,meson-gxm-vpu" },
- { .compatible = "amlogic,meson-g12a-vpu" },
+ { .compatible = "amlogic,meson-gxbb-vpu",
+ .data = (void *)VPU_COMPATIBLE_GXBB },
+ { .compatible = "amlogic,meson-gxl-vpu",
+ .data = (void *)VPU_COMPATIBLE_GXL },
+ { .compatible = "amlogic,meson-gxm-vpu",
+ .data = (void *)VPU_COMPATIBLE_GXM },
+ { .compatible = "amlogic,meson-g12a-vpu",
+ .data = (void *)VPU_COMPATIBLE_G12A },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index c9aaec1a846e..820d07bdd42a 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/regmap.h>
struct drm_crtc;
@@ -16,8 +17,16 @@ struct drm_device;
struct drm_plane;
struct meson_drm;
+enum vpu_compatible {
+ VPU_COMPATIBLE_GXBB = 0,
+ VPU_COMPATIBLE_GXL = 1,
+ VPU_COMPATIBLE_GXM = 2,
+ VPU_COMPATIBLE_G12A = 3,
+};
+
struct meson_drm {
struct device *dev;
+ enum vpu_compatible compat;
void __iomem *io_base;
struct regmap *hhi;
int vsync_irq;
@@ -116,9 +125,9 @@ struct meson_drm {
};
static inline int meson_vpu_is_compatible(struct meson_drm *priv,
- const char *compat)
+ enum vpu_compatible family)
{
- return of_device_is_compatible(priv->dev->of_node, compat);
+ return priv->compat == family;
}
#endif /* __MESON_DRV_H */
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 9f0b08eaf003..68bbd987147b 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -429,6 +429,8 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
/* Enable internal pixclk, tmds_clk, spdif_clk, i2s_clk, cecclk */
dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
0x3, 0x3);
+
+ /* Enable cec_clk and hdcp22_tmdsclk_en */
dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
0x3 << 4, 0x3 << 4);
@@ -935,7 +937,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
reset_control_reset(meson_dw_hdmi->hdmitx_phy);
/* Enable APB3 fail on error */
- if (!meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ if (!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
writel_bits_relaxed(BIT(15), BIT(15),
meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG);
writel_bits_relaxed(BIT(15), BIT(15),
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.h b/drivers/gpu/drm/meson/meson_dw_hdmi.h
index 1b2ef043eb5c..08e1c14e4ea0 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.h
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.h
@@ -100,7 +100,8 @@
#define HDMITX_TOP_INTR_RXSENSE_RISE BIT(6)
#define HDMITX_TOP_INTR_RXSENSE_FALL BIT(7)
-/* Bit 14:12 RW tmds_sel: 3'b000=Output zero; 3'b001=Output normal TMDS data;
+/*
+ * Bit 14:12 RW tmds_sel: 3'b000=Output zero; 3'b001=Output normal TMDS data;
* 3'b010=Output PRBS data; 3'b100=Output shift pattern. Default 0.
* Bit 11: 9 RW shift_pttn_repeat: 0=New pattern every clk cycle; 1=New pattern
* every 2 clk cycles; ...; 7=New pattern every 8 clk cycles. Default 0.
@@ -135,7 +136,8 @@
/* Bit 9: 0 RW tmds_clk_pttn[29:20]. Default 0. */
#define HDMITX_TOP_TMDS_CLK_PTTN_23 (0x00B)
-/* Bit 1 RW shift_tmds_clk_pttn:1=Enable shifting clk pattern,
+/*
+ * Bit 1 RW shift_tmds_clk_pttn:1=Enable shifting clk pattern,
* used when TMDS CLK rate = TMDS character rate /4. Default 0.
* Bit 0 R Reserved. Default 0.
* [ 1] shift_tmds_clk_pttn
@@ -143,12 +145,14 @@
*/
#define HDMITX_TOP_TMDS_CLK_PTTN_CNTL (0x00C)
-/* Bit 0 RW revocmem_wr_fail: Read back 1 to indicate Host write REVOC MEM
+/*
+ * Bit 0 RW revocmem_wr_fail: Read back 1 to indicate Host write REVOC MEM
* failure, write 1 to clear the failure flag. Default 0.
*/
#define HDMITX_TOP_REVOCMEM_STAT (0x00D)
-/* Bit 1 R filtered RxSense status
+/*
+ * Bit 1 R filtered RxSense status
* Bit 0 R filtered HPD status.
*/
#define HDMITX_TOP_STAT0 (0x00E)
diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c
index 5aa9dcb4b35e..2468b0212d52 100644
--- a/drivers/gpu/drm/meson/meson_overlay.c
+++ b/drivers/gpu/drm/meson/meson_overlay.c
@@ -513,7 +513,7 @@ static void meson_overlay_atomic_disable(struct drm_plane *plane,
priv->viu.vd1_enabled = false;
/* Disable VD1 */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
writel_relaxed(0, priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
writel_relaxed(0, priv->io_base + _REG(VD2_BLEND_SRC_CTRL));
writel_relaxed(0, priv->io_base + _REG(VD1_IF0_GEN_REG + 0x17b0));
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 80b8d70c4d75..ed543227b00d 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -138,7 +138,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
OSD_ENDIANNESS_LE);
/* On GXBB, Use the old non-HDR RGB2YUV converter */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
priv->viu.osd1_blk0_cfg[0] |= OSD_OUTPUT_COLOR_RGB;
switch (fb->format->format) {
@@ -292,7 +292,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
priv->viu.osd1_blk0_cfg[3] = ((dest.x2 - 1) << 16) | dest.x1;
priv->viu.osd1_blk0_cfg[4] = ((dest.y2 - 1) << 16) | dest.y1;
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
priv->viu.osd_blend_din0_scope_h = ((dest.x2 - 1) << 16) | dest.x1;
priv->viu.osd_blend_din0_scope_v = ((dest.y2 - 1) << 16) | dest.y1;
priv->viu.osb_blend0_size = dst_h << 16 | dst_w;
@@ -308,8 +308,8 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
if (!meson_plane->enabled) {
/* Reset OSD1 before enabling it on GXL+ SoCs */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
meson_viu_osd1_reset(priv);
meson_plane->enabled = true;
@@ -327,8 +327,8 @@ static void meson_plane_atomic_disable(struct drm_plane *plane,
struct meson_drm *priv = meson_plane->priv;
/* Disable OSD1 */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
- writel_bits_relaxed(3 << 8, 0,
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ writel_bits_relaxed(VIU_OSD1_POSTBLD_SRC_OSD1, 0,
priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
else
writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index 057453ce027c..05fce48ceee0 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -12,7 +12,7 @@
#define _REG(reg) ((reg) << 2)
#define writel_bits_relaxed(mask, val, addr) \
- writel_relaxed((readl_relaxed(addr) & ~(mask)) | (val), addr)
+ writel_relaxed((readl_relaxed(addr) & ~(mask)) | ((val) & (mask)), addr)
/* vpp2 */
#define VPP2_DUMMY_DATA 0x1900
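
The extra `& (mask)` above guards against a caller passing a value with bits outside the mask; a small illustration with hypothetical values, not from the patch:

#include <assert.h>

enum { cur = 0x0, mask = 0x3 << 8, val = 0x7 << 8 };	/* stray bit 10 in val */

static_assert(((cur & ~mask) | val) == 0x700,
              "old form: the stray bit leaks into the register");
static_assert(((cur & ~mask) | (val & mask)) == 0x300,
              "new form: the write is clamped to the mask");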
@@ -138,11 +138,19 @@
#define VIU_ADDR_START 0x1a00
#define VIU_ADDR_END 0x1aff
#define VIU_SW_RESET 0x1a01
+#define VIU_SW_RESET_OSD1 BIT(0)
#define VIU_MISC_CTRL0 0x1a06
+#define VIU_CTRL0_VD1_AFBC_MASK 0x170000
#define VIU_MISC_CTRL1 0x1a07
#define D2D3_INTF_LENGTH 0x1a08
#define D2D3_INTF_CTRL0 0x1a09
#define VIU_OSD1_CTRL_STAT 0x1a10
+#define VIU_OSD1_OSD_BLK_ENABLE BIT(0)
+#define VIU_OSD1_POSTBLD_SRC_VD1 (1 << 8)
+#define VIU_OSD1_POSTBLD_SRC_VD2 (2 << 8)
+#define VIU_OSD1_POSTBLD_SRC_OSD1 (3 << 8)
+#define VIU_OSD1_POSTBLD_SRC_OSD2 (4 << 8)
+#define VIU_OSD1_OSD_ENABLE BIT(21)
#define VIU_OSD1_CTRL_STAT2 0x1a2d
#define VIU_OSD1_COLOR_ADDR 0x1a11
#define VIU_OSD1_COLOR 0x1a12
@@ -232,6 +240,12 @@
#define VIU_OSD3_MALI_UNPACK_CTRL 0x3d9f
#define VIU_OSD3_DIMM_CTRL 0x3da0
+#define VIU_OSD_DDR_PRIORITY_URGENT BIT(0)
+#define VIU_OSD_HOLD_FIFO_LINES(lines) ((lines & 0x1f) << 5)
+#define VIU_OSD_FIFO_DEPTH_VAL(val) ((val & 0x7f) << 12)
+#define VIU_OSD_WORDS_PER_BURST(words) (((words & 0x4) >> 1) << 22)
+#define VIU_OSD_FIFO_LIMITS(size) ((size & 0xf) << 24)
+
#define VD1_IF0_GEN_REG 0x1a50
#define VD1_IF0_CANVAS0 0x1a51
#define VD1_IF0_CANVAS1 0x1a52
@@ -341,6 +355,7 @@
#define VPP_LINE_IN_LENGTH 0x1d01
#define VPP_PIC_IN_HEIGHT 0x1d02
#define VPP_SCALE_COEF_IDX 0x1d03
+#define VPP_SCALE_HORIZONTAL_COEF BIT(8)
#define VPP_SCALE_COEF 0x1d04
#define VPP_VSC_REGION12_STARTP 0x1d05
#define VPP_VSC_REGION34_STARTP 0x1d06
@@ -362,6 +377,12 @@
#define VPP_HSC_REGION4_PHASE_SLOPE 0x1d17
#define VPP_HSC_PHASE_CTRL 0x1d18
#define VPP_SC_MISC 0x1d19
+#define VPP_SC_VD_EN_ENABLE BIT(15)
+#define VPP_SC_TOP_EN_ENABLE BIT(16)
+#define VPP_SC_HSC_EN_ENABLE BIT(17)
+#define VPP_SC_VSC_EN_ENABLE BIT(18)
+#define VPP_VSC_BANK_LENGTH(length) (length & 0x7)
+#define VPP_HSC_BANK_LENGTH(length) ((length & 0x7) << 8)
#define VPP_PREBLEND_VD1_H_START_END 0x1d1a
#define VPP_PREBLEND_VD1_V_START_END 0x1d1b
#define VPP_POSTBLEND_VD1_H_START_END 0x1d1c
@@ -371,24 +392,28 @@
#define VPP_PREBLEND_H_SIZE 0x1d20
#define VPP_POSTBLEND_H_SIZE 0x1d21
#define VPP_HOLD_LINES 0x1d22
+#define VPP_POSTBLEND_HOLD_LINES(lines) (lines & 0xf)
+#define VPP_PREBLEND_HOLD_LINES(lines) ((lines & 0xf) << 8)
#define VPP_BLEND_ONECOLOR_CTRL 0x1d23
#define VPP_PREBLEND_CURRENT_XY 0x1d24
#define VPP_POSTBLEND_CURRENT_XY 0x1d25
#define VPP_MISC 0x1d26
-#define VPP_PREBLEND_ENABLE BIT(6)
-#define VPP_POSTBLEND_ENABLE BIT(7)
-#define VPP_OSD2_ALPHA_PREMULT BIT(8)
-#define VPP_OSD1_ALPHA_PREMULT BIT(9)
-#define VPP_VD1_POSTBLEND BIT(10)
-#define VPP_VD2_POSTBLEND BIT(11)
-#define VPP_OSD1_POSTBLEND BIT(12)
-#define VPP_OSD2_POSTBLEND BIT(13)
-#define VPP_VD1_PREBLEND BIT(14)
-#define VPP_VD2_PREBLEND BIT(15)
-#define VPP_OSD1_PREBLEND BIT(16)
-#define VPP_OSD2_PREBLEND BIT(17)
-#define VPP_COLOR_MNG_ENABLE BIT(28)
+#define VPP_PREBLEND_ENABLE BIT(6)
+#define VPP_POSTBLEND_ENABLE BIT(7)
+#define VPP_OSD2_ALPHA_PREMULT BIT(8)
+#define VPP_OSD1_ALPHA_PREMULT BIT(9)
+#define VPP_VD1_POSTBLEND BIT(10)
+#define VPP_VD2_POSTBLEND BIT(11)
+#define VPP_OSD1_POSTBLEND BIT(12)
+#define VPP_OSD2_POSTBLEND BIT(13)
+#define VPP_VD1_PREBLEND BIT(14)
+#define VPP_VD2_PREBLEND BIT(15)
+#define VPP_OSD1_PREBLEND BIT(16)
+#define VPP_OSD2_PREBLEND BIT(17)
+#define VPP_COLOR_MNG_ENABLE BIT(28)
#define VPP_OFIFO_SIZE 0x1d27
+#define VPP_OFIFO_SIZE_MASK GENMASK(13, 0)
+#define VPP_OFIFO_SIZE_DEFAULT (0xfff << 20 | 0x1000)
#define VPP_FIFO_STATUS 0x1d28
#define VPP_SMOKE_CTRL 0x1d29
#define VPP_SMOKE1_VAL 0x1d2a
@@ -404,6 +429,8 @@
#define VPP_HSC_PHASE_CTRL1 0x1d34
#define VPP_HSC_INI_PAT_CTRL 0x1d35
#define VPP_VADJ_CTRL 0x1d40
+#define VPP_MINUS_BLACK_LVL_VADJ1_ENABLE BIT(1)
+
#define VPP_VADJ1_Y 0x1d41
#define VPP_VADJ1_MA_MB 0x1d42
#define VPP_VADJ1_MC_MD 0x1d43
@@ -463,6 +490,7 @@
#define VPP_PEAKING_VGAIN 0x1d92
#define VPP_PEAKING_NLP_1 0x1d93
#define VPP_DOLBY_CTRL 0x1d93
+#define VPP_PPS_DUMMY_DATA_MODE (1 << 17)
#define VPP_PEAKING_NLP_2 0x1d94
#define VPP_PEAKING_NLP_3 0x1d95
#define VPP_PEAKING_NLP_4 0x1d96
@@ -593,6 +621,7 @@
#define OSD34_SCI_WH_M1 0x3d29
#define OSD34_SCO_H_START_END 0x3d2a
#define OSD34_SCO_V_START_END 0x3d2b
+
/* viu2 */
#define VIU2_ADDR_START 0x1e00
#define VIU2_ADDR_END 0x1eff
@@ -706,6 +735,25 @@
#define VENC_UPSAMPLE_CTRL0 0x1b64
#define VENC_UPSAMPLE_CTRL1 0x1b65
#define VENC_UPSAMPLE_CTRL2 0x1b66
+#define VENC_UPSAMPLE_CTRL_F0_2_CLK_RATIO BIT(0)
+#define VENC_UPSAMPLE_CTRL_F1_EN BIT(5)
+#define VENC_UPSAMPLE_CTRL_F1_UPSAMPLE_EN BIT(6)
+#define VENC_UPSAMPLE_CTRL_INTERLACE_HIGH_LUMA (0x0 << 12)
+#define VENC_UPSAMPLE_CTRL_CVBS (0x1 << 12)
+#define VENC_UPSAMPLE_CTRL_S_VIDEO_LUMA (0x2 << 12)
+#define VENC_UPSAMPLE_CTRL_S_VIDEO_CHROMA (0x3 << 12)
+#define VENC_UPSAMPLE_CTRL_INTERLACE_PB (0x4 << 12)
+#define VENC_UPSAMPLE_CTRL_INTERLACE_PR (0x5 << 12)
+#define VENC_UPSAMPLE_CTRL_INTERLACE_R (0x6 << 12)
+#define VENC_UPSAMPLE_CTRL_INTERLACE_G (0x7 << 12)
+#define VENC_UPSAMPLE_CTRL_INTERLACE_B (0x8 << 12)
+#define VENC_UPSAMPLE_CTRL_PROGRESSIVE_Y (0x9 << 12)
+#define VENC_UPSAMPLE_CTRL_PROGRESSIVE_PB (0xa << 12)
+#define VENC_UPSAMPLE_CTRL_PROGRESSIVE_PR (0xb << 12)
+#define VENC_UPSAMPLE_CTRL_PROGRESSIVE_R (0xc << 12)
+#define VENC_UPSAMPLE_CTRL_PROGRESSIVE_G (0xd << 12)
+#define VENC_UPSAMPLE_CTRL_PROGRESSIVE_B (0xe << 12)
+#define VENC_UPSAMPLE_CTRL_VDAC_TEST_VALUE (0xf << 12)
#define TCON_INVERT_CTL 0x1b67
#define VENC_VIDEO_PROG_MODE 0x1b68
#define VENC_ENCI_LINE 0x1b69
@@ -714,6 +762,7 @@
#define VENC_ENCP_PIXEL 0x1b6c
#define VENC_STATA 0x1b6d
#define VENC_INTCTRL 0x1b6e
+#define VENC_INTCTRL_ENCI_LNRST_INT_EN BIT(1)
#define VENC_INTFLAG 0x1b6f
#define VENC_VIDEO_TST_EN 0x1b70
#define VENC_VIDEO_TST_MDSEL 0x1b71
@@ -724,6 +773,7 @@
#define VENC_VIDEO_TST_CLRBAR_WIDTH 0x1b76
#define VENC_VIDEO_TST_VDCNT_STSET 0x1b77
#define VENC_VDAC_DACSEL0 0x1b78
+#define VENC_VDAC_SEL_ATV_DMD BIT(5)
#define VENC_VDAC_DACSEL1 0x1b79
#define VENC_VDAC_DACSEL2 0x1b7a
#define VENC_VDAC_DACSEL3 0x1b7b
@@ -744,6 +794,7 @@
#define VENC_VDAC_DAC5_GAINCTRL 0x1bfa
#define VENC_VDAC_DAC5_OFFSET 0x1bfb
#define VENC_VDAC_FIFO_CTRL 0x1bfc
+#define VENC_VDAC_FIFO_EN_ENCI_ENABLE BIT(13)
#define ENCL_TCON_INVERT_CTL 0x1bfd
#define ENCP_VIDEO_EN 0x1b80
#define ENCP_VIDEO_SYNC_MODE 0x1b81
@@ -759,6 +810,7 @@
#define ENCP_VIDEO_SYNC_OFFST 0x1b8b
#define ENCP_VIDEO_MACV_OFFST 0x1b8c
#define ENCP_VIDEO_MODE 0x1b8d
+#define ENCP_VIDEO_MODE_DE_V_HIGH BIT(14)
#define ENCP_VIDEO_MODE_ADV 0x1b8e
#define ENCP_DBG_PX_RST 0x1b90
#define ENCP_DBG_LN_RST 0x1b91
@@ -837,6 +889,11 @@
#define C656_FS_LNED 0x1be7
#define ENCI_VIDEO_MODE 0x1b00
#define ENCI_VIDEO_MODE_ADV 0x1b01
+#define ENCI_VIDEO_MODE_ADV_DMXMD(val) (val & 0x3)
+#define ENCI_VIDEO_MODE_ADV_VBICTL_LINE_17_22 BIT(2)
+#define ENCI_VIDEO_MODE_ADV_YBW_MEDIUM (0 << 4)
+#define ENCI_VIDEO_MODE_ADV_YBW_LOW (0x1 << 4)
+#define ENCI_VIDEO_MODE_ADV_YBW_HIGH (0x2 << 4)
#define ENCI_VIDEO_FSC_ADJ 0x1b02
#define ENCI_VIDEO_BRIGHT 0x1b03
#define ENCI_VIDEO_CONT 0x1b04
@@ -907,13 +964,17 @@
#define ENCI_DBG_MAXPX 0x1b4c
#define ENCI_DBG_MAXLN 0x1b4d
#define ENCI_MACV_MAX_AMP 0x1b50
+#define ENCI_MACV_MAX_AMP_ENABLE_CHANGE BIT(15)
+#define ENCI_MACV_MAX_AMP_VAL(val) (val & 0x83ff)
#define ENCI_MACV_PULSE_LO 0x1b51
#define ENCI_MACV_PULSE_HI 0x1b52
#define ENCI_MACV_BKP_MAX 0x1b53
#define ENCI_CFILT_CTRL 0x1b54
+#define ENCI_CFILT_CMPT_SEL_HIGH BIT(1)
#define ENCI_CFILT7 0x1b55
#define ENCI_YC_DELAY 0x1b56
#define ENCI_VIDEO_EN 0x1b57
+#define ENCI_VIDEO_EN_ENABLE BIT(0)
#define ENCI_DVI_HSO_BEGIN 0x1c00
#define ENCI_DVI_HSO_END 0x1c01
#define ENCI_DVI_VSO_BLINE_EVN 0x1c02
@@ -925,6 +986,10 @@
#define ENCI_DVI_VSO_END_EVN 0x1c08
#define ENCI_DVI_VSO_END_ODD 0x1c09
#define ENCI_CFILT_CTRL2 0x1c0a
+#define ENCI_CFILT_CMPT_CR_DLY(delay) (delay & 0xf)
+#define ENCI_CFILT_CMPT_CB_DLY(delay) ((delay & 0xf) << 4)
+#define ENCI_CFILT_CVBS_CR_DLY(delay) ((delay & 0xf) << 8)
+#define ENCI_CFILT_CVBS_CB_DLY(delay) ((delay & 0xf) << 12)
#define ENCI_DACSEL_0 0x1c0b
#define ENCI_DACSEL_1 0x1c0c
#define ENCP_DACSEL_0 0x1c0d
@@ -939,6 +1004,8 @@
#define ENCI_TST_CLRBAR_WIDTH 0x1c16
#define ENCI_TST_VDCNT_STSET 0x1c17
#define ENCI_VFIFO2VD_CTL 0x1c18
+#define ENCI_VFIFO2VD_CTL_ENABLE BIT(0)
+#define ENCI_VFIFO2VD_CTL_VD_SEL(val) ((val & 0xff) << 8)
#define ENCI_VFIFO2VD_PIXEL_START 0x1c19
#define ENCI_VFIFO2VD_PIXEL_END 0x1c1a
#define ENCI_VFIFO2VD_LINE_TOP_START 0x1c1b
@@ -1001,6 +1068,7 @@
#define VENC_VDAC_DAC5_FILT_CTRL0 0x1c56
#define VENC_VDAC_DAC5_FILT_CTRL1 0x1c57
#define VENC_VDAC_DAC0_FILT_CTRL0 0x1c58
+#define VENC_VDAC_DAC0_FILT_CTRL0_EN BIT(0)
#define VENC_VDAC_DAC0_FILT_CTRL1 0x1c59
#define VENC_VDAC_DAC1_FILT_CTRL0 0x1c5a
#define VENC_VDAC_DAC1_FILT_CTRL1 0x1c5b
@@ -1406,6 +1474,18 @@
#define VIU2_SEL_VENC_ENCP (2 << 2)
#define VIU2_SEL_VENC_ENCT (3 << 2)
#define VPU_HDMI_SETTING 0x271b
+#define VPU_HDMI_ENCI_DATA_TO_HDMI BIT(0)
+#define VPU_HDMI_ENCP_DATA_TO_HDMI BIT(1)
+#define VPU_HDMI_INV_HSYNC BIT(2)
+#define VPU_HDMI_INV_VSYNC BIT(3)
+#define VPU_HDMI_OUTPUT_CRYCB (0 << 5)
+#define VPU_HDMI_OUTPUT_YCBCR (1 << 5)
+#define VPU_HDMI_OUTPUT_YCRCB (2 << 5)
+#define VPU_HDMI_OUTPUT_CBCRY (3 << 5)
+#define VPU_HDMI_OUTPUT_CBYCR (4 << 5)
+#define VPU_HDMI_OUTPUT_CRCBY (5 << 5)
+#define VPU_HDMI_WR_RATE(rate) (((rate & 0x1f) - 1) << 8)
+#define VPU_HDMI_RD_RATE(rate) (((rate & 0x1f) - 1) << 12)
#define ENCI_INFO_READ 0x271c
#define ENCP_INFO_READ 0x271d
#define ENCT_INFO_READ 0x271e
@@ -1482,6 +1562,7 @@
#define VPU_RDARB_MODE_L1C2 0x2799
#define VPU_RDARB_MODE_L2C1 0x279d
#define VPU_WRARB_MODE_L2C1 0x27a2
+#define VPU_RDARB_SLAVE_TO_MASTER_PORT(dc, port) (port << (16 + dc))
/* osd super scale */
#define OSDSR_HV_SIZEIN 0x3130
@@ -1523,7 +1604,6 @@
#define OSD1_AFBCD_STATUS 0x31a8
#define OSD1_AFBCD_PIXEL_HSCOPE 0x31a9
#define OSD1_AFBCD_PIXEL_VSCOPE 0x31aa
-#define VIU_MISC_CTRL1 0x1a07
/* add for gxm and 962e dv core2 */
#define DOLBY_CORE2A_SWAP_CTRL1 0x3434
@@ -1538,8 +1618,6 @@
#define VPU_MAFBC_COMMAND 0x3a05
#define VPU_MAFBC_STATUS 0x3a06
#define VPU_MAFBC_SURFACE_CFG 0x3a07
-
-/* osd afbc on g12a */
#define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S0 0x3a10
#define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S0 0x3a11
#define VPU_MAFBC_FORMAT_SPECIFIER_S0 0x3a12
@@ -1597,10 +1675,18 @@
#define VPU_MAFBC_PREFETCH_CFG_S3 0x3a7c
#define DOLBY_PATH_CTRL 0x1a0c
+#define DOLBY_BYPASS_EN(val) (val & 0xf)
#define OSD_PATH_MISC_CTRL 0x1a0e
#define MALI_AFBCD_TOP_CTRL 0x1a0f
#define VIU_OSD_BLEND_CTRL 0x39b0
+#define VIU_OSD_BLEND_REORDER(dest, src) ((src) << (dest * 4))
+#define VIU_OSD_BLEND_DIN_EN(bits) ((bits & 0xf) << 20)
+#define VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 BIT(24)
+#define VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 BIT(25)
+#define VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 BIT(26)
+#define VIU_OSD_BLEND_BLEN2_PREMULT_EN(input) ((input & 0x3) << 27)
+#define VIU_OSD_BLEND_HOLD_LINES(lines) ((lines & 0x7) << 29)
#define VIU_OSD_BLEND_CTRL1 0x39c0
#define VIU_OSD_BLEND_DIN0_SCOPE_H 0x39b1
#define VIU_OSD_BLEND_DIN0_SCOPE_V 0x39b2
@@ -1630,13 +1716,27 @@
#define VPP_SLEEP_CTRL 0x1dfa
#define VD1_BLEND_SRC_CTRL 0x1dfb
#define VD2_BLEND_SRC_CTRL 0x1dfc
+#define VD_BLEND_PREBLD_SRC_VD1 (1 << 0)
+#define VD_BLEND_PREBLD_SRC_VD2 (2 << 0)
+#define VD_BLEND_PREBLD_SRC_OSD1 (3 << 0)
+#define VD_BLEND_PREBLD_SRC_OSD2 (4 << 0)
+#define VD_BLEND_PREBLD_PREMULT_EN BIT(4)
+#define VD_BLEND_POSTBLD_SRC_VD1 (1 << 8)
+#define VD_BLEND_POSTBLD_SRC_VD2 (2 << 8)
+#define VD_BLEND_POSTBLD_SRC_OSD1 (3 << 8)
+#define VD_BLEND_POSTBLD_SRC_OSD2 (4 << 8)
+#define VD_BLEND_POSTBLD_PREMULT_EN BIT(16)
#define OSD1_BLEND_SRC_CTRL 0x1dfd
#define OSD2_BLEND_SRC_CTRL 0x1dfe
+#define OSD_BLEND_POSTBLD_SRC_VD1 (1 << 8)
+#define OSD_BLEND_POSTBLD_SRC_VD2 (2 << 8)
+#define OSD_BLEND_POSTBLD_SRC_OSD1 (3 << 8)
+#define OSD_BLEND_POSTBLD_SRC_OSD2 (4 << 8)
+#define OSD_BLEND_PATH_SEL_ENABLE BIT(20)
#define VPP_POST_BLEND_BLEND_DUMMY_DATA 0x3968
#define VPP_POST_BLEND_DUMMY_ALPHA 0x3969
#define VPP_RDARB_MODE 0x3978
#define VPP_RDARB_REQEN_SLV 0x3979
-#define VPU_RDARB_MODE_L2C1 0x279d
#endif /* __MESON_REGISTERS_H */
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index 8abff51f937d..ac491a781952 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -97,6 +97,7 @@
#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
#define HHI_HDMI_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */
+#define HHI_HDMI_PLL_CNTL_EN BIT(30)
#define HHI_HDMI_PLL_CNTL2 0x324 /* 0xc9 offset in data sheet */
#define HHI_HDMI_PLL_CNTL3 0x328 /* 0xca offset in data sheet */
#define HHI_HDMI_PLL_CNTL4 0x32C /* 0xcb offset in data sheet */
@@ -241,7 +242,7 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
unsigned int val;
/* Setup PLL to output 1.485GHz */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800023d);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00404e00);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
@@ -253,8 +254,8 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
/* Poll for lock bit */
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
(val & HDMI_PLL_LOCK), 10, 0);
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4000027b);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb300);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0xa6212844);
@@ -271,7 +272,7 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
/* Poll for lock bit */
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
(val & HDMI_PLL_LOCK), 10, 0);
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x1a0504f7);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00010000);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x00000000);
@@ -299,7 +300,7 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
VCLK2_DIV_MASK, (55 - 1));
/* select vid_pll for vclk2 */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
VCLK2_SEL_MASK, (0 << VCLK2_SEL_SHIFT));
else
@@ -454,7 +455,7 @@ void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
{
unsigned int val;
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000200 | m);
if (frac)
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2,
@@ -469,13 +470,13 @@ void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
/* Enable and unreset */
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
- 0x7 << 28, 0x4 << 28);
+ 0x7 << 28, HHI_HDMI_PLL_CNTL_EN);
/* Poll for lock bit */
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
val, (val & HDMI_PLL_LOCK), 10, 0);
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000200 | m);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000 | frac);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
@@ -492,10 +493,11 @@ void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
/* Poll for lock bit */
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
(val & HDMI_PLL_LOCK), 10, 0);
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x0b3a0400 | m);
/* Enable and reset */
+ /* TODO: add specific macro for g12a here */
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
0x3 << 28, 0x3 << 28);
@@ -543,36 +545,36 @@ void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
} while(1);
}
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
3 << 16, pll_od_to_reg(od1) << 16);
- else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
3 << 21, pll_od_to_reg(od1) << 21);
- else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
3 << 16, pll_od_to_reg(od1) << 16);
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
3 << 22, pll_od_to_reg(od2) << 22);
- else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
3 << 23, pll_od_to_reg(od2) << 23);
- else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
3 << 18, pll_od_to_reg(od2) << 18);
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
3 << 18, pll_od_to_reg(od3) << 18);
- else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
3 << 19, pll_od_to_reg(od3) << 19);
- else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
3 << 20, pll_od_to_reg(od3) << 20);
}
@@ -583,7 +585,7 @@ static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv,
unsigned int pll_freq)
{
/* The GXBB PLL has a /2 pre-multiplier */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
pll_freq /= 2;
return pll_freq / XTAL_FREQ;
@@ -603,12 +605,12 @@ static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
unsigned int frac;
/* The GXBB PLL has a /2 pre-multiplier and a larger FRAC width */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
frac_max = HDMI_FRAC_MAX_GXBB;
parent_freq *= 2;
}
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
frac_max = HDMI_FRAC_MAX_G12A;
/* We can have a perfect match! */
@@ -629,15 +631,15 @@ static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
unsigned int m,
unsigned int frac)
{
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
/* Empirically supported min/max dividers */
if (m < 53 || m > 123)
return false;
if (frac >= HDMI_FRAC_MAX_GXBB)
return false;
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
/* Empirically supported min/max dividers */
if (m < 106 || m > 247)
return false;
@@ -757,7 +759,7 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
/* Set HDMI PLL rate */
if (!od1 && !od2 && !od3) {
meson_hdmi_pll_generic_set(priv, pll_base_freq);
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
switch (pll_base_freq) {
case 2970000:
m = 0x3d;
@@ -774,8 +776,8 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
}
meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
switch (pll_base_freq) {
case 2970000:
m = 0x7b;
@@ -792,7 +794,7 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
}
meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
switch (pll_base_freq) {
case 2970000:
m = 0x7b;
@@ -970,7 +972,8 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
meson_venci_cvbs_clock_config(priv);
return;
} else if (target == MESON_VCLK_TARGET_DMT) {
- /* The DMT clock path is fixed after the PLL:
+ /*
+ * The DMT clock path is fixed after the PLL:
* - automatic PLL freq + OD management
* - vid_pll_div = VID_PLL_DIV_5
* - vclk_div = 2
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 3d4791798ae0..4efd7864d5bf 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -61,9 +61,9 @@
/* HHI Registers */
#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */
#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
-#define HHI_VDAC_CNTL0_G12A 0x2EC /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL0_G12A 0x2EC /* 0xbb offset in data sheet */
#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
-#define HHI_VDAC_CNTL1_G12A 0x2F0 /* 0xbe offset in data sheet */
+#define HHI_VDAC_CNTL1_G12A 0x2F0 /* 0xbc offset in data sheet */
#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */
struct meson_cvbs_enci_mode meson_cvbs_enci_pal = {
@@ -192,7 +192,7 @@ union meson_hdmi_venc_mode meson_hdmi_enci_mode_480i = {
.hso_end = 129,
.vso_even = 3,
.vso_odd = 260,
- .macv_max_amp = 0x810b,
+ .macv_max_amp = 0xb,
.video_prog_mode = 0xf0,
.video_mode = 0x8,
.sch_adjust = 0x20,
@@ -212,7 +212,7 @@ union meson_hdmi_venc_mode meson_hdmi_enci_mode_576i = {
.hso_end = 129,
.vso_even = 3,
.vso_odd = 260,
- .macv_max_amp = 8107,
+ .macv_max_amp = 0x7,
.video_prog_mode = 0xff,
.video_mode = 0x13,
.sch_adjust = 0x28,
@@ -976,6 +976,7 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
unsigned int eof_lines;
unsigned int sof_lines;
unsigned int vsync_lines;
+ u32 reg;
/* Use VENCI for 480i and 576i and double HDMI pixels */
if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
@@ -1048,8 +1049,11 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
unsigned int lines_f1;
/* CVBS Filter settings */
- writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL));
- writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL2));
+ writel_relaxed(ENCI_CFILT_CMPT_SEL_HIGH | 0x10,
+ priv->io_base + _REG(ENCI_CFILT_CTRL));
+ writel_relaxed(ENCI_CFILT_CMPT_CR_DLY(2) |
+ ENCI_CFILT_CMPT_CB_DLY(1),
+ priv->io_base + _REG(ENCI_CFILT_CTRL2));
/* Digital Video Select : Interlace, clk27 clk, external */
writel_relaxed(0, priv->io_base + _REG(VENC_DVI_SETTING));
@@ -1071,8 +1075,9 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
priv->io_base + _REG(ENCI_SYNC_VSO_ODDLN));
/* Macrovision max amplitude change */
- writel_relaxed(vmode->enci.macv_max_amp,
- priv->io_base + _REG(ENCI_MACV_MAX_AMP));
+ writel_relaxed(ENCI_MACV_MAX_AMP_ENABLE_CHANGE |
+ ENCI_MACV_MAX_AMP_VAL(vmode->enci.macv_max_amp),
+ priv->io_base + _REG(ENCI_MACV_MAX_AMP));
/* Video mode */
writel_relaxed(vmode->enci.video_prog_mode,
@@ -1080,7 +1085,8 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
writel_relaxed(vmode->enci.video_mode,
priv->io_base + _REG(ENCI_VIDEO_MODE));
- /* Advanced Video Mode :
+ /*
+ * Advanced Video Mode :
* Demux shifting 0x2
* Blank line end at line17/22
* High bandwidth Luma Filter
@@ -1088,7 +1094,10 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
* Bypass luma low pass filter
* No macrovision on CSYNC
*/
- writel_relaxed(0x26, priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
+ writel_relaxed(ENCI_VIDEO_MODE_ADV_DMXMD(2) |
+ ENCI_VIDEO_MODE_ADV_VBICTL_LINE_17_22 |
+ ENCI_VIDEO_MODE_ADV_YBW_HIGH,
+ priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
writel(vmode->enci.sch_adjust,
priv->io_base + _REG(ENCI_VIDEO_SCH));
@@ -1104,8 +1113,17 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
/* UNreset Interlaced TV Encoder */
writel_relaxed(0, priv->io_base + _REG(ENCI_DBG_PX_RST));
- /* Enable Vfifo2vd, Y_Cb_Y_Cr select */
- writel_relaxed(0x4e01, priv->io_base + _REG(ENCI_VFIFO2VD_CTL));
+ /*
+ * Enable Vfifo2vd and set Y_Cb_Y_Cr:
+ * Corresponding value:
+ * Y => 00 or 10
+ * Cb => 01
+ * Cr => 11
+ * Ex: 0x4e => 01001110 would mean Cb/Y/Cr/Y
+ */
+ writel_relaxed(ENCI_VFIFO2VD_CTL_ENABLE |
+ ENCI_VFIFO2VD_CTL_VD_SEL(0x4e),
+ priv->io_base + _REG(ENCI_VFIFO2VD_CTL));
/* Timings */
writel_relaxed(vmode->enci.pixel_start,
@@ -1127,7 +1145,8 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCI);
/* Interlace video enable */
- writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN));
+ writel_relaxed(ENCI_VIDEO_EN_ENABLE,
+ priv->io_base + _REG(ENCI_VIDEO_EN));
lines_f0 = mode->vtotal >> 1;
lines_f1 = lines_f0 + 1;
@@ -1374,7 +1393,8 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
/* Set DE signal’s polarity to active high */
- writel_bits_relaxed(BIT(14), BIT(14),
+ writel_bits_relaxed(ENCP_VIDEO_MODE_DE_V_HIGH,
+ ENCP_VIDEO_MODE_DE_V_HIGH,
priv->io_base + _REG(ENCP_VIDEO_MODE));
/* Program DE timing */
@@ -1493,13 +1513,39 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCP);
}
- writel_relaxed((use_enci ? 1 : 2) |
- (mode->flags & DRM_MODE_FLAG_PHSYNC ? 1 << 2 : 0) |
- (mode->flags & DRM_MODE_FLAG_PVSYNC ? 1 << 3 : 0) |
- 4 << 5 |
- (venc_repeat ? 1 << 8 : 0) |
- (hdmi_repeat ? 1 << 12 : 0),
- priv->io_base + _REG(VPU_HDMI_SETTING));
+ /* Set VPU HDMI setting */
+ /* Select ENCP or ENCI data to HDMI */
+ if (use_enci)
+ reg = VPU_HDMI_ENCI_DATA_TO_HDMI;
+ else
+ reg = VPU_HDMI_ENCP_DATA_TO_HDMI;
+
+ /* Invert polarity of HSYNC from VENC */
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ reg |= VPU_HDMI_INV_HSYNC;
+
+ /* Invert polarity of VSYNC from VENC */
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ reg |= VPU_HDMI_INV_VSYNC;
+
+ /* Output data format: CbYCr */
+ reg |= VPU_HDMI_OUTPUT_CBYCR;
+
+ /*
+ * Write rate to the async FIFO between VENC and HDMI.
+ * One write every 2 wr_clk.
+ */
+ if (venc_repeat)
+ reg |= VPU_HDMI_WR_RATE(2);
+
+ /*
+ * Read rate to the async FIFO between VENC and HDMI.
+ * One read every 2 wr_clk.
+ */
+ if (hdmi_repeat)
+ reg |= VPU_HDMI_RD_RATE(2);
+
+ writel_relaxed(reg, priv->io_base + _REG(VPU_HDMI_SETTING));
priv->venc.hdmi_repeat = hdmi_repeat;
priv->venc.venc_repeat = venc_repeat;
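
A host-side check (not part of the patch) that the named flags compose to the same word the old expression built, taking the ENCP path with both syncs inverted and both repeats on; macros copied from the meson_registers.h hunks:

#include <assert.h>

#define BIT(n)                      (1u << (n))
#define VPU_HDMI_ENCP_DATA_TO_HDMI  BIT(1)
#define VPU_HDMI_INV_HSYNC          BIT(2)
#define VPU_HDMI_INV_VSYNC          BIT(3)
#define VPU_HDMI_OUTPUT_CBYCR       (4 << 5)
#define VPU_HDMI_WR_RATE(rate)      (((rate & 0x1f) - 1) << 8)
#define VPU_HDMI_RD_RATE(rate)      (((rate & 0x1f) - 1) << 12)

static_assert((VPU_HDMI_ENCP_DATA_TO_HDMI | VPU_HDMI_INV_HSYNC |
               VPU_HDMI_INV_VSYNC | VPU_HDMI_OUTPUT_CBYCR |
               VPU_HDMI_WR_RATE(2) | VPU_HDMI_RD_RATE(2)) ==
              (2 | 1 << 2 | 1 << 3 | 4 << 5 | 1 << 8 | 1 << 12),
              "matches the old VPU_HDMI_SETTING expression");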
@@ -1512,12 +1558,17 @@ EXPORT_SYMBOL_GPL(meson_venc_hdmi_mode_set);
void meson_venci_cvbs_mode_set(struct meson_drm *priv,
struct meson_cvbs_enci_mode *mode)
{
+ u32 reg;
+
if (mode->mode_tag == priv->venc.current_mode)
return;
/* CVBS Filter settings */
- writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL));
- writel_relaxed(0x12, priv->io_base + _REG(ENCI_CFILT_CTRL2));
+ writel_relaxed(ENCI_CFILT_CMPT_SEL_HIGH | 0x10,
+ priv->io_base + _REG(ENCI_CFILT_CTRL));
+ writel_relaxed(ENCI_CFILT_CMPT_CR_DLY(2) |
+ ENCI_CFILT_CMPT_CB_DLY(1),
+ priv->io_base + _REG(ENCI_CFILT_CTRL2));
/* Digital Video Select : Interlace, clk27 clk, external */
writel_relaxed(0, priv->io_base + _REG(VENC_DVI_SETTING));
@@ -1539,8 +1590,9 @@ void meson_venci_cvbs_mode_set(struct meson_drm *priv,
priv->io_base + _REG(ENCI_SYNC_VSO_ODDLN));
/* Macrovision max amplitude change */
- writel_relaxed(0x8100 + mode->macv_max_amp,
- priv->io_base + _REG(ENCI_MACV_MAX_AMP));
+ writel_relaxed(ENCI_MACV_MAX_AMP_ENABLE_CHANGE |
+ ENCI_MACV_MAX_AMP_VAL(mode->macv_max_amp),
+ priv->io_base + _REG(ENCI_MACV_MAX_AMP));
/* Video mode */
writel_relaxed(mode->video_prog_mode,
@@ -1548,7 +1600,8 @@ void meson_venci_cvbs_mode_set(struct meson_drm *priv,
writel_relaxed(mode->video_mode,
priv->io_base + _REG(ENCI_VIDEO_MODE));
- /* Advanced Video Mode :
+ /*
+ * Advanced Video Mode :
* Demux shifting 0x2
* Blank line end at line17/22
* High bandwidth Luma Filter
@@ -1556,7 +1609,10 @@ void meson_venci_cvbs_mode_set(struct meson_drm *priv,
* Bypass luma low pass filter
* No macrovision on CSYNC
*/
- writel_relaxed(0x26, priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
+ writel_relaxed(ENCI_VIDEO_MODE_ADV_DMXMD(2) |
+ ENCI_VIDEO_MODE_ADV_VBICTL_LINE_17_22 |
+ ENCI_VIDEO_MODE_ADV_YBW_HIGH,
+ priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
writel(mode->sch_adjust, priv->io_base + _REG(ENCI_VIDEO_SCH));
@@ -1588,16 +1644,50 @@ void meson_venci_cvbs_mode_set(struct meson_drm *priv,
/* UNreset Interlaced TV Encoder */
writel_relaxed(0, priv->io_base + _REG(ENCI_DBG_PX_RST));
- /* Enable Vfifo2vd, Y_Cb_Y_Cr select */
- writel_relaxed(0x4e01, priv->io_base + _REG(ENCI_VFIFO2VD_CTL));
+ /*
+ * Enable Vfifo2vd and set Y_Cb_Y_Cr:
+ * Corresponding value:
+ * Y => 00 or 10
+ * Cb => 01
+ * Cr => 11
+ * Ex: 0x4e => 01001110 would mean Cb/Y/Cr/Y
+ */
+ writel_relaxed(ENCI_VFIFO2VD_CTL_ENABLE |
+ ENCI_VFIFO2VD_CTL_VD_SEL(0x4e),
+ priv->io_base + _REG(ENCI_VFIFO2VD_CTL));
/* Power UP Dacs */
writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_SETTING));
/* Video Upsampling */
- writel_relaxed(0x0061, priv->io_base + _REG(VENC_UPSAMPLE_CTRL0));
- writel_relaxed(0x4061, priv->io_base + _REG(VENC_UPSAMPLE_CTRL1));
- writel_relaxed(0x5061, priv->io_base + _REG(VENC_UPSAMPLE_CTRL2));
+ /*
+ * CTRL0, CTRL1 and CTRL2:
+ * Filter0: input data sampled every 2 clocks
+ * Filter1: filtering and upsample enable
+ */
+ reg = VENC_UPSAMPLE_CTRL_F0_2_CLK_RATIO | VENC_UPSAMPLE_CTRL_F1_EN |
+ VENC_UPSAMPLE_CTRL_F1_UPSAMPLE_EN;
+
+ /*
+ * Upsample CTRL0:
+ * Interlace High Bandwidth Luma
+ */
+ writel_relaxed(VENC_UPSAMPLE_CTRL_INTERLACE_HIGH_LUMA | reg,
+ priv->io_base + _REG(VENC_UPSAMPLE_CTRL0));
+
+ /*
+ * Upsample CTRL1:
+ * Interlace Pb
+ */
+ writel_relaxed(VENC_UPSAMPLE_CTRL_INTERLACE_PB | reg,
+ priv->io_base + _REG(VENC_UPSAMPLE_CTRL1));
+
+ /*
+ * Upsample CTRL2:
+ * Interlace Pr
+ */
+ writel_relaxed(VENC_UPSAMPLE_CTRL_INTERLACE_PR | reg,
+ priv->io_base + _REG(VENC_UPSAMPLE_CTRL2));
/* Select Interlace Y DACs */
writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL0));
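
A compile-time sketch (not part of the patch; REG_COMMON is a local name) showing that the named upsample bits reproduce the old 0x0061/0x4061/0x5061 literals:

#include <assert.h>

#define BIT(n)                                  (1u << (n))
#define VENC_UPSAMPLE_CTRL_F0_2_CLK_RATIO       BIT(0)
#define VENC_UPSAMPLE_CTRL_F1_EN                BIT(5)
#define VENC_UPSAMPLE_CTRL_F1_UPSAMPLE_EN       BIT(6)
#define VENC_UPSAMPLE_CTRL_INTERLACE_HIGH_LUMA  (0x0 << 12)
#define VENC_UPSAMPLE_CTRL_INTERLACE_PB         (0x4 << 12)
#define VENC_UPSAMPLE_CTRL_INTERLACE_PR         (0x5 << 12)

#define REG_COMMON (VENC_UPSAMPLE_CTRL_F0_2_CLK_RATIO | \
                    VENC_UPSAMPLE_CTRL_F1_EN | \
                    VENC_UPSAMPLE_CTRL_F1_UPSAMPLE_EN)

static_assert((REG_COMMON | VENC_UPSAMPLE_CTRL_INTERLACE_HIGH_LUMA) == 0x0061,
              "CTRL0: old literal");
static_assert((REG_COMMON | VENC_UPSAMPLE_CTRL_INTERLACE_PB) == 0x4061,
              "CTRL1: old literal");
static_assert((REG_COMMON | VENC_UPSAMPLE_CTRL_INTERLACE_PR) == 0x5061,
              "CTRL2: old literal");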
@@ -1611,14 +1701,16 @@ void meson_venci_cvbs_mode_set(struct meson_drm *priv,
meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCI);
/* Enable ENCI FIFO */
- writel_relaxed(0x2000, priv->io_base + _REG(VENC_VDAC_FIFO_CTRL));
+ writel_relaxed(VENC_VDAC_FIFO_EN_ENCI_ENABLE,
+ priv->io_base + _REG(VENC_VDAC_FIFO_CTRL));
/* Select ENCI DACs 0, 1, 4, and 5 */
writel_relaxed(0x11, priv->io_base + _REG(ENCI_DACSEL_0));
writel_relaxed(0x11, priv->io_base + _REG(ENCI_DACSEL_1));
/* Interlace video enable */
- writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN));
+ writel_relaxed(ENCI_VIDEO_EN_ENABLE,
+ priv->io_base + _REG(ENCI_VIDEO_EN));
/* Configure Video Saturation / Contrast / Brightness / Hue */
writel_relaxed(mode->video_saturation,
@@ -1631,7 +1723,8 @@ void meson_venci_cvbs_mode_set(struct meson_drm *priv,
priv->io_base + _REG(ENCI_VIDEO_HUE));
/* Enable DAC0 Filter */
- writel_relaxed(0x1, priv->io_base + _REG(VENC_VDAC_DAC0_FILT_CTRL0));
+ writel_relaxed(VENC_VDAC_DAC0_FILT_CTRL0_EN,
+ priv->io_base + _REG(VENC_VDAC_DAC0_FILT_CTRL0));
writel_relaxed(0xfc48, priv->io_base + _REG(VENC_VDAC_DAC0_FILT_CTRL1));
/* 0 in Macrovision register 0 */
@@ -1652,7 +1745,8 @@ unsigned int meson_venci_get_field(struct meson_drm *priv)
void meson_venc_enable_vsync(struct meson_drm *priv)
{
- writel_relaxed(2, priv->io_base + _REG(VENC_INTCTRL));
+ writel_relaxed(VENC_INTCTRL_ENCI_LNRST_INT_EN,
+ priv->io_base + _REG(VENC_INTCTRL));
regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), BIT(25));
}
@@ -1665,7 +1759,7 @@ void meson_venc_disable_vsync(struct meson_drm *priv)
void meson_venc_init(struct meson_drm *priv)
{
/* Disable CVBS VDAC */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0);
regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 8);
} else {
@@ -1680,7 +1774,8 @@ void meson_venc_init(struct meson_drm *priv)
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0);
/* Disable HDMI */
- writel_bits_relaxed(0x3, 0,
+ writel_bits_relaxed(VPU_HDMI_ENCI_DATA_TO_HDMI |
+ VPU_HDMI_ENCP_DATA_TO_HDMI, 0,
priv->io_base + _REG(VPU_HDMI_SETTING));
/* Disable all encoders */
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index 45a467f10b9b..9ab27aecfcf3 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -155,7 +155,7 @@ static void meson_venc_cvbs_encoder_disable(struct drm_encoder *encoder)
struct meson_drm *priv = meson_venc_cvbs->priv;
/* Disable CVBS VDAC */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0);
regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0);
} else {
@@ -171,16 +171,17 @@ static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder)
struct meson_drm *priv = meson_venc_cvbs->priv;
/* VDAC0 source is not from ATV */
- writel_bits_relaxed(BIT(5), 0, priv->io_base + _REG(VENC_VDAC_DACSEL0));
+ writel_bits_relaxed(VENC_VDAC_SEL_ATV_DMD, 0,
+ priv->io_base + _REG(VENC_VDAC_DACSEL0));
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0, 1);
regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0xf0001);
regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0x906001);
regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0);
}
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 9f8a450d50d5..68cf2c2eca5f 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -320,9 +320,9 @@ void meson_viu_osd1_reset(struct meson_drm *priv)
priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
/* Reset OSD1 */
- writel_bits_relaxed(BIT(0), BIT(0),
+ writel_bits_relaxed(VIU_SW_RESET_OSD1, VIU_SW_RESET_OSD1,
priv->io_base + _REG(VIU_SW_RESET));
- writel_bits_relaxed(BIT(0), 0,
+ writel_bits_relaxed(VIU_SW_RESET_OSD1, 0,
priv->io_base + _REG(VIU_SW_RESET));
/* Rewrite these registers state lost in the reset */
@@ -335,38 +335,43 @@ void meson_viu_osd1_reset(struct meson_drm *priv)
meson_viu_load_matrix(priv);
}
+static inline uint32_t meson_viu_osd_burst_length_reg(uint32_t length)
+{
+ uint32_t val = (((length & 0x80) % 24) / 12);
+
+ return (((val & 0x3) << 10) | (((val & 0x4) >> 2) << 31));
+}
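Read against the magic constants this helper replaces ((1 << 10) | (1 << 31) for the 32-word G12A burst, (3 << 10) for 64 words elsewhere), the packing appears to be a 3-bit burst code split across register bits 11:10 and bit 31; a minimal sketch of that mapping, assuming that reading:

	/*
	 * Hedged sketch, not part of the patch: code[1:0] -> reg[11:10],
	 * code[2] -> reg[31].  A 32-word burst (code 0x5) then yields
	 * BIT(31) | BIT(10); a 64-word burst (code 0x3) yields
	 * BIT(11) | BIT(10), matching the constants removed below.
	 */
	static inline u32 osd_burst_code_to_reg(u32 code)
	{
		return ((code & 0x3) << 10) | (((code & 0x4) >> 2) << 31);
	}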
+
void meson_viu_init(struct meson_drm *priv)
{
uint32_t reg;
/* Disable OSDs */
- writel_bits_relaxed(BIT(0) | BIT(21), 0,
- priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
- writel_bits_relaxed(BIT(0) | BIT(21), 0,
- priv->io_base + _REG(VIU_OSD2_CTRL_STAT));
+ writel_bits_relaxed(VIU_OSD1_OSD_BLK_ENABLE | VIU_OSD1_OSD_ENABLE, 0,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+ writel_bits_relaxed(VIU_OSD1_OSD_BLK_ENABLE | VIU_OSD1_OSD_ENABLE, 0,
+ priv->io_base + _REG(VIU_OSD2_CTRL_STAT));
/* On GXL/GXM, Use the 10bit HDR conversion matrix */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
meson_viu_load_matrix(priv);
- else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
meson_viu_set_g12a_osd1_matrix(priv, RGB709_to_YUV709l_coeff,
true);
/* Initialize OSD1 fifo control register */
- reg = BIT(0) | /* Urgent DDR request priority */
- (4 << 5); /* hold_fifo_lines */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
- reg |= (1 << 10) | /* burst length 32 */
- (32 << 12) | /* fifo_depth_val: 32*8=256 */
- (2 << 22) | /* 4 words in 1 burst */
- (2 << 24) |
- (1 << 31);
+ reg = VIU_OSD_DDR_PRIORITY_URGENT |
+ VIU_OSD_HOLD_FIFO_LINES(4) |
+ VIU_OSD_FIFO_DEPTH_VAL(32) | /* fifo_depth_val: 32*8=256 */
+ VIU_OSD_WORDS_PER_BURST(4) | /* 4 words in 1 burst */
+ VIU_OSD_FIFO_LIMITS(2); /* fifo_lim: 2*16=32 */
+
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ reg |= meson_viu_osd_burst_length_reg(32);
else
- reg |= (3 << 10) | /* burst length 64 */
- (32 << 12) | /* fifo_depth_val: 32*8=256 */
- (2 << 22) | /* 4 words in 1 burst */
- (2 << 24);
+ reg |= meson_viu_osd_burst_length_reg(64);
+
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
@@ -379,12 +384,9 @@ void meson_viu_init(struct meson_drm *priv)
priv->io_base + _REG(VIU_OSD2_CTRL_STAT2));
/* Disable VD1 AFBC */
- /* di_mif0_en=0 mif0_to_vpp_en=0 di_mad_en=0 */
- writel_bits_relaxed(0x7 << 16, 0,
- priv->io_base + _REG(VIU_MISC_CTRL0));
- /* afbc vd1 set=0 */
- writel_bits_relaxed(BIT(20), 0,
- priv->io_base + _REG(VIU_MISC_CTRL0));
+ /* di_mif0_en=0 mif0_to_vpp_en=0 di_mad_en=0 and afbc vd1 set=0 */
+ writel_bits_relaxed(VIU_CTRL0_VD1_AFBC_MASK, 0,
+ priv->io_base + _REG(VIU_MISC_CTRL0));
writel_relaxed(0, priv->io_base + _REG(AFBC_ENABLE));
writel_relaxed(0x00FF00C0,
@@ -392,28 +394,32 @@ void meson_viu_init(struct meson_drm *priv)
writel_relaxed(0x00FF00C0,
priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
- writel_relaxed(4 << 29 |
- 1 << 27 |
- 1 << 26 | /* blend_din0 input to blend0 */
- 1 << 25 | /* blend1_dout to blend2 */
- 1 << 24 | /* blend1_din3 input to blend1 */
- 1 << 20 |
- 0 << 16 |
- 1,
- priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
- writel_relaxed(1 << 20,
- priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
- writel_relaxed(1 << 20,
- priv->io_base + _REG(OSD2_BLEND_SRC_CTRL));
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+ writel_relaxed(VIU_OSD_BLEND_REORDER(0, 1) |
+ VIU_OSD_BLEND_REORDER(1, 0) |
+ VIU_OSD_BLEND_REORDER(2, 0) |
+ VIU_OSD_BLEND_REORDER(3, 0) |
+ VIU_OSD_BLEND_DIN_EN(1) |
+ VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
+ VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
+ VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
+ VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
+ VIU_OSD_BLEND_HOLD_LINES(4),
+ priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
+
+ writel_relaxed(OSD_BLEND_PATH_SEL_ENABLE,
+ priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
+ writel_relaxed(OSD_BLEND_PATH_SEL_ENABLE,
+ priv->io_base + _REG(OSD2_BLEND_SRC_CTRL));
writel_relaxed(0, priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
writel_relaxed(0, priv->io_base + _REG(VD2_BLEND_SRC_CTRL));
writel_relaxed(0,
priv->io_base + _REG(VIU_OSD_BLEND_DUMMY_DATA0));
writel_relaxed(0,
priv->io_base + _REG(VIU_OSD_BLEND_DUMMY_ALPHA));
- writel_bits_relaxed(0x3 << 2, 0x3 << 2,
- priv->io_base + _REG(DOLBY_PATH_CTRL));
+
+ writel_bits_relaxed(DOLBY_BYPASS_EN(0xc), DOLBY_BYPASS_EN(0xc),
+ priv->io_base + _REG(DOLBY_PATH_CTRL));
}
priv->viu.osd1_enabled = false;
diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c
index cbe6cf46e541..154837688ab0 100644
--- a/drivers/gpu/drm/meson/meson_vpp.c
+++ b/drivers/gpu/drm/meson/meson_vpp.c
@@ -56,7 +56,7 @@ static void meson_vpp_write_scaling_filter_coefs(struct meson_drm *priv,
{
int i;
- writel_relaxed(is_horizontal ? BIT(8) : 0,
+ writel_relaxed(is_horizontal ? VPP_SCALE_HORIZONTAL_COEF : 0,
priv->io_base + _REG(VPP_OSD_SCALE_COEF_IDX));
for (i = 0; i < 33; i++)
writel_relaxed(coefs[i],
@@ -81,7 +81,7 @@ static void meson_vpp_write_vd_scaling_filter_coefs(struct meson_drm *priv,
{
int i;
- writel_relaxed(is_horizontal ? BIT(8) : 0,
+ writel_relaxed(is_horizontal ? VPP_SCALE_HORIZONTAL_COEF : 0,
priv->io_base + _REG(VPP_SCALE_COEF_IDX));
for (i = 0; i < 33; i++)
writel_relaxed(coefs[i],
@@ -91,27 +91,29 @@ static void meson_vpp_write_vd_scaling_filter_coefs(struct meson_drm *priv,
void meson_vpp_init(struct meson_drm *priv)
{
/* set dummy data default YUV black */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
writel_relaxed(0x108080, priv->io_base + _REG(VPP_DUMMY_DATA1));
- else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu")) {
+ else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM)) {
writel_bits_relaxed(0xff << 16, 0xff << 16,
priv->io_base + _REG(VIU_MISC_CTRL1));
- writel_relaxed(0x20000, priv->io_base + _REG(VPP_DOLBY_CTRL));
+ writel_relaxed(VPP_PPS_DUMMY_DATA_MODE,
+ priv->io_base + _REG(VPP_DOLBY_CTRL));
writel_relaxed(0x1020080,
priv->io_base + _REG(VPP_DUMMY_DATA1));
- } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
writel_relaxed(0xf, priv->io_base + _REG(DOLBY_PATH_CTRL));
/* Initialize vpu fifo control registers */
- if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
- writel_relaxed(0xfff << 20 | 0x1000,
+ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ writel_relaxed(VPP_OFIFO_SIZE_DEFAULT,
priv->io_base + _REG(VPP_OFIFO_SIZE));
else
- writel_relaxed(readl_relaxed(priv->io_base + _REG(VPP_OFIFO_SIZE)) |
- 0x77f, priv->io_base + _REG(VPP_OFIFO_SIZE));
- writel_relaxed(0x08080808, priv->io_base + _REG(VPP_HOLD_LINES));
+ writel_bits_relaxed(VPP_OFIFO_SIZE_MASK, 0x77f,
+ priv->io_base + _REG(VPP_OFIFO_SIZE));
+ writel_relaxed(VPP_POSTBLEND_HOLD_LINES(4) | VPP_PREBLEND_HOLD_LINES(4),
+ priv->io_base + _REG(VPP_HOLD_LINES));
- if (!meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ if (!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
/* Turn off preblend */
writel_bits_relaxed(VPP_PREBLEND_ENABLE, 0,
priv->io_base + _REG(VPP_MISC));
@@ -137,10 +139,15 @@ void meson_vpp_init(struct meson_drm *priv)
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
- writel_relaxed(4 | (4 << 8) | BIT(15),
+
+ /* Set horizontal/vertical bank length and enable video scale out */
+ writel_relaxed(VPP_VSC_BANK_LENGTH(4) | VPP_HSC_BANK_LENGTH(4) |
+ VPP_SC_VD_EN_ENABLE,
priv->io_base + _REG(VPP_SC_MISC));
- writel_relaxed(1, priv->io_base + _REG(VPP_VADJ_CTRL));
+ /* Enable minus black level for vadj1 */
+ writel_relaxed(VPP_MINUS_BLACK_LVL_VADJ1_ENABLE,
+ priv->io_base + _REG(VPP_VADJ_CTRL));
/* Write in the proper filter coefficients. */
meson_vpp_write_scaling_filter_coefs(priv,
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 8cc70026c358..0c2a1252c8be 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -700,13 +700,13 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive)
{
- struct reservation_object_list *fobj;
+ struct dma_resv_list *fobj;
struct dma_fence *fence;
int i, ret;
- fobj = reservation_object_get_list(obj->resv);
+ fobj = dma_resv_get_list(obj->resv);
if (!fobj || (fobj->shared_count == 0)) {
- fence = reservation_object_get_excl(obj->resv);
+ fence = dma_resv_get_excl(obj->resv);
/* don't need to wait on our own fences, since ring is fifo */
if (fence && (fence->context != fctx->context)) {
ret = dma_fence_wait(fence, true);
@@ -720,7 +720,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
for (i = 0; i < fobj->shared_count; i++) {
fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(obj->resv));
+ dma_resv_held(obj->resv));
if (fence->context != fctx->context) {
ret = dma_fence_wait(fence, true);
if (ret)
@@ -738,9 +738,9 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
msm_obj->gpu = gpu;
if (exclusive)
- reservation_object_add_excl_fence(obj->resv, fence);
+ dma_resv_add_excl_fence(obj->resv, fence);
else
- reservation_object_add_shared_fence(obj->resv, fence);
+ dma_resv_add_shared_fence(obj->resv, fence);
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}
@@ -765,7 +765,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
long ret;
- ret = reservation_object_wait_timeout_rcu(obj->resv, write,
+ ret = dma_resv_wait_timeout_rcu(obj->resv, write,
true, remain);
if (ret == 0)
return remain == 0 ? -EBUSY : -ETIMEDOUT;
@@ -797,8 +797,8 @@ static void describe_fence(struct dma_fence *fence, const char *type,
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct reservation_object *robj = obj->resv;
- struct reservation_object_list *fobj;
+ struct dma_resv *robj = obj->resv;
+ struct dma_resv_list *fobj;
struct dma_fence *fence;
struct msm_gem_vma *vma;
uint64_t off = drm_vma_node_start(&obj->vma_node);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 8cfcf8f09e3e..9e0953c2b7ce 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -8,7 +8,7 @@
#define __MSM_GEM_H__
#include <linux/kref.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include "msm_drv.h"
/* Additional internal-use only BO flags: */
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 348f8c2be806..2e1556b7af26 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -225,7 +225,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
* strange place to call it. OTOH this is a
* convenient can-fail point to hook it in.
*/
- ret = reservation_object_reserve_shared(msm_obj->base.resv,
+ ret = dma_resv_reserve_shared(msm_obj->base.resv,
1);
if (ret)
return ret;
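The reservation_object -> dma_resv conversion in the msm hunks above is purely mechanical; the mapping, collected from this series for reference:

	/* <linux/reservation.h>                    <linux/dma-resv.h>
	 * struct reservation_object             -> struct dma_resv
	 * struct reservation_object_list        -> struct dma_resv_list
	 * reservation_object_held()             -> dma_resv_held()
	 * reservation_object_get_list()         -> dma_resv_get_list()
	 * reservation_object_get_excl()         -> dma_resv_get_excl()
	 * reservation_object_get_excl_rcu()     -> dma_resv_get_excl_rcu()
	 * reservation_object_add_excl_fence()   -> dma_resv_add_excl_fence()
	 * reservation_object_add_shared_fence() -> dma_resv_add_shared_fence()
	 * reservation_object_reserve_shared()   -> dma_resv_reserve_shared()
	 * reservation_object_wait_timeout_rcu() -> dma_resv_wait_timeout_rcu()
	 */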
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 878ef6822812..e8506335cd15 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -17,7 +17,7 @@
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
#include <linux/pm_runtime.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/spinlock.h>
#include <drm/drm_atomic.h>
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c
index 231d016c6f47..be36f4d6cc96 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_out.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c
@@ -30,7 +30,7 @@ static int mxsfb_panel_get_modes(struct drm_connector *connector)
drm_connector_to_mxsfb_drm_private(connector);
if (mxsfb->panel)
- return mxsfb->panel->funcs->get_modes(mxsfb->panel);
+ return drm_panel_get_modes(mxsfb->panel);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index c79160c37f84..362495535e69 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -21,8 +21,6 @@
* SOFTWARE.
*/
-#include <drm/drmP.h>
-
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "hw.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index f22f01020625..37c50ea8f847 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -22,11 +22,10 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-#include <linux/pm_runtime.h>
-
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank.h>
#include "nouveau_drv.h"
#include "nouveau_reg.h"
@@ -1031,53 +1030,6 @@ nv04_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
return 0;
}
-static int
-nouveau_crtc_set_config(struct drm_mode_set *set,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_device *dev;
- struct nouveau_drm *drm;
- int ret;
- struct drm_crtc *crtc;
- bool active = false;
- if (!set || !set->crtc)
- return -EINVAL;
-
- dev = set->crtc->dev;
-
- /* get a pm reference here */
- ret = pm_runtime_get_sync(dev->dev);
- if (ret < 0 && ret != -EACCES)
- return ret;
-
- ret = drm_crtc_helper_set_config(set, ctx);
-
- drm = nouveau_drm(dev);
-
- /* if we get here with no crtcs active then we can drop a reference */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->enabled)
- active = true;
- }
-
- pm_runtime_mark_last_busy(dev->dev);
- /* if we have active crtcs and we don't have a power ref,
- take the current one */
- if (active && !drm->have_disp_power_ref) {
- drm->have_disp_power_ref = true;
- return ret;
- }
- /* if we have no active crtcs, then drop the power ref
- we got before */
- if (!active && drm->have_disp_power_ref) {
- pm_runtime_put_autosuspend(dev->dev);
- drm->have_disp_power_ref = false;
- }
- /* drop the power reference we got coming in here */
- pm_runtime_put_autosuspend(dev->dev);
- return ret;
-}
-
struct nv04_page_flip_state {
struct list_head head;
struct drm_pending_vblank_event *event;
@@ -1293,7 +1245,7 @@ static const struct drm_crtc_funcs nv04_crtc_funcs = {
.cursor_set = nv04_crtc_cursor_set,
.cursor_move = nv04_crtc_cursor_move,
.gamma_set = nv_crtc_gamma_set,
- .set_config = nouveau_crtc_set_config,
+ .set_config = drm_crtc_helper_set_config,
.page_flip = nv04_crtc_page_flip,
.destroy = nv_crtc_destroy,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv04/cursor.c b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
index 16e09f6b9113..4c6440d29c3f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/cursor.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: MIT
-#include <drm/drmP.h>
#include <drm/drm_mode.h>
#include "nouveau_drv.h"
#include "nouveau_reg.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index e7af95d37ddb..e8eef88a8382 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -24,7 +24,6 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "nouveau_drv.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 73d41abbb510..f9f4482c79b5 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -24,8 +24,8 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fourcc.h>
#include "nouveau_drv.h"
#include "nouveau_reg.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 5713bacaee80..dc64863b5fd8 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -22,7 +22,6 @@
* Author: Ben Skeggs
*/
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "nouveau_drv.h"
@@ -210,7 +209,7 @@ nv04_display_create(struct drm_device *dev)
nouveau_display(dev)->fini = nv04_display_fini;
/* Pre-nv50 doesn't support atomic, so don't expose the ioctls */
- dev->driver->driver_features &= ~DRIVER_ATOMIC;
+ dev->driver_features &= ~DRIVER_ATOMIC;
/* Request page flip completion event. */
if (drm->nvsw.client) {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 6ccfc09bcf0f..495d3284e876 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -161,7 +161,6 @@ nv_match_device(struct drm_device *dev, unsigned device,
dev->pdev->subsystem_device == sub_device;
}
-#include <subdev/bios.h>
#include <subdev/bios/init.h>
static inline void
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 0c9bdf023f5b..3fdfafa8b0ad 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -22,7 +22,6 @@
* SOFTWARE.
*/
-#include <drm/drmP.h>
#include "nouveau_drv.h"
#include "hw.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.h b/drivers/gpu/drm/nouveau/dispnv04/hw.h
index 3a2be47fb4f1..6987e1766cd2 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h
@@ -23,7 +23,6 @@
#ifndef __NOUVEAU_HW_H__
#define __NOUVEAU_HW_H__
-#include <drm/drmP.h>
#include "disp.h"
#include "nvreg.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index df4358e31075..a3a0a73ae8ab 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -23,7 +23,6 @@
* written by Arthur Huillet.
*/
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
index 2b83b2c39d1d..2f6d2b6711ab 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
@@ -24,7 +24,6 @@
*
*/
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "nouveau_drv.h"
#include "nouveau_encoder.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index de4490b4ed30..b701a4d8fe76 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -24,7 +24,6 @@
*
*/
-#include <drm/drmP.h>
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_encoder.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 26fd71c06626..03466f04c741 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -24,7 +24,6 @@
*
*/
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_probe_helper.h>
#include "nouveau_drv.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
index b5fae5ab3fa8..43df86c38f58 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -185,6 +185,11 @@ struct nv50_wndw_atom {
} xlut;
struct {
+ u32 matrix[12];
+ bool valid;
+ } csc;
+
+ struct {
u8 mode:2;
u8 interval:4;
@@ -216,14 +221,23 @@ struct nv50_wndw_atom {
u16 y;
} point;
+ struct {
+ u8 depth;
+ u8 k1;
+ u8 src_color:4;
+ u8 dst_color:4;
+ } blend;
+
union nv50_wndw_atom_mask {
struct {
bool ntfy:1;
bool sema:1;
bool xlut:1;
+ bool csc:1;
bool image:1;
bool scale:1;
bool point:1;
+ bool blend:1;
};
u8 mask;
} set, clr;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index d5e295ca2caa..00a85f1e1a4a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -25,7 +25,9 @@
#include <nvif/event.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+
#include "nouveau_bo.h"
void
@@ -56,12 +58,21 @@ static void
base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
u32 *push;
- if ((push = evo_wait(&wndw->wndw, 10))) {
+ if ((push = evo_wait(&wndw->wndw, 13))) {
evo_mthd(push, 0x0084, 1);
evo_data(push, asyw->image.mode << 8 |
asyw->image.interval << 4);
evo_mthd(push, 0x00c0, 1);
evo_data(push, asyw->image.handle[0]);
+ if (asyw->image.format == 0xca) {
+ evo_mthd(push, 0x0110, 2);
+ evo_data(push, 1);
+ evo_data(push, 0x6400);
+ } else {
+ evo_mthd(push, 0x0110, 2);
+ evo_data(push, 0);
+ evo_data(push, 0);
+ }
evo_mthd(push, 0x0800, 5);
evo_data(push, asyw->image.offset[0] >> 8);
evo_data(push, 0x00000000);
@@ -179,9 +190,6 @@ base507c_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
const struct drm_framebuffer *fb = asyw->state.fb;
int ret;
- if (!fb->format->depth)
- return -EINVAL;
-
ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
@@ -200,6 +208,14 @@ base507c_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
asyh->base.y = asyw->state.src.y1 >> 16;
asyh->base.w = asyw->state.fb->width;
asyh->base.h = asyw->state.fb->height;
+
+ /* Some newer formats, especially FP16 ones, don't have a
+ * "depth". There's nothing that really makes sense there
+ * either, so just set it to the implicit bit count.
+ */
+ if (!asyh->base.depth)
+ asyh->base.depth = asyh->base.cpp * 8;
+
return 0;
}
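A worked instance of the fallback above (hedged; cpp taken from the DRM fourcc tables): DRM_FORMAT_ABGR16161616F carries four 16-bit channels, so:

	/* cpp = 4 channels * 16 bits = 8 bytes; no native depth, so
	 * asyh->base.depth = 8 * 8 = 64.  XRGB8888 keeps its real
	 * depth of 24 and never takes the fallback. */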
@@ -215,6 +231,8 @@ base507c_format[] = {
DRM_FORMAT_ABGR2101010,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_ABGR16161616F,
0
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base827c.c b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
index 73646819a0d6..f4c05949dd62 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base827c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base827c.c
@@ -25,12 +25,21 @@ static void
base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
u32 *push;
- if ((push = evo_wait(&wndw->wndw, 10))) {
+ if ((push = evo_wait(&wndw->wndw, 13))) {
evo_mthd(push, 0x0084, 1);
evo_data(push, asyw->image.mode << 8 |
asyw->image.interval << 4);
evo_mthd(push, 0x00c0, 1);
evo_data(push, asyw->image.handle[0]);
+ if (asyw->image.format == 0xca) {
+ evo_mthd(push, 0x0110, 2);
+ evo_data(push, 1);
+ evo_data(push, 0x6400);
+ } else {
+ evo_mthd(push, 0x0110, 2);
+ evo_data(push, 0);
+ evo_data(push, 0);
+ }
evo_mthd(push, 0x0800, 5);
evo_data(push, asyw->image.offset[0] >> 8);
evo_data(push, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
index 049ce6da321c..5f2de77e0f32 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -83,6 +83,68 @@ base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
asyw->xlut.i.load = head907d_olut_load;
}
+static inline u32
+csc_drm_to_base(u64 in)
+{
+ /* base takes a 19-bit 2's complement value in S3.16 format */
+ bool sign = in & BIT_ULL(63);
+ u32 integer = (in >> 32) & 0x7fffffff;
+ u32 fraction = in & 0xffffffff;
+
+ if (integer >= 4) {
+ return (1 << 18) - (sign ? 0 : 1);
+ } else {
+ u32 ret = (integer << 16) | (fraction >> 16);
+ if (sign)
+ ret = -ret;
+ return ret & GENMASK(18, 0);
+ }
+}
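Two conversions worked through by hand (a sketch, assuming the drm_color_ctm S31.32 sign-magnitude encoding with the sign in bit 63):

	/*
	 *  1.0: in = 1ULL << 32                 -> (1 << 16) = 0x10000
	 * -1.0: in = BIT_ULL(63) | (1ULL << 32) -> -0x10000 & GENMASK(18, 0)
	 *                                         = 0x70000 (S3.16 two's
	 *                                           complement)
	 * |integer| >= 4 saturates: 0x3ffff (just under +4.0) when
	 * positive, 0x40000 (-4.0) when negative.
	 */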
+
+void
+base907c_csc(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
+ const struct drm_color_ctm *ctm)
+{
+ int i, j;
+
+ for (j = 0; j < 3; j++) {
+ for (i = 0; i < 4; i++) {
+ u32 *val = &asyw->csc.matrix[j * 4 + i];
+ /* DRM does not support constant offset, while
+ * HW CSC does. Skip it. */
+ if (i == 3) {
+ *val = 0;
+ } else {
+ *val = csc_drm_to_base(ctm->matrix[j * 3 + i]);
+ }
+ }
+ }
+}
+
+static void
+base907c_csc_clr(struct nv50_wndw *wndw)
+{
+ u32 *push;
+ if ((push = evo_wait(&wndw->wndw, 2))) {
+ evo_mthd(push, 0x0140, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &wndw->wndw);
+ }
+}
+
+static void
+base907c_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ u32 *push, i;
+ if ((push = evo_wait(&wndw->wndw, 13))) {
+ evo_mthd(push, 0x0140, 12);
+ evo_data(push, asyw->csc.matrix[0] | 0x80000000);
+ for (i = 1; i < 12; i++)
+ evo_data(push, asyw->csc.matrix[i]);
+ evo_kick(push, &wndw->wndw);
+ }
+}
+
const struct nv50_wndw_func
base907c = {
.acquire = base507c_acquire,
@@ -94,6 +156,9 @@ base907c = {
.ntfy_clr = base507c_ntfy_clr,
.ntfy_wait_begun = base507c_ntfy_wait_begun,
.ilut = base907c_ilut,
+ .csc = base907c_csc,
+ .csc_set = base907c_csc_set,
+ .csc_clr = base907c_csc_clr,
.olut_core = true,
.xlut_set = base907c_xlut_set,
.xlut_clr = base907c_xlut_clr,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base917c.c b/drivers/gpu/drm/nouveau/dispnv50/base917c.c
index 54d705bb81a5..a1baed4fe0e9 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base917c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base917c.c
@@ -36,6 +36,8 @@ base917c_format[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_ABGR16161616F,
0
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index 7860774b65bc..40d9b654ab8c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -82,7 +82,7 @@ corec37d_init(struct nv50_core *core)
for (i = 0; i < windows; i++) {
evo_mthd(push, 0x1000 + (i * 0x080), 3);
evo_data(push, i >> 1);
- evo_data(push, 0x00000017);
+ evo_data(push, 0x0000001f);
evo_data(push, 0x00000000);
evo_mthd(push, 0x1010 + (i * 0x080), 1);
evo_data(push, 0x00127fff);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 126703816794..f1dbc7852414 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -30,14 +30,14 @@
#include <linux/dma-mapping.h>
#include <linux/hdmi.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_scdc_helper.h>
-#include <drm/drm_edid.h>
+#include <drm/drm_vblank.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
@@ -1826,8 +1826,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
asyh->clr.mask, asyh->set.mask);
- if (old_crtc_state->active && !new_crtc_state->active)
+
+ if (old_crtc_state->active && !new_crtc_state->active) {
+ pm_runtime_put_noidle(dev->dev);
drm_crtc_vblank_off(crtc);
+ }
if (asyh->clr.mask) {
nv50_head_flush_clr(head, asyh, atom->flush_disable);
@@ -1913,8 +1916,10 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
}
if (new_crtc_state->active) {
- if (!old_crtc_state->active)
+ if (!old_crtc_state->active) {
drm_crtc_vblank_on(crtc);
+ pm_runtime_get_noresume(dev->dev);
+ }
if (new_crtc_state->event)
drm_crtc_vblank_get(crtc);
}
@@ -1979,6 +1984,10 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_helper_commit_cleanup_done(state);
drm_atomic_state_put(state);
+
+ /* Drop the RPM ref we got from nv50_disp_atomic_commit() */
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
}
static void
@@ -1993,11 +2002,8 @@ static int
nv50_disp_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_plane_state *new_plane_state;
struct drm_plane *plane;
- struct drm_crtc *crtc;
- bool active = false;
int ret, i;
ret = pm_runtime_get_sync(dev->dev);
@@ -2034,27 +2040,17 @@ nv50_disp_atomic_commit(struct drm_device *dev,
drm_atomic_state_get(state);
+ /*
+ * Grab another RPM ref for the commit tail, which will release the
+ * ref when it's finished
+ */
+ pm_runtime_get_noresume(dev->dev);
+
if (nonblock)
queue_work(system_unbound_wq, &state->commit_work);
else
nv50_disp_atomic_commit_tail(state);
- drm_for_each_crtc(crtc, dev) {
- if (crtc->state->active) {
- if (!drm->have_disp_power_ref) {
- drm->have_disp_power_ref = true;
- return 0;
- }
- active = true;
- break;
- }
- }
-
- if (!active && drm->have_disp_power_ref) {
- pm_runtime_put_autosuspend(dev->dev);
- drm->have_disp_power_ref = false;
- }
-
err_cleanup:
if (ret)
drm_atomic_helper_cleanup_planes(dev, state);
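The runtime-PM reference pairing that results from these two hunks, summarized (hedged) for review:

	/*
	 * nv50_disp_atomic_commit():      pm_runtime_get_sync()
	 *                                 pm_runtime_get_noresume()  [for the tail]
	 * nv50_disp_atomic_commit_tail(): pm_runtime_mark_last_busy()
	 *                                 pm_runtime_put_autosuspend() [drops it]
	 * CRTC off -> on:                 pm_runtime_get_noresume()
	 * CRTC on -> off:                 pm_runtime_put_noidle()
	 * Strictly paired gets/puts replace the old have_disp_power_ref
	 * bookkeeping.
	 */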
@@ -2316,6 +2312,7 @@ nv50_display_create(struct drm_device *dev)
disp->disp = &nouveau_display(dev)->disp;
dev->mode_config.funcs = &nv50_disp_func;
dev->mode_config.quirk_addfb_prefer_xbgr_30bpp = true;
+ dev->mode_config.normalize_zpos = true;
/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 929d93b1677e..71c23bf1fe25 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -480,7 +480,7 @@ nv50_head_create(struct drm_device *dev, int index)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_head *head;
- struct nv50_wndw *curs, *wndw;
+ struct nv50_wndw *base, *ovly, *curs;
struct drm_crtc *crtc;
int ret;
@@ -492,13 +492,13 @@ nv50_head_create(struct drm_device *dev, int index)
head->base.index = index;
if (disp->disp->object.oclass < GV100_DISP) {
- ret = nv50_ovly_new(drm, head->base.index, &wndw);
- ret = nv50_base_new(drm, head->base.index, &wndw);
+ ret = nv50_base_new(drm, head->base.index, &base);
+ ret = nv50_ovly_new(drm, head->base.index, &ovly);
} else {
- ret = nv50_wndw_new(drm, DRM_PLANE_TYPE_OVERLAY,
- head->base.index * 2 + 1, &wndw);
ret = nv50_wndw_new(drm, DRM_PLANE_TYPE_PRIMARY,
- head->base.index * 2 + 0, &wndw);
+ head->base.index * 2 + 0, &base);
+ ret = nv50_wndw_new(drm, DRM_PLANE_TYPE_OVERLAY,
+ head->base.index * 2 + 1, &ovly);
}
if (ret == 0)
ret = nv50_curs_new(drm, head->base.index, &curs);
@@ -508,10 +508,14 @@ nv50_head_create(struct drm_device *dev, int index)
}
crtc = &head->base.base;
- drm_crtc_init_with_planes(dev, crtc, &wndw->plane, &curs->plane,
+ drm_crtc_init_with_planes(dev, crtc, &base->plane, &curs->plane,
&nv50_head_func, "head-%d", head->base.index);
drm_crtc_helper_add(crtc, &nv50_head_help);
drm_mode_crtc_set_gamma_size(crtc, 256);
+ if (disp->disp->object.oclass >= GF110_DISP)
+ drm_crtc_enable_color_mgmt(crtc, 256, true, 256);
+ else
+ drm_crtc_enable_color_mgmt(crtc, 0, false, 256);
if (head->func->olut_set) {
ret = nv50_lut_init(disp, &drm->client.mmu, &head->olut);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
index cc417664f823..8ccd96113bad 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
@@ -23,6 +23,7 @@
#include "atom.h"
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <nvif/cl507e.h>
@@ -160,9 +161,7 @@ ovly507e_format[] = {
DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ARGB1555,
0
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
index aaa9fe5a4fc8..2e68fc736fe1 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
@@ -90,11 +90,8 @@ ovly827e_format[] = {
DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ARGB1555,
DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_ABGR2101010,
0
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
index a3ce53046015..9efe5e9d5ce4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly907e.c
@@ -61,10 +61,21 @@ ovly907e = {
.update = ovly507e_update,
};
+static const u32
+ovly907e_format[] = {
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XBGR16161616F,
+ 0
+};
+
int
ovly907e_new(struct nouveau_drm *drm, int head, s32 oclass,
struct nv50_wndw **pwndw)
{
- return ovly507e_new_(&ovly907e, ovly827e_format, drm, head, oclass,
+ return ovly507e_new_(&ovly907e, ovly907e_format, drm, head, oclass,
0x00000004 << (head * 4), pwndw);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly917e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly917e.c
index 505fa7e78523..e24d6fd23450 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly917e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly917e.c
@@ -26,13 +26,10 @@ ovly917e_format[] = {
DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ARGB1555,
DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_ABGR2101010,
DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_XBGR16161616F,
0
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 89f8e76a2d7d..2db029371c91 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -26,6 +26,8 @@
#include <nvif/cl0002.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+
#include "nouveau_bo.h"
static void
@@ -118,6 +120,7 @@ nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 *interlock, bool flush,
if (clr.sema ) wndw->func-> sema_clr(wndw);
if (clr.ntfy ) wndw->func-> ntfy_clr(wndw);
if (clr.xlut ) wndw->func-> xlut_clr(wndw);
+ if (clr.csc ) wndw->func-> csc_clr(wndw);
if (clr.image) wndw->func->image_clr(wndw);
interlock[wndw->interlock.type] |= wndw->interlock.data;
@@ -145,7 +148,9 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
wndw->func->xlut_set(wndw, asyw);
}
+ if (asyw->set.csc ) wndw->func->csc_set (wndw, asyw);
if (asyw->set.scale) wndw->func->scale_set(wndw, asyw);
+ if (asyw->set.blend) wndw->func->blend_set(wndw, asyw);
if (asyw->set.point) {
if (asyw->set.point = false, asyw->set.mask)
interlock[wndw->interlock.type] |= wndw->interlock.data;
@@ -202,18 +207,20 @@ static int
nv50_wndw_atomic_check_acquire_rgb(struct nv50_wndw_atom *asyw)
{
switch (asyw->state.fb->format->format) {
- case DRM_FORMAT_C8 : asyw->image.format = 0x1e; break;
- case DRM_FORMAT_XRGB8888 :
- case DRM_FORMAT_ARGB8888 : asyw->image.format = 0xcf; break;
- case DRM_FORMAT_RGB565 : asyw->image.format = 0xe8; break;
- case DRM_FORMAT_XRGB1555 :
- case DRM_FORMAT_ARGB1555 : asyw->image.format = 0xe9; break;
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
- case DRM_FORMAT_XBGR8888 :
- case DRM_FORMAT_ABGR8888 : asyw->image.format = 0xd5; break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010: asyw->image.format = 0xdf; break;
+ case DRM_FORMAT_C8 : asyw->image.format = 0x1e; break;
+ case DRM_FORMAT_XRGB8888 :
+ case DRM_FORMAT_ARGB8888 : asyw->image.format = 0xcf; break;
+ case DRM_FORMAT_RGB565 : asyw->image.format = 0xe8; break;
+ case DRM_FORMAT_XRGB1555 :
+ case DRM_FORMAT_ARGB1555 : asyw->image.format = 0xe9; break;
+ case DRM_FORMAT_XBGR2101010 :
+ case DRM_FORMAT_ABGR2101010 : asyw->image.format = 0xd1; break;
+ case DRM_FORMAT_XBGR8888 :
+ case DRM_FORMAT_ABGR8888 : asyw->image.format = 0xd5; break;
+ case DRM_FORMAT_XRGB2101010 :
+ case DRM_FORMAT_ARGB2101010 : asyw->image.format = 0xdf; break;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F: asyw->image.format = 0xca; break;
default:
return -EINVAL;
}
@@ -279,6 +286,28 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
asyw->set.scale = true;
}
+ if (wndw->func->blend_set) {
+ asyw->blend.depth = 255 - asyw->state.normalized_zpos;
+ asyw->blend.k1 = asyw->state.alpha >> 8;
+ switch (asyw->state.pixel_blend_mode) {
+ case DRM_MODE_BLEND_PREMULTI:
+ asyw->blend.src_color = 2; /* K1 */
+ asyw->blend.dst_color = 7; /* NEG_K1_TIMES_SRC */
+ break;
+ case DRM_MODE_BLEND_COVERAGE:
+ asyw->blend.src_color = 5; /* K1_TIMES_SRC */
+ asyw->blend.dst_color = 7; /* NEG_K1_TIMES_SRC */
+ break;
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ default:
+ asyw->blend.src_color = 2; /* K1 */
+ asyw->blend.dst_color = 4; /* NEG_K1 */
+ break;
+ }
+ if (memcmp(&armw->blend, &asyw->blend, sizeof(asyw->blend)))
+ asyw->set.blend = true;
+ }
+
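Under the usual reading of the factor names in the comments above (K1 = scaled plane alpha, the *_TIMES_SRC variants also multiplying by per-pixel source alpha), the three modes compose roughly as follows; a hedged sketch, not taken from hardware documentation:

	/*
	 * PREMULTI:   out = src * k1          + dst * (1 - k1 * src.a)
	 * COVERAGE:   out = src * k1 * src.a  + dst * (1 - k1 * src.a)
	 * PIXEL_NONE: out = src * k1          + dst * (1 - k1)
	 */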
if (wndw->immd) {
asyw->point.x = asyw->state.crtc_x;
asyw->point.y = asyw->state.crtc_y;
@@ -320,7 +349,9 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
asyh->wndw.olut &= ~BIT(wndw->id);
}
- if (!ilut && wndw->func->ilut_identity) {
+ if (!ilut && wndw->func->ilut_identity &&
+ asyw->state.fb->format->format != DRM_FORMAT_XBGR16161616F &&
+ asyw->state.fb->format->format != DRM_FORMAT_ABGR16161616F) {
static struct drm_property_blob dummy = {};
ilut = &dummy;
}
@@ -332,6 +363,8 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
asyw->xlut.handle = wndw->wndw.vram.handle;
asyw->xlut.i.buffer = !asyw->xlut.i.buffer;
asyw->set.xlut = true;
+ } else {
+ asyw->clr.xlut = armw->xlut.handle != 0;
}
/* Handle setting base SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT. */
@@ -339,6 +372,16 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
(!armw->visible || (armw->xlut.handle && !asyw->xlut.handle)))
asyw->set.xlut = true;
+ if (wndw->func->csc && asyh->state.ctm) {
+ const struct drm_color_ctm *ctm = asyh->state.ctm->data;
+ wndw->func->csc(wndw, asyw, ctm);
+ asyw->csc.valid = true;
+ asyw->set.csc = true;
+ } else {
+ asyw->csc.valid = false;
+ asyw->clr.csc = armw->csc.valid;
+ }
+
/* Can't do an immediate flip while changing the LUT. */
asyh->state.pageflip_flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
}
@@ -408,6 +451,7 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
asyw->clr.ntfy = armw->ntfy.handle != 0;
asyw->clr.sema = armw->sema.handle != 0;
asyw->clr.xlut = armw->xlut.handle != 0;
+ asyw->clr.csc = armw->csc.valid;
if (wndw->func->image_clr)
asyw->clr.image = armw->image.handle[0] != 0;
}
@@ -457,7 +501,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
asyw->image.handle[0] = ctxdma->object.handle;
}
- asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.base.resv);
+ asyw->state.fence = dma_resv_get_excl_rcu(fb->nvbo->bo.base.resv);
asyw->image.offset[0] = fb->nvbo->bo.offset;
if (wndw->func->prepare) {
@@ -499,6 +543,7 @@ nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
asyw->ntfy = armw->ntfy;
asyw->ilut = NULL;
asyw->xlut = armw->xlut;
+ asyw->csc = armw->csc;
asyw->image = armw->image;
asyw->point = armw->point;
asyw->clr.mask = 0;
@@ -506,6 +551,13 @@ nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
return &asyw->state;
}
+static int
+nv50_wndw_zpos_default(struct drm_plane *plane)
+{
+ return (plane->type == DRM_PLANE_TYPE_PRIMARY) ? 0 :
+ (plane->type == DRM_PLANE_TYPE_OVERLAY) ? 1 : 255;
+}
+
static void
nv50_wndw_reset(struct drm_plane *plane)
{
@@ -516,9 +568,10 @@ nv50_wndw_reset(struct drm_plane *plane)
if (plane->state)
plane->funcs->atomic_destroy_state(plane, plane->state);
- plane->state = &asyw->state;
- plane->state->plane = plane;
- plane->state->rotation = DRM_MODE_ROTATE_0;
+
+ __drm_atomic_helper_plane_reset(plane, &asyw->state);
+ plane->state->zpos = nv50_wndw_zpos_default(plane);
+ plane->state->normalized_zpos = nv50_wndw_zpos_default(plane);
}
static void
@@ -613,6 +666,30 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
}
wndw->notify.func = nv50_wndw_notify;
+
+ if (wndw->func->blend_set) {
+ ret = drm_plane_create_zpos_property(&wndw->plane,
+ nv50_wndw_zpos_default(&wndw->plane), 0, 254);
+ if (ret)
+ return ret;
+
+ ret = drm_plane_create_alpha_property(&wndw->plane);
+ if (ret)
+ return ret;
+
+ ret = drm_plane_create_blend_mode_property(&wndw->plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+ if (ret)
+ return ret;
+ } else {
+ ret = drm_plane_create_zpos_immutable_property(&wndw->plane,
+ nv50_wndw_zpos_default(&wndw->plane));
+ if (ret)
+ return ret;
+ }
+
return 0;
}
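With the three properties registered, userspace drives blending through the standard plane properties; an illustrative libdrm call (the property ID must be looked up by name at runtime; plane_id and alpha_prop_id here are placeholders, not from this patch):

	/* "alpha" is 16 bits, 0xffff == opaque; "zpos" spans 0..254 here;
	 * "pixel blend mode" is an enum of the three modes enabled above. */
	drmModeObjectSetProperty(fd, plane_id, DRM_MODE_OBJECT_PLANE,
				 alpha_prop_id, 0x8000); /* roughly 50% */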
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index 03f3d8dc235a..c63bd3bdaf06 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -65,6 +65,10 @@ struct nv50_wndw_func {
int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset,
struct nvif_device *);
void (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*csc)(struct nv50_wndw *, struct nv50_wndw_atom *,
+ const struct drm_color_ctm *);
+ void (*csc_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*csc_clr)(struct nv50_wndw *);
bool ilut_identity;
bool olut_core;
void (*xlut_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
@@ -72,6 +76,7 @@ struct nv50_wndw_func {
void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
void (*image_clr)(struct nv50_wndw *);
void (*scale_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ void (*blend_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
void (*update)(struct nv50_wndw *, u32 *interlock);
};
@@ -81,6 +86,9 @@ extern const struct drm_plane_funcs nv50_wndw;
void base507c_ntfy_reset(struct nouveau_bo *, u32);
int base507c_ntfy_wait_begun(struct nouveau_bo *, u32, struct nvif_device *);
+void base907c_csc(struct nv50_wndw *, struct nv50_wndw_atom *,
+ const struct drm_color_ctm *);
+
struct nv50_wimm_func {
void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);
@@ -102,8 +110,8 @@ void wndwc37e_sema_set(struct nv50_wndw *, struct nv50_wndw_atom *);
void wndwc37e_sema_clr(struct nv50_wndw *);
void wndwc37e_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
void wndwc37e_ntfy_clr(struct nv50_wndw *);
-void wndwc37e_image_set(struct nv50_wndw *, struct nv50_wndw_atom *);
void wndwc37e_image_clr(struct nv50_wndw *);
+void wndwc37e_blend_set(struct nv50_wndw *, struct nv50_wndw_atom *);
void wndwc37e_update(struct nv50_wndw *, u32 *);
int wndwc57e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
index e52a85c83f7a..0f9402162bde 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -29,6 +29,23 @@
#include <nvif/clc37e.h>
static void
+wndwc37e_csc_clr(struct nv50_wndw *wndw)
+{
+}
+
+static void
+wndwc37e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ u32 *push, i;
+ if ((push = evo_wait(&wndw->wndw, 13))) {
+ evo_mthd(push, 0x02bc, 12);
+ for (i = 0; i < 12; i++)
+ evo_data(push, asyw->csc.matrix[i]);
+ evo_kick(push, &wndw->wndw);
+ }
+}
+
+static void
wndwc37e_ilut_clr(struct nv50_wndw *wndw)
{
u32 *push;
@@ -65,6 +82,26 @@ wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
}
void
+wndwc37e_blend_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ u32 *push;
+ if ((push = evo_wait(&wndw->wndw, 8))) {
+ evo_mthd(push, 0x02ec, 7);
+ evo_data(push, asyw->blend.depth << 4);
+ evo_data(push, asyw->blend.k1);
+ evo_data(push, asyw->blend.dst_color << 12 |
+ asyw->blend.dst_color << 8 |
+ asyw->blend.src_color << 4 |
+ asyw->blend.src_color);
+ evo_data(push, 0xffff0000);
+ evo_data(push, 0xffff0000);
+ evo_data(push, 0xffff0000);
+ evo_data(push, 0xffff0000);
+ evo_kick(push, &wndw->wndw);
+ }
+}
+
+void
wndwc37e_image_clr(struct nv50_wndw *wndw)
{
u32 *push;
@@ -77,12 +114,12 @@ wndwc37e_image_clr(struct nv50_wndw *wndw)
}
}
-void
+static void
wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
u32 *push;
- if (!(push = evo_wait(&wndw->wndw, 25)))
+ if (!(push = evo_wait(&wndw->wndw, 17)))
return;
evo_mthd(push, 0x0308, 1);
@@ -90,7 +127,9 @@ wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
evo_mthd(push, 0x0224, 4);
evo_data(push, asyw->image.h << 16 | asyw->image.w);
evo_data(push, asyw->image.layout << 4 | asyw->image.blockh);
- evo_data(push, asyw->image.colorspace << 8 | asyw->image.format);
+ evo_data(push, asyw->csc.valid << 17 |
+ asyw->image.colorspace << 8 |
+ asyw->image.format);
evo_data(push, asyw->image.blocks[0] | (asyw->image.pitch[0] >> 6));
evo_mthd(push, 0x0240, 1);
evo_data(push, asyw->image.handle[0]);
@@ -105,16 +144,6 @@ wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
evo_mthd(push, 0x02a4, 1);
evo_data(push, asyw->state.crtc_h << 16 |
asyw->state.crtc_w);
-
- /*XXX: Composition-related stuff. Need to implement properly. */
- evo_mthd(push, 0x02ec, 1);
- evo_data(push, (2 - (wndw->id & 1)) << 4);
- evo_mthd(push, 0x02f4, 5);
- evo_data(push, 0x00000011);
- evo_data(push, 0xffff0000);
- evo_data(push, 0xffff0000);
- evo_data(push, 0xffff0000);
- evo_data(push, 0xffff0000);
evo_kick(push, &wndw->wndw);
}
@@ -216,6 +245,8 @@ wndwc37e_format[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_ABGR16161616F,
0
};
@@ -232,8 +263,12 @@ wndwc37e = {
.ilut = wndwc37e_ilut,
.xlut_set = wndwc37e_ilut_set,
.xlut_clr = wndwc37e_ilut_clr,
+ .csc = base907c_csc,
+ .csc_set = wndwc37e_csc_set,
+ .csc_clr = wndwc37e_csc_clr,
.image_set = wndwc37e_image_set,
.image_clr = wndwc37e_image_clr,
+ .blend_set = wndwc37e_blend_set,
.update = wndwc37e_update,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
index ba89f1a5fcfa..a311c79e5295 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -29,6 +29,72 @@
#include <nvif/clc37e.h>
static void
+wndwc57e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ u32 *push;
+
+ if (!(push = evo_wait(&wndw->wndw, 17)))
+ return;
+
+ evo_mthd(push, 0x0308, 1);
+ evo_data(push, asyw->image.mode << 4 | asyw->image.interval);
+ evo_mthd(push, 0x0224, 4);
+ evo_data(push, asyw->image.h << 16 | asyw->image.w);
+ evo_data(push, asyw->image.layout << 4 | asyw->image.blockh);
+ evo_data(push, asyw->image.colorspace << 8 |
+ asyw->image.format);
+ evo_data(push, asyw->image.blocks[0] | (asyw->image.pitch[0] >> 6));
+ evo_mthd(push, 0x0240, 1);
+ evo_data(push, asyw->image.handle[0]);
+ evo_mthd(push, 0x0260, 1);
+ evo_data(push, asyw->image.offset[0] >> 8);
+ evo_mthd(push, 0x0290, 1);
+ evo_data(push, (asyw->state.src_y >> 16) << 16 |
+ (asyw->state.src_x >> 16));
+ evo_mthd(push, 0x0298, 1);
+ evo_data(push, (asyw->state.src_h >> 16) << 16 |
+ (asyw->state.src_w >> 16));
+ evo_mthd(push, 0x02a4, 1);
+ evo_data(push, asyw->state.crtc_h << 16 |
+ asyw->state.crtc_w);
+ evo_kick(push, &wndw->wndw);
+}
+
+static void
+wndwc57e_csc_clr(struct nv50_wndw *wndw)
+{
+ u32 *push;
+ if ((push = evo_wait(&wndw->wndw, 13))) {
+ evo_mthd(push, 0x0400, 12);
+ evo_data(push, 0x00010000);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00010000);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00010000);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &wndw->wndw);
+ }
+}
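Consistent with base907c_csc() filling matrix[j * 4 + i], the twelve words above describe a row-major 3x4 matrix; the clear programs identity (a hedged reading):

	/*
	 * | 1.0  0    0    0 |   0x00010000 == 1.0 in the S3.16 format;
	 * | 0    1.0  0    0 |   the fourth column is the constant-offset
	 * | 0    0    1.0  0 |   term, unused by the DRM CTM property.
	 */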
+
+static void
+wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ u32 *push, i;
+ if ((push = evo_wait(&wndw->wndw, 13))) {
+ evo_mthd(push, 0x0400, 12);
+ for (i = 0; i < 12; i++)
+ evo_data(push, asyw->csc.matrix[i]);
+ evo_kick(push, &wndw->wndw);
+ }
+}
+
+static void
wndwc57e_ilut_clr(struct nv50_wndw *wndw)
{
u32 *push;
@@ -119,8 +185,12 @@ wndwc57e = {
.ilut_identity = true,
.xlut_set = wndwc57e_ilut_set,
.xlut_clr = wndwc57e_ilut_clr,
- .image_set = wndwc37e_image_set,
+ .csc = base907c_csc,
+ .csc_set = wndwc57e_csc_set,
+ .csc_clr = wndwc57e_csc_clr,
+ .image_set = wndwc57e_image_set,
.image_clr = wndwc37e_image_clr,
+ .blend_set = wndwc37e_blend_set,
.update = wndwc37e_update,
};
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
index f29f2d8da142..9ac3dda4b44f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
@@ -26,4 +26,6 @@ nvbios_extdev_parse(struct nvkm_bios *, int, struct nvbios_extdev_func *);
int
nvbios_extdev_find(struct nvkm_bios *, enum nvbios_extdev_type,
struct nvbios_extdev_func *);
+
+bool nvbios_extdev_skip_probe(struct nvkm_bios *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
index 7c4f00366e71..3f785f29dfac 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
@@ -3,10 +3,13 @@
#define __NVBIOS_GPIO_H__
enum dcb_gpio_func_name {
DCB_GPIO_PANEL_POWER = 0x01,
+ DCB_GPIO_FAN = 0x09,
DCB_GPIO_TVDAC0 = 0x0c,
+ DCB_GPIO_THERM_EXT_POWER_EVENT = 0x10,
DCB_GPIO_TVDAC1 = 0x2d,
- DCB_GPIO_FAN = 0x09,
DCB_GPIO_FAN_SENSE = 0x3d,
+ DCB_GPIO_POWER_ALERT = 0x4c,
+ DCB_GPIO_EXT_POWER_LOW = 0x79,
DCB_GPIO_LOGO_LED_PWM = 0x84,
DCB_GPIO_UNUSED = 0xff,
DCB_GPIO_VID0 = 0x04,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index 24fbcccd93eb..4752006880f3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -30,6 +30,7 @@ struct nvkm_pmu {
int nvkm_pmu_send(struct nvkm_pmu *, u32 reply[2], u32 process,
u32 message, u32 data0, u32 data1);
void nvkm_pmu_pgob(struct nvkm_pmu *, bool enable);
+bool nvkm_pmu_fan_controlled(struct nvkm_device *);
int gt215_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gf100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 66bf2aff4a3e..d204ea8a5618 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -22,8 +22,6 @@
* SOFTWARE.
*/
-#include <drm/drmP.h>
-
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "dispnv04/hw.h"
@@ -935,7 +933,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
tmdstableptr = ROM16(bios->data[bitentry->offset]);
if (!tmdstableptr) {
- NV_ERROR(drm, "Pointer to TMDS table invalid\n");
+ NV_INFO(drm, "Pointer to TMDS table not found\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 99e391be9370..e918b437af17 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -185,31 +185,24 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
*size = roundup_64(*size, PAGE_SIZE);
}
-int
-nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
- uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
- struct sg_table *sg, struct reservation_object *robj,
- struct nouveau_bo **pnvbo)
+struct nouveau_bo *
+nouveau_bo_alloc(struct nouveau_cli *cli, u64 size, u32 flags, u32 tile_mode,
+ u32 tile_flags)
{
struct nouveau_drm *drm = cli->drm;
struct nouveau_bo *nvbo;
struct nvif_mmu *mmu = &cli->mmu;
struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
- size_t acc_size;
- int type = ttm_bo_type_device;
- int ret, i, pi = -1;
+ int i, pi = -1;
if (!size) {
NV_WARN(drm, "skipped size %016llx\n", size);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
- if (sg)
- type = ttm_bo_type_sg;
-
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
if (!nvbo)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&nvbo->head);
INIT_LIST_HEAD(&nvbo->entry);
INIT_LIST_HEAD(&nvbo->vma_list);
@@ -231,7 +224,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
kfree(nvbo);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
@@ -241,7 +234,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
nvbo->comp = (tile_flags & 0x00030000) >> 16;
if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
kfree(nvbo);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
} else {
nvbo->zeta = (tile_flags & 0x00000007);
@@ -278,7 +271,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
}
if (WARN_ON(pi < 0))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
/* Disable compression if suitable settings couldn't be found. */
if (nvbo->comp && !vmm->page[pi].comp) {
@@ -288,23 +281,51 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
}
nvbo->page = vmm->page[pi].shift;
+ return nvbo;
+}
+
+int
+nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
+ struct sg_table *sg, struct dma_resv *robj)
+{
+ int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
+ size_t acc_size;
+ int ret;
+
+ acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
+
nouveau_bo_fixup_align(nvbo, flags, &align, &size);
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, flags, 0);
- acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
- sizeof(struct nouveau_bo));
-
- ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
- type, &nvbo->placement,
- align >> PAGE_SHIFT, false, acc_size, sg,
- robj, nouveau_bo_del_ttm);
-
+ ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
+ &nvbo->placement, align >> PAGE_SHIFT, false,
+ acc_size, sg, robj, nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
return ret;
}
+ return 0;
+}
+
+int
+nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
+ uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
+ struct sg_table *sg, struct dma_resv *robj,
+ struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ nvbo = nouveau_bo_alloc(cli, size, flags, tile_mode, tile_flags);
+ if (IS_ERR(nvbo))
+ return PTR_ERR(nvbo);
+
+ ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
+ if (ret)
+ return ret;
+
*pnvbo = nvbo;
return 0;
}
@@ -1324,7 +1345,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
- struct dma_fence *fence = reservation_object_get_excl(bo->base.resv);
+ struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
nv10_bo_put_tile_region(dev, *old_tile, fence);
*old_tile = new_tile;
@@ -1655,12 +1676,12 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
- struct reservation_object *resv = nvbo->bo.base.resv;
+ struct dma_resv *resv = nvbo->bo.base.resv;
if (exclusive)
- reservation_object_add_excl_fence(resv, &fence->base);
+ dma_resv_add_excl_fence(resv, &fence->base);
else if (fence)
- reservation_object_add_shared_fence(resv, &fence->base);
+ dma_resv_add_shared_fence(resv, &fence->base);
}
struct ttm_bo_driver nouveau_bo_driver = {
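
This file also picks up the tree-wide rename of struct reservation_object to struct dma_resv; every reservation_object_*() call maps one-to-one onto a dma_resv_*() call with unchanged semantics. A minimal sketch of publishing a fence with the renamed API, using a hypothetical publish_fence() helper and assuming the reservation lock is not yet held (nouveau_bo_fence() itself runs with it already held):

	static void publish_fence(struct dma_resv *resv,
				  struct dma_fence *fence, bool exclusive)
	{
		dma_resv_lock(resv, NULL);
		if (exclusive) {
			dma_resv_add_excl_fence(resv, fence);
		} else if (!dma_resv_reserve_shared(resv, 1)) {
			/* a slot is reserved, so adding the fence cannot fail */
			dma_resv_add_shared_fence(resv, fence);
		}
		dma_resv_unlock(resv);
	}
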
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index d675efe8e7f9..62930d834fba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -71,9 +71,13 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
extern struct ttm_bo_driver nouveau_bo_driver;
void nouveau_bo_move_init(struct nouveau_drm *);
+struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 size, u32 flags,
+ u32 tile_mode, u32 tile_flags);
+int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 flags,
+ struct sg_table *sg, struct dma_resv *robj);
int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags,
u32 tile_mode, u32 tile_flags, struct sg_table *sg,
- struct reservation_object *robj,
+ struct dma_resv *robj,
struct nouveau_bo **);
int nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig);
int nouveau_bo_unpin(struct nouveau_bo *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 330d7d29a6e3..94dfa2e5a9ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -29,7 +29,6 @@
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index 366acb928f57..7f63be2ec35d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -27,6 +27,8 @@
#ifndef __NOUVEAU_CRTC_H__
#define __NOUVEAU_CRTC_H__
+#include <drm/drm_crtc.h>
+
#include <nvif/notify.h>
struct nouveau_crtc {
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
index 9420a6aca138..8909c010e8ea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.h
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
@@ -2,7 +2,7 @@
#ifndef __NOUVEAU_DEBUGFS_H__
#define __NOUVEAU_DEBUGFS_H__
-#include <drm/drmP.h>
+#include <drm/drm_debugfs.h>
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 98afc50162e9..6f038511a03a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -25,12 +25,14 @@
*/
#include <acpi/video.h>
-#include <drm/drmP.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "nouveau_fbcon.h"
#include "nouveau_crtc.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 9185f01e2d9b..6e8e66882e45 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -1,9 +1,13 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_DISPLAY_H__
#define __NOUVEAU_DISPLAY_H__
+
#include "nouveau_drv.h"
+
#include <nvif/disp.h>
+#include <drm/drm_framebuffer.h>
+
struct nouveau_framebuffer {
struct drm_framebuffer base;
struct nouveau_bo *nvbo;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 078f65d849ce..3c430a550a51 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -118,7 +118,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
}
if ((++cnt & 0xff) == 0) {
- DRM_UDELAY(1);
+ udelay(1);
if (cnt > 100000)
return -EBUSY;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 0d052e1660f8..2674f1587457 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -22,7 +22,6 @@
* Authors: Ben Skeggs
*/
-#include <drm/drmP.h>
#include <drm/drm_dp_helper.h>
#include "nouveau_drv.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 7e045580a3a4..bdc948352467 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -29,8 +29,9 @@
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_vblank.h>
#include <core/gpuobj.h>
#include <core/option.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index aae035816383..70f34cacc552 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -46,7 +46,10 @@
#include <nvif/mmu.h>
#include <nvif/vmm.h>
-#include <drm/drmP.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
@@ -127,7 +130,6 @@ nouveau_cli(struct drm_file *fpriv)
}
#include <nvif/object.h>
-#include <nvif/device.h>
struct nouveau_drm {
struct nouveau_cli master;
@@ -204,9 +206,6 @@ struct nouveau_drm {
/* led management */
struct nouveau_led *led;
- /* display power reference */
- bool have_disp_power_ref;
-
struct dev_pm_domain vga_pm_domain;
struct nouveau_svm *svm;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 73cc3217068a..f439f0a5b43a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -37,10 +37,10 @@
#include <linux/vga_switcheroo.h>
#include <linux/console.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_atomic.h>
#include "nouveau_drv.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index e5f249ab216a..9118df035b28 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -24,10 +24,9 @@
*
*/
-#include <drm/drmP.h>
-
#include <linux/ktime.h>
#include <linux/hrtimer.h>
+#include <linux/sched/signal.h>
#include <trace/events/dma_fence.h>
#include <nvif/cl826e.h>
@@ -335,20 +334,20 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
{
struct nouveau_fence_chan *fctx = chan->fence;
struct dma_fence *fence;
- struct reservation_object *resv = nvbo->bo.base.resv;
- struct reservation_object_list *fobj;
+ struct dma_resv *resv = nvbo->bo.base.resv;
+ struct dma_resv_list *fobj;
struct nouveau_fence *f;
int ret = 0, i;
if (!exclusive) {
- ret = reservation_object_reserve_shared(resv, 1);
+ ret = dma_resv_reserve_shared(resv, 1);
if (ret)
return ret;
}
- fobj = reservation_object_get_list(resv);
- fence = reservation_object_get_excl(resv);
+ fobj = dma_resv_get_list(resv);
+ fence = dma_resv_get_excl(resv);
if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
struct nouveau_channel *prev = NULL;
@@ -377,7 +376,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
bool must_wait = true;
fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(resv));
+ dma_resv_held(resv));
f = nouveau_local_fence(fence, chan->drm);
if (f) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index c7368aa0bdec..c2bfc0591909 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -188,11 +188,23 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
flags |= TTM_PL_FLAG_UNCACHED;
- ret = nouveau_bo_new(cli, size, align, flags, tile_mode,
- tile_flags, NULL, NULL, pnvbo);
- if (ret)
+ nvbo = nouveau_bo_alloc(cli, size, flags, tile_mode, tile_flags);
+ if (IS_ERR(nvbo))
+ return PTR_ERR(nvbo);
+
+ /* Initialize the embedded gem-object. We return a single gem-reference
+ * to the caller, instead of a normal nouveau_bo ttm reference. */
+ ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
+ if (ret) {
+ nouveau_bo_ref(NULL, &nvbo);
+ return ret;
+ }
+
+ ret = nouveau_bo_init(nvbo, size, align, flags, NULL, NULL);
+ if (ret) {
+ nouveau_bo_ref(NULL, &nvbo);
return ret;
- nvbo = *pnvbo;
+ }
/* we restrict allowed domains on nv50+ to only the types
* that were requested at creation time. not possible on
@@ -203,15 +215,8 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
nvbo->valid_domains &= domain;
- /* Initialize the embedded gem-object. We return a single gem-reference
- * to the caller, instead of a normal nouveau_bo ttm reference. */
- ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, nvbo->bo.mem.size);
- if (ret) {
- nouveau_bo_ref(NULL, pnvbo);
- return -ENOMEM;
- }
-
nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
+ *pnvbo = nvbo;
return 0;
}
@@ -887,7 +892,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- lret = reservation_object_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
+ lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
no_wait ? 0 : 30 * HZ);
if (!lret)
ret = -EBUSY;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 40ba0f1ba5aa..978e07591990 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -2,8 +2,6 @@
#ifndef __NOUVEAU_GEM_H__
#define __NOUVEAU_GEM_H__
-#include <drm/drmP.h>
-
#include "nouveau_drv.h"
#include "nouveau_bo.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 6af2d299c3f9..d445c6f3fece 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -29,8 +29,6 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-#include <drm/drmP.h>
-
#include "nouveau_drv.h"
#include "nouveau_hwmon.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
index 462679a8fec5..adf01ca9e035 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -33,7 +33,8 @@
#include <linux/compat.h>
-#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_ioctl.h>
#include "nouveau_ioctl.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index e86ad7ae622b..84658d434225 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -22,7 +22,6 @@
* Authors: Dave Airlie
*/
-#include <drm/drmP.h>
#include <linux/dma-buf.h>
#include "nouveau_drv.h"
@@ -62,29 +61,35 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_bo *nvbo;
- struct reservation_object *robj = attach->dmabuf->resv;
+ struct dma_resv *robj = attach->dmabuf->resv;
+ size_t size = attach->dmabuf->size;
u32 flags = 0;
int ret;
flags = TTM_PL_FLAG_TT;
- reservation_object_lock(robj, NULL);
- ret = nouveau_bo_new(&drm->client, attach->dmabuf->size, 0, flags, 0, 0,
- sg, robj, &nvbo);
- reservation_object_unlock(robj);
- if (ret)
- return ERR_PTR(ret);
+ dma_resv_lock(robj, NULL);
+ nvbo = nouveau_bo_alloc(&drm->client, size, flags, 0, 0);
+ dma_resv_unlock(robj);
+ if (IS_ERR(nvbo))
+ return ERR_CAST(nvbo);
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
/* Initialize the embedded gem-object. We return a single gem-reference
* to the caller, instead of a normal nouveau_bo ttm reference. */
- ret = drm_gem_object_init(dev, &nvbo->bo.base, nvbo->bo.mem.size);
+ ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
if (ret) {
nouveau_bo_ref(NULL, &nvbo);
return ERR_PTR(-ENOMEM);
}
+ ret = nouveau_bo_init(nvbo, size, 0, flags, sg, robj);
+ if (ret) {
+ nouveau_bo_ref(NULL, &nvbo);
+ return ERR_PTR(ret);
+ }
+
return &nvbo->bo.base;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 8f4b12a8092c..d865d8aeac3c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -2,7 +2,6 @@
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
diff --git a/drivers/gpu/drm/nouveau/nvif/mmu.c b/drivers/gpu/drm/nouveau/nvif/mmu.c
index ae08a1ca8044..5641bda2046d 100644
--- a/drivers/gpu/drm/nouveau/nvif/mmu.c
+++ b/drivers/gpu/drm/nouveau/nvif/mmu.c
@@ -110,7 +110,7 @@ nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
if (mmu->kind_nr) {
struct nvif_mmu_kind_v0 *kind;
- u32 argc = sizeof(*kind) + sizeof(*kind->data) * mmu->kind_nr;
+ size_t argc = struct_size(kind, data, mmu->kind_nr);
if (ret = -ENOMEM, !(kind = kmalloc(argc, GFP_KERNEL)))
goto done;
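
struct_size() (from <linux/overflow.h>) is the overflow-safe idiom for sizing a structure that ends in a flexible array member: it evaluates to sizeof(*p) + n * sizeof(p->member[0]) and saturates to SIZE_MAX on overflow, so the subsequent allocation fails instead of returning an undersized buffer. A self-contained sketch of the same pattern; the struct and helper names are illustrative, not part of nvif:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct kind_args {			/* illustrative only */
		u8 version;
		u8 count;
		u8 data[];			/* flexible array member */
	};

	static struct kind_args *kind_args_alloc(u8 nr)
	{
		struct kind_args *args;

		/*
		 * struct_size(args, data, nr) is sizeof(*args) plus
		 * nr * sizeof(args->data[0]), saturating to SIZE_MAX on
		 * overflow so kzalloc() fails cleanly rather than
		 * returning an undersized buffer.
		 */
		args = kzalloc(struct_size(args, data, nr), GFP_KERNEL);
		if (args)
			args->count = nr;
		return args;
	}
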
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index 10a2e7039a75..5a39e51d42d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -28,6 +28,7 @@
#include <core/enum.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
+#include <subdev/fault.h>
#include <engine/sw.h>
#include <nvif/class.h>
@@ -194,68 +195,6 @@ gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
}
static const struct nvkm_enum
-gf100_fifo_sched_reason[] = {
- { 0x0a, "CTXSW_TIMEOUT" },
- {}
-};
-
-static void
-gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
-{
- struct nvkm_device *device = fifo->base.engine.subdev.device;
- struct nvkm_engine *engine;
- struct gf100_fifo_chan *chan;
- unsigned long flags;
- u32 engn;
-
- spin_lock_irqsave(&fifo->base.lock, flags);
- for (engn = 0; engn < 6; engn++) {
- u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
- u32 busy = (stat & 0x80000000);
- u32 save = (stat & 0x00100000); /* maybe? */
- u32 unk0 = (stat & 0x00040000);
- u32 unk1 = (stat & 0x00001000);
- u32 chid = (stat & 0x0000007f);
- (void)save;
-
- if (busy && unk0 && unk1) {
- list_for_each_entry(chan, &fifo->chan, head) {
- if (chan->base.chid == chid) {
- engine = gf100_fifo_engine(fifo, engn);
- if (!engine)
- break;
- gf100_fifo_recover(fifo, engine, chan);
- break;
- }
- }
- }
- }
- spin_unlock_irqrestore(&fifo->base.lock, flags);
-}
-
-static void
-gf100_fifo_intr_sched(struct gf100_fifo *fifo)
-{
- struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
- u32 intr = nvkm_rd32(device, 0x00254c);
- u32 code = intr & 0x000000ff;
- const struct nvkm_enum *en;
-
- en = nvkm_enum_find(gf100_fifo_sched_reason, code);
-
- nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
-
- switch (code) {
- case 0x0a:
- gf100_fifo_intr_sched_ctxsw(fifo);
- break;
- default:
- break;
- }
-}
-
-static const struct nvkm_enum
gf100_fifo_fault_engine[] = {
{ 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
{ 0x03, "PEEPHOLE", NULL, NVKM_ENGINE_IFB },
@@ -315,32 +254,24 @@ gf100_fifo_fault_gpcclient[] = {
};
static void
-gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
+gf100_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
{
+ struct gf100_fifo *fifo = gf100_fifo(base);
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
- u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
- u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
- u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
- u32 gpc = (stat & 0x1f000000) >> 24;
- u32 client = (stat & 0x00001f00) >> 8;
- u32 write = (stat & 0x00000080);
- u32 hub = (stat & 0x00000040);
- u32 reason = (stat & 0x0000000f);
const struct nvkm_enum *er, *eu, *ec;
struct nvkm_engine *engine = NULL;
struct nvkm_fifo_chan *chan;
unsigned long flags;
char gpcid[8] = "";
- er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
- eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
- if (hub) {
- ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
+ er = nvkm_enum_find(gf100_fifo_fault_reason, info->reason);
+ eu = nvkm_enum_find(gf100_fifo_fault_engine, info->engine);
+ if (info->hub) {
+ ec = nvkm_enum_find(gf100_fifo_fault_hubclient, info->client);
} else {
- ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
- snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
+ ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, info->client);
+ snprintf(gpcid, sizeof(gpcid), "GPC%d/", info->gpc);
}
if (eu && eu->data2) {
@@ -360,22 +291,108 @@ gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
}
}
- chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);
+ chan = nvkm_fifo_chan_inst(&fifo->base, info->inst, &flags);
nvkm_error(subdev,
"%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
"reason %02x [%s] on channel %d [%010llx %s]\n",
- write ? "write" : "read", (u64)vahi << 32 | valo,
- unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
- reason, er ? er->name : "", chan ? chan->chid : -1,
- (u64)inst << 12,
- chan ? chan->object.client->name : "unknown");
+ info->access ? "write" : "read", info->addr,
+ info->engine, eu ? eu->name : "",
+ info->client, gpcid, ec ? ec->name : "",
+ info->reason, er ? er->name : "", chan ? chan->chid : -1,
+ info->inst, chan ? chan->object.client->name : "unknown");
if (engine && chan)
gf100_fifo_recover(fifo, engine, (void *)chan);
nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}
+static const struct nvkm_enum
+gf100_fifo_sched_reason[] = {
+ { 0x0a, "CTXSW_TIMEOUT" },
+ {}
+};
+
+static void
+gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_engine *engine;
+ struct gf100_fifo_chan *chan;
+ unsigned long flags;
+ u32 engn;
+
+ spin_lock_irqsave(&fifo->base.lock, flags);
+ for (engn = 0; engn < 6; engn++) {
+ u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
+ u32 busy = (stat & 0x80000000);
+ u32 save = (stat & 0x00100000); /* maybe? */
+ u32 unk0 = (stat & 0x00040000);
+ u32 unk1 = (stat & 0x00001000);
+ u32 chid = (stat & 0x0000007f);
+ (void)save;
+
+ if (busy && unk0 && unk1) {
+ list_for_each_entry(chan, &fifo->chan, head) {
+ if (chan->base.chid == chid) {
+ engine = gf100_fifo_engine(fifo, engn);
+ if (!engine)
+ break;
+ gf100_fifo_recover(fifo, engine, chan);
+ break;
+ }
+ }
+ }
+ }
+ spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+static void
+gf100_fifo_intr_sched(struct gf100_fifo *fifo)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x00254c);
+ u32 code = intr & 0x000000ff;
+ const struct nvkm_enum *en;
+
+ en = nvkm_enum_find(gf100_fifo_sched_reason, code);
+
+ nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
+
+ switch (code) {
+ case 0x0a:
+ gf100_fifo_intr_sched_ctxsw(fifo);
+ break;
+ default:
+ break;
+ }
+}
+
+void
+gf100_fifo_intr_fault(struct nvkm_fifo *fifo, int unit)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+ u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+ u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+ u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
+ struct nvkm_fault_data info;
+
+ info.inst = (u64)inst << 12;
+ info.addr = ((u64)vahi << 32) | valo;
+ info.time = 0;
+ info.engine = unit;
+ info.valid = 1;
+ info.gpc = (type & 0x1f000000) >> 24;
+ info.client = (type & 0x00001f00) >> 8;
+ info.access = (type & 0x00000080) >> 7;
+ info.hub = (type & 0x00000040) >> 6;
+ info.reason = (type & 0x0000000f);
+
+ nvkm_fifo_fault(fifo, &info);
+}
+
static const struct nvkm_bitfield
gf100_fifo_pbdma_intr[] = {
/* { 0x00008000, "" } seen with null ib push */
@@ -518,7 +535,7 @@ gf100_fifo_intr(struct nvkm_fifo *base)
u32 mask = nvkm_rd32(device, 0x00259c);
while (mask) {
u32 unit = __ffs(mask);
- gf100_fifo_intr_fault(fifo, unit);
+ gf100_fifo_intr_fault(&fifo->base, unit);
nvkm_wr32(device, 0x00259c, (1 << unit));
mask &= ~(1 << unit);
}
@@ -655,6 +672,7 @@ gf100_fifo = {
.init = gf100_fifo_init,
.fini = gf100_fifo_fini,
.intr = gf100_fifo_intr,
+ .fault = gf100_fifo_fault,
.uevent_init = gf100_fifo_uevent_init,
.uevent_fini = gf100_fifo_uevent_fini,
.chan = {
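
The rewritten fault handler packs the raw status word into struct nvkm_fault_data with plain mask-and-shift arithmetic. A worked decode with a hypothetical status value, following the gf100 field layout above:

	u32 type = 0x02000dcb;			/* hypothetical raw status word */

	gpc    = (type & 0x1f000000) >> 24;	/* 0x02 */
	client = (type & 0x00001f00) >> 8;	/* 0x0d */
	access = (type & 0x00000080) >> 7;	/* 1, i.e. a write */
	hub    = (type & 0x00000040) >> 6;	/* 1, i.e. a hub client */
	reason = (type & 0x0000000f);		/* 0x0b */
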
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 1053fe796466..5d4b695cab8e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -646,31 +646,6 @@ gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}
-static void
-gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
-{
- struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
- u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
- u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
- u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
- u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
- struct nvkm_fault_data info;
-
- info.inst = (u64)inst << 12;
- info.addr = ((u64)vahi << 32) | valo;
- info.time = 0;
- info.engine = unit;
- info.valid = 1;
- info.gpc = (type & 0x1f000000) >> 24;
- info.client = (type & 0x00001f00) >> 8;
- info.access = (type & 0x00000080) >> 7;
- info.hub = (type & 0x00000040) >> 6;
- info.reason = (type & 0x000000ff);
-
- nvkm_fifo_fault(&fifo->base, &info);
-}
-
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
{ 0x00000001, "MEMREQ" },
{ 0x00000002, "MEMACK_TIMEOUT" },
@@ -849,7 +824,7 @@ gk104_fifo_intr(struct nvkm_fifo *base)
u32 mask = nvkm_rd32(device, 0x00259c);
while (mask) {
u32 unit = __ffs(mask);
- gk104_fifo_intr_fault(fifo, unit);
+ fifo->func->intr.fault(&fifo->base, unit);
nvkm_wr32(device, 0x00259c, (1 << unit));
mask &= ~(1 << unit);
}
@@ -1204,6 +1179,7 @@ gk104_fifo_fault_gpcclient[] = {
static const struct gk104_fifo_func
gk104_fifo = {
+ .intr.fault = gf100_fifo_intr_fault,
.pbdma = &gk104_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gk104_fifo_fault_engine,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index c33f4593cbc6..6407a4a174cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -45,6 +45,10 @@ struct gk104_fifo {
};
struct gk104_fifo_func {
+ struct {
+ void (*fault)(struct nvkm_fifo *, int unit);
+ } intr;
+
const struct gk104_fifo_pbdma_func {
int (*nr)(struct gk104_fifo *);
void (*init)(struct gk104_fifo *);
@@ -110,12 +114,14 @@ void gk110_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *,
extern const struct gk104_fifo_pbdma_func gk208_fifo_pbdma;
void gk208_fifo_pbdma_init_timeout(struct gk104_fifo *);
+void gm107_fifo_intr_fault(struct nvkm_fifo *, int);
extern const struct nvkm_enum gm107_fifo_fault_engine[];
extern const struct gk104_fifo_runlist_func gm107_fifo_runlist;
extern const struct gk104_fifo_pbdma_func gm200_fifo_pbdma;
int gm200_fifo_pbdma_nr(struct gk104_fifo *);
+void gp100_fifo_intr_fault(struct nvkm_fifo *, int);
extern const struct nvkm_enum gp100_fifo_fault_engine[];
extern const struct nvkm_enum gv100_fifo_fault_access[];
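
Hoisting the decoder into the new intr.fault hook lets gk104_fifo_intr() stay generic while each generation (gf100, gm107, gp100) supplies its own register layout. A standalone sketch of the same dispatch shape; all names here are illustrative, not nvkm symbols:

	#include <stdio.h>

	struct fifo;

	struct fifo_func {
		struct {
			void (*fault)(struct fifo *, int unit);
		} intr;
	};

	struct fifo {
		const struct fifo_func *func;
	};

	static void gen_a_fault(struct fifo *f, int unit)
	{
		(void)f;	/* a real decoder would read f's registers */
		printf("gen-a decode, unit %d\n", unit);
	}

	static void gen_b_fault(struct fifo *f, int unit)
	{
		(void)f;
		printf("gen-b decode, unit %d\n", unit);
	}

	static const struct fifo_func gen_a = { .intr.fault = gen_a_fault };
	static const struct fifo_func gen_b = { .intr.fault = gen_b_fault };

	int main(void)
	{
		struct fifo fa = { &gen_a }, fb = { &gen_b };

		fa.func->intr.fault(&fa, 3);	/* shared loop, per-gen decode */
		fb.func->intr.fault(&fb, 3);
		return 0;
	}
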
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
index 8adfa6b182cb..f820969e4405 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
@@ -48,6 +48,7 @@ gk110_fifo_runlist = {
static const struct gk104_fifo_func
gk110_fifo = {
+ .intr.fault = gf100_fifo_intr_fault,
.pbdma = &gk104_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gk104_fifo_fault_engine,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index 9553fb4af601..2f54787b5fd0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -45,6 +45,7 @@ gk208_fifo_pbdma = {
static const struct gk104_fifo_func
gk208_fifo = {
+ .intr.fault = gf100_fifo_intr_fault,
.pbdma = &gk208_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gk104_fifo_fault_engine,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index a4c6ac3cd6c7..a814c4e0ed3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -26,6 +26,7 @@
static const struct gk104_fifo_func
gk20a_fifo = {
+ .intr.fault = gf100_fifo_intr_fault,
.pbdma = &gk208_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gk104_fifo_fault_engine,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index acf230764cb0..c2a2e4572f6c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -25,6 +25,7 @@
#include "changk104.h"
#include <core/gpuobj.h>
+#include <subdev/fault.h>
#include <nvif/class.h>
@@ -67,8 +68,33 @@ gm107_fifo_fault_engine[] = {
{}
};
+void
+gm107_fifo_intr_fault(struct nvkm_fifo *fifo, int unit)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+ u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+ u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+ u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
+ struct nvkm_fault_data info;
+
+ info.inst = (u64)inst << 12;
+ info.addr = ((u64)vahi << 32) | valo;
+ info.time = 0;
+ info.engine = unit;
+ info.valid = 1;
+ info.gpc = (type & 0x1f000000) >> 24;
+ info.client = (type & 0x00003f00) >> 8;
+ info.access = (type & 0x00000080) >> 7;
+ info.hub = (type & 0x00000040) >> 6;
+ info.reason = (type & 0x0000000f);
+
+ nvkm_fifo_fault(fifo, &info);
+}
+
static const struct gk104_fifo_func
gm107_fifo = {
+ .intr.fault = gm107_fifo_intr_fault,
.pbdma = &gk208_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gm107_fifo_fault_engine,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
index b96c1c5d6577..b8cfe3b28c4f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -42,6 +42,7 @@ gm200_fifo_pbdma = {
static const struct gk104_fifo_func
gm200_fifo = {
+ .intr.fault = gm107_fifo_intr_fault,
.pbdma = &gm200_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gm107_fifo_fault_engine,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
index a49539b9e4ec..70b4feebc1fa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -26,6 +26,7 @@
static const struct gk104_fifo_func
gm20b_fifo = {
+ .intr.fault = gm107_fifo_intr_fault,
.pbdma = &gm200_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gm107_fifo_fault_engine,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index 54377e0f6a88..2c7a0176b3c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -24,6 +24,8 @@
#include "gk104.h"
#include "changk104.h"
+#include <subdev/fault.h>
+
#include <nvif/class.h>
const struct nvkm_enum
@@ -50,8 +52,33 @@ gp100_fifo_fault_engine[] = {
{}
};
+void
+gp100_fifo_intr_fault(struct nvkm_fifo *fifo, int unit)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+ u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+ u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+ u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
+ struct nvkm_fault_data info;
+
+ info.inst = (u64)inst << 12;
+ info.addr = ((u64)vahi << 32) | valo;
+ info.time = 0;
+ info.engine = unit;
+ info.valid = 1;
+ info.gpc = (type & 0x1f000000) >> 24;
+ info.hub = (type & 0x00100000) >> 20;
+ info.access = (type & 0x00070000) >> 16;
+ info.client = (type & 0x00007f00) >> 8;
+ info.reason = (type & 0x0000001f);
+
+ nvkm_fifo_fault(fifo, &info);
+}
+
static const struct gk104_fifo_func
gp100_fifo = {
+ .intr.fault = gp100_fifo_intr_fault,
.pbdma = &gm200_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gp100_fifo_fault_engine,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
index 778ba7e46fb3..8c65ad4feedb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
@@ -26,6 +26,7 @@
static const struct gk104_fifo_func
gp10b_fifo = {
+ .intr.fault = gp100_fifo_intr_fault,
.pbdma = &gm200_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gp100_fifo_fault_engine,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index c66f5370b21f..0ef8baab513e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -37,4 +37,6 @@ struct nvkm_fifo_func {
void nv04_fifo_intr(struct nvkm_fifo *);
void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
+
+void gf100_fifo_intr_fault(struct nvkm_fifo *, int);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
index b8578359e61b..118e33174cbe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
@@ -46,6 +46,19 @@ extdev_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
return extdev + *hdr;
}
+bool
+nvbios_extdev_skip_probe(struct nvkm_bios *bios)
+{
+ u8 ver, hdr, len, cnt;
+ u16 data = extdev_table(bios, &ver, &hdr, &len, &cnt);
+ if (data && ver == 0x40 && hdr >= 5) {
+ u8 flags = nvbios_rd08(bios, data - hdr + 4);
+ if (flags & 1)
+ return true;
+ }
+ return false;
+}
+
static u16
nvbios_extdev_entry(struct nvkm_bios *bios, int idx, u8 *ver, u8 *len)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index ec0e9f7224b5..9de74f41dcd2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -834,7 +834,7 @@ init_generic_condition(struct nvbios_init *init)
init_exec_set(init, false);
break;
default:
- warn("INIT_GENERIC_CONDITON: unknown 0x%02x\n", cond);
+ warn("INIT_GENERIC_CONDITION: unknown 0x%02x\n", cond);
init->offset += size;
break;
}
@@ -1935,6 +1935,28 @@ init_ram_restrict_pll(struct nvbios_init *init)
}
/**
+ * INIT_RESET_BEGUN - opcode 0x8c
+ *
+ */
+static void
+init_reset_begun(struct nvbios_init *init)
+{
+ trace("RESET_BEGUN\n");
+ init->offset += 1;
+}
+
+/**
+ * INIT_RESET_END - opcode 0x8d
+ *
+ */
+static void
+init_reset_end(struct nvbios_init *init)
+{
+ trace("RESET_END\n");
+ init->offset += 1;
+}
+
+/**
* INIT_GPIO - opcode 0x8e
*
*/
@@ -2260,8 +2282,8 @@ static struct nvbios_init_opcode {
[0x79] = { init_pll },
[0x7a] = { init_zm_reg },
[0x87] = { init_ram_restrict_pll },
- [0x8c] = { init_reserved },
- [0x8d] = { init_reserved },
+ [0x8c] = { init_reset_begun },
+ [0x8d] = { init_reset_end },
[0x8e] = { init_gpio },
[0x8f] = { init_ram_restrict_zm_reg_group },
[0x90] = { init_copy_zm_reg },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
index 7143ea4611aa..33a9fb5ac558 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
@@ -96,6 +96,8 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
info->min = min(info->base,
info->base + info->step * info->vidmask);
info->max = nvbios_rd32(bios, volt + 0x0e);
+ if (!info->max)
+ info->max = max(info->base, info->base + info->step * info->vidmask);
break;
case 0x50:
info->min = nvbios_rd32(bios, volt + 0x0a);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
index 1399d923d446..914276410ef8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
@@ -23,6 +23,7 @@
*/
#include "priv.h"
+#include <core/option.h>
#include <core/notify.h>
static int
@@ -182,12 +183,43 @@ static const struct dmi_system_id gpio_reset_ids[] = {
{ }
};
+static enum dcb_gpio_func_name power_checks[] = {
+ DCB_GPIO_THERM_EXT_POWER_EVENT,
+ DCB_GPIO_POWER_ALERT,
+ DCB_GPIO_EXT_POWER_LOW,
+};
+
static int
nvkm_gpio_init(struct nvkm_subdev *subdev)
{
struct nvkm_gpio *gpio = nvkm_gpio(subdev);
+ struct dcb_gpio_func func;
+ int ret;
+ int i;
+
if (dmi_check_system(gpio_reset_ids))
nvkm_gpio_reset(gpio, DCB_GPIO_UNUSED);
+
+ if (nvkm_boolopt(subdev->device->cfgopt, "NvPowerChecks", true)) {
+ for (i = 0; i < ARRAY_SIZE(power_checks); ++i) {
+ ret = nvkm_gpio_find(gpio, 0, power_checks[i],
+ DCB_GPIO_UNUSED, &func);
+ if (ret)
+ continue;
+
+ ret = nvkm_gpio_get(gpio, 0, func.func, func.line);
+ if (!ret)
+ continue;
+
+ nvkm_error(&gpio->subdev,
+ "GPU is missing power, check its power "
+ "cables. Boot with "
+ "nouveau.config=NvPowerChecks=0 to "
+ "disable.\n");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
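
If the new sanity check trips on a board whose sensing GPIOs are wired unconventionally, the error message above names the escape hatch; it is passed through nouveau's config option string on the kernel command line:

	nouveau.config=NvPowerChecks=0
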
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index ce70a193caa7..ea2e11771bca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -26,6 +26,24 @@
#include <core/msgqueue.h>
#include <subdev/timer.h>
+bool
+nvkm_pmu_fan_controlled(struct nvkm_device *device)
+{
+ struct nvkm_pmu *pmu = device->pmu;
+
+ /* Internal PMU FW does not currently control fans in any way,
+ * so allow SW control of fans instead.
+ */
+ if (pmu && pmu->func->code.size)
+ return false;
+
+ /* Default (board-loaded, or VBIOS PMU/PREOS) PMU FW on Fermi
+ * and newer automatically controls the fan speed, which would
+ * interfere with SW control.
+ */
+ return (device->chipset >= 0xc0);
+}
+
void
nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
{
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
index 4fd4cfe459b8..7af971db91bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
@@ -1088,7 +1088,7 @@ acr_r352_ls_gpccs_func_0 = {
.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};
-const struct acr_r352_ls_func
+static const struct acr_r352_ls_func
acr_r352_ls_gpccs_func = {
.load = acr_ls_ucode_load_gpccs,
.version_max = 0,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 07914e36939e..4a4d1e224126 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -21,9 +21,11 @@
*
* Authors: Martin Peres
*/
-#include <nvkm/core/option.h>
#include "priv.h"
+#include <core/option.h>
+#include <subdev/pmu.h>
+
int
nvkm_therm_temp_get(struct nvkm_therm *therm)
{
@@ -192,8 +194,7 @@ nvkm_therm_fan_mode(struct nvkm_therm *therm, int mode)
/* The default PPWR ucode on fermi interferes with fan management */
if ((mode >= ARRAY_SIZE(name)) ||
- (mode != NVKM_THERM_CTRL_NONE && device->card_type >= NV_C0 &&
- !device->pmu))
+ (mode != NVKM_THERM_CTRL_NONE && nvkm_pmu_fan_controlled(device)))
return -EINVAL;
/* do not allow automatic fan management if the thermal sensor is
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
index 6e0ddc1bb583..03b355dabab3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
@@ -116,6 +116,9 @@ nvkm_therm_ic_ctor(struct nvkm_therm *therm)
return;
}
+ if (nvbios_extdev_skip_probe(bios))
+ return;
+
/* The vbios doesn't provide the address of an existing monitoring
device. Let's try our static list.
*/
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index c2566da32ac4..240dda102845 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -29,42 +29,4 @@ config DRM_OMAP_PANEL_DSI_CM
help
Driver for generic DSI command mode panels.
-config DRM_OMAP_PANEL_SONY_ACX565AKM
- tristate "ACX565AKM Panel"
- depends on SPI && BACKLIGHT_CLASS_DEVICE
- help
- This is the LCD panel used on Nokia N900
-
-config DRM_OMAP_PANEL_LGPHILIPS_LB035Q02
- tristate "LG.Philips LB035Q02 LCD Panel"
- depends on SPI
- help
- LCD Panel used on the Gumstix Overo Palo35
-
-config DRM_OMAP_PANEL_SHARP_LS037V7DW01
- tristate "Sharp LS037V7DW01 LCD Panel"
- depends on BACKLIGHT_CLASS_DEVICE
- help
- LCD Panel used in TI's SDP3430 and EVM boards
-
-config DRM_OMAP_PANEL_TPO_TD028TTEC1
- tristate "TPO TD028TTEC1 LCD Panel"
- depends on SPI
- help
- LCD panel used in Openmoko.
-
-config DRM_OMAP_PANEL_TPO_TD043MTEA1
- tristate "TPO TD043MTEA1 LCD Panel"
- depends on SPI
- help
- LCD Panel used in OMAP3 Pandora
-
-config DRM_OMAP_PANEL_NEC_NL8048HL11
- tristate "NEC NL8048HL11 Panel"
- depends on SPI
- depends on BACKLIGHT_CLASS_DEVICE
- help
- This NEC NL8048HL11 panel is TFT LCD used in the
- Zoom2/3/3630 sdp boards.
-
endmenu
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
index 1db34d4fed64..cb76859dc574 100644
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ b/drivers/gpu/drm/omapdrm/displays/Makefile
@@ -4,9 +4,3 @@ obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o
obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o
obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
-obj-$(CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
-obj-$(CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
-obj-$(CONFIG_DRM_OMAP_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
-obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
-obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
-obj-$(CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
deleted file mode 100644
index 1fd0d84e6e38..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ /dev/null
@@ -1,251 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * LG.Philips LB035Q02 LCD Panel driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <[email protected]>
- * Based on a driver by: Steve Sakoman <[email protected]>
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/spi/spi.h>
-#include <linux/mutex.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
-
-#include "../dss/omapdss.h"
-
-static const struct videomode lb035q02_vm = {
- .hactive = 320,
- .vactive = 240,
-
- .pixelclock = 6500000,
-
- .hsync_len = 2,
- .hfront_porch = 20,
- .hback_porch = 68,
-
- .vsync_len = 2,
- .vfront_porch = 4,
- .vback_porch = 18,
-
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
-};
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct spi_device *spi;
-
- struct videomode vm;
-
- struct gpio_desc *enable_gpio;
-};
-
-#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-
-static int lb035q02_write_reg(struct spi_device *spi, u8 reg, u16 val)
-{
- struct spi_message msg;
- struct spi_transfer index_xfer = {
- .len = 3,
- .cs_change = 1,
- };
- struct spi_transfer value_xfer = {
- .len = 3,
- };
- u8 buffer[16];
-
- spi_message_init(&msg);
-
- /* register index */
- buffer[0] = 0x70;
- buffer[1] = 0x00;
- buffer[2] = reg & 0x7f;
- index_xfer.tx_buf = buffer;
- spi_message_add_tail(&index_xfer, &msg);
-
- /* register value */
- buffer[4] = 0x72;
- buffer[5] = val >> 8;
- buffer[6] = val;
- value_xfer.tx_buf = buffer + 4;
- spi_message_add_tail(&value_xfer, &msg);
-
- return spi_sync(spi, &msg);
-}
-
-static void init_lb035q02_panel(struct spi_device *spi)
-{
- /* Init sequence from page 28 of the lb035q02 spec */
- lb035q02_write_reg(spi, 0x01, 0x6300);
- lb035q02_write_reg(spi, 0x02, 0x0200);
- lb035q02_write_reg(spi, 0x03, 0x0177);
- lb035q02_write_reg(spi, 0x04, 0x04c7);
- lb035q02_write_reg(spi, 0x05, 0xffc0);
- lb035q02_write_reg(spi, 0x06, 0xe806);
- lb035q02_write_reg(spi, 0x0a, 0x4008);
- lb035q02_write_reg(spi, 0x0b, 0x0000);
- lb035q02_write_reg(spi, 0x0d, 0x0030);
- lb035q02_write_reg(spi, 0x0e, 0x2800);
- lb035q02_write_reg(spi, 0x0f, 0x0000);
- lb035q02_write_reg(spi, 0x16, 0x9f80);
- lb035q02_write_reg(spi, 0x17, 0x0a0f);
- lb035q02_write_reg(spi, 0x1e, 0x00c1);
- lb035q02_write_reg(spi, 0x30, 0x0300);
- lb035q02_write_reg(spi, 0x31, 0x0007);
- lb035q02_write_reg(spi, 0x32, 0x0000);
- lb035q02_write_reg(spi, 0x33, 0x0000);
- lb035q02_write_reg(spi, 0x34, 0x0707);
- lb035q02_write_reg(spi, 0x35, 0x0004);
- lb035q02_write_reg(spi, 0x36, 0x0302);
- lb035q02_write_reg(spi, 0x37, 0x0202);
- lb035q02_write_reg(spi, 0x3a, 0x0a0d);
- lb035q02_write_reg(spi, 0x3b, 0x0806);
-}
-
-static int lb035q02_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- struct panel_drv_data *ddata = to_panel_data(dst);
-
- init_lb035q02_panel(ddata->spi);
-
- return 0;
-}
-
-static void lb035q02_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static void lb035q02_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (ddata->enable_gpio)
- gpiod_set_value_cansleep(ddata->enable_gpio, 1);
-}
-
-static void lb035q02_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (ddata->enable_gpio)
- gpiod_set_value_cansleep(ddata->enable_gpio, 0);
-}
-
-static int lb035q02_get_modes(struct omap_dss_device *dssdev,
- struct drm_connector *connector)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- return omapdss_display_get_modes(connector, &ddata->vm);
-}
-
-static const struct omap_dss_device_ops lb035q02_ops = {
- .connect = lb035q02_connect,
- .disconnect = lb035q02_disconnect,
-
- .enable = lb035q02_enable,
- .disable = lb035q02_disable,
-
- .get_modes = lb035q02_get_modes,
-};
-
-static int lb035q02_probe_of(struct spi_device *spi)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct gpio_desc *gpio;
-
- gpio = devm_gpiod_get(&spi->dev, "enable", GPIOD_OUT_LOW);
- if (IS_ERR(gpio)) {
- dev_err(&spi->dev, "failed to parse enable gpio\n");
- return PTR_ERR(gpio);
- }
-
- ddata->enable_gpio = gpio;
-
- return 0;
-}
-
-static int lb035q02_panel_spi_probe(struct spi_device *spi)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- int r;
-
- ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
- if (ddata == NULL)
- return -ENOMEM;
-
- dev_set_drvdata(&spi->dev, ddata);
-
- ddata->spi = spi;
-
- r = lb035q02_probe_of(spi);
- if (r)
- return r;
-
- ddata->vm = lb035q02_vm;
-
- dssdev = &ddata->dssdev;
- dssdev->dev = &spi->dev;
- dssdev->ops = &lb035q02_ops;
- dssdev->type = OMAP_DISPLAY_TYPE_DPI;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
- dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
-
- /*
- * Note: According to the panel documentation:
- * DE is active LOW
- * DATA needs to be driven on the FALLING edge
- */
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
- | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE
- | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int lb035q02_panel_spi_remove(struct spi_device *spi)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- omapdss_device_unregister(dssdev);
-
- lb035q02_disable(dssdev);
-
- return 0;
-}
-
-static const struct of_device_id lb035q02_of_match[] = {
- { .compatible = "omapdss,lgphilips,lb035q02", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, lb035q02_of_match);
-
-static struct spi_driver lb035q02_spi_driver = {
- .probe = lb035q02_panel_spi_probe,
- .remove = lb035q02_panel_spi_remove,
- .driver = {
- .name = "panel_lgphilips_lb035q02",
- .of_match_table = lb035q02_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_spi_driver(lb035q02_spi_driver);
-
-MODULE_ALIAS("spi:lgphilips,lb035q02");
-MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
-MODULE_DESCRIPTION("LG.Philips LB035Q02 LCD Panel driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
deleted file mode 100644
index eba5bd1d702f..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ /dev/null
@@ -1,271 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * NEC NL8048HL11 Panel driver
- *
- * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Erik Gilling <[email protected]>
- * Converted to new DSS device model: Tomi Valkeinen <[email protected]>
- */
-
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/spi/spi.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct videomode vm;
-
- struct gpio_desc *res_gpio;
-
- struct spi_device *spi;
-};
-
-#define LCD_XRES 800
-#define LCD_YRES 480
-/*
- * NEC PIX Clock Ratings
- * MIN:21.8MHz TYP:23.8MHz MAX:25.7MHz
- */
-#define LCD_PIXEL_CLOCK 23800000
-
-static const struct {
- unsigned char addr;
- unsigned char dat;
-} nec_8048_init_seq[] = {
- { 3, 0x01 }, { 0, 0x00 }, { 1, 0x01 }, { 4, 0x00 }, { 5, 0x14 },
- { 6, 0x24 }, { 16, 0xD7 }, { 17, 0x00 }, { 18, 0x00 }, { 19, 0x55 },
- { 20, 0x01 }, { 21, 0x70 }, { 22, 0x1E }, { 23, 0x25 }, { 24, 0x25 },
- { 25, 0x02 }, { 26, 0x02 }, { 27, 0xA0 }, { 32, 0x2F }, { 33, 0x0F },
- { 34, 0x0F }, { 35, 0x0F }, { 36, 0x0F }, { 37, 0x0F }, { 38, 0x0F },
- { 39, 0x00 }, { 40, 0x02 }, { 41, 0x02 }, { 42, 0x02 }, { 43, 0x0F },
- { 44, 0x0F }, { 45, 0x0F }, { 46, 0x0F }, { 47, 0x0F }, { 48, 0x0F },
- { 49, 0x0F }, { 50, 0x00 }, { 51, 0x02 }, { 52, 0x02 }, { 53, 0x02 },
- { 80, 0x0C }, { 83, 0x42 }, { 84, 0x42 }, { 85, 0x41 }, { 86, 0x14 },
- { 89, 0x88 }, { 90, 0x01 }, { 91, 0x00 }, { 92, 0x02 }, { 93, 0x0C },
- { 94, 0x1C }, { 95, 0x27 }, { 98, 0x49 }, { 99, 0x27 }, { 102, 0x76 },
- { 103, 0x27 }, { 112, 0x01 }, { 113, 0x0E }, { 114, 0x02 },
- { 115, 0x0C }, { 118, 0x0C }, { 121, 0x30 }, { 130, 0x00 },
- { 131, 0x00 }, { 132, 0xFC }, { 134, 0x00 }, { 136, 0x00 },
- { 138, 0x00 }, { 139, 0x00 }, { 140, 0x00 }, { 141, 0xFC },
- { 143, 0x00 }, { 145, 0x00 }, { 147, 0x00 }, { 148, 0x00 },
- { 149, 0x00 }, { 150, 0xFC }, { 152, 0x00 }, { 154, 0x00 },
- { 156, 0x00 }, { 157, 0x00 }, { 2, 0x00 },
-};
-
-static const struct videomode nec_8048_panel_vm = {
- .hactive = LCD_XRES,
- .vactive = LCD_YRES,
- .pixelclock = LCD_PIXEL_CLOCK,
- .hfront_porch = 6,
- .hsync_len = 1,
- .hback_porch = 4,
- .vfront_porch = 3,
- .vsync_len = 1,
- .vback_porch = 4,
-
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
-};
-
-#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-
-static int nec_8048_spi_send(struct spi_device *spi, unsigned char reg_addr,
- unsigned char reg_data)
-{
- int ret = 0;
- unsigned int cmd = 0, data = 0;
-
- cmd = 0x0000 | reg_addr; /* register address write */
- data = 0x0100 | reg_data; /* register data write */
- data = (cmd << 16) | data;
-
- ret = spi_write(spi, (unsigned char *)&data, 4);
- if (ret)
- pr_err("error in spi_write %x\n", data);
-
- return ret;
-}
-
-static int init_nec_8048_wvga_lcd(struct spi_device *spi)
-{
- unsigned int i;
- /* Initialization Sequence */
- /* nec_8048_spi_send(spi, REG, VAL) */
- for (i = 0; i < (ARRAY_SIZE(nec_8048_init_seq) - 1); i++)
- nec_8048_spi_send(spi, nec_8048_init_seq[i].addr,
- nec_8048_init_seq[i].dat);
- udelay(20);
- nec_8048_spi_send(spi, nec_8048_init_seq[i].addr,
- nec_8048_init_seq[i].dat);
- return 0;
-}
-
-static int nec_8048_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void nec_8048_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static void nec_8048_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- gpiod_set_value_cansleep(ddata->res_gpio, 1);
-}
-
-static void nec_8048_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- gpiod_set_value_cansleep(ddata->res_gpio, 0);
-}
-
-static int nec_8048_get_modes(struct omap_dss_device *dssdev,
- struct drm_connector *connector)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- return omapdss_display_get_modes(connector, &ddata->vm);
-}
-
-static const struct omap_dss_device_ops nec_8048_ops = {
- .connect = nec_8048_connect,
- .disconnect = nec_8048_disconnect,
-
- .enable = nec_8048_enable,
- .disable = nec_8048_disable,
-
- .get_modes = nec_8048_get_modes,
-};
-
-static int nec_8048_probe(struct spi_device *spi)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- struct gpio_desc *gpio;
- int r;
-
- dev_dbg(&spi->dev, "%s\n", __func__);
-
- spi->mode = SPI_MODE_0;
- spi->bits_per_word = 32;
-
- r = spi_setup(spi);
- if (r < 0) {
- dev_err(&spi->dev, "spi_setup failed: %d\n", r);
- return r;
- }
-
- init_nec_8048_wvga_lcd(spi);
-
- ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
- if (ddata == NULL)
- return -ENOMEM;
-
- dev_set_drvdata(&spi->dev, ddata);
-
- ddata->spi = spi;
-
- gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(gpio)) {
- dev_err(&spi->dev, "failed to get reset gpio\n");
- return PTR_ERR(gpio);
- }
-
- ddata->res_gpio = gpio;
-
- ddata->vm = nec_8048_panel_vm;
-
- dssdev = &ddata->dssdev;
- dssdev->dev = &spi->dev;
- dssdev->ops = &nec_8048_ops;
- dssdev->type = OMAP_DISPLAY_TYPE_DPI;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
- dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
- | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE
- | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int nec_8048_remove(struct spi_device *spi)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- dev_dbg(&ddata->spi->dev, "%s\n", __func__);
-
- omapdss_device_unregister(dssdev);
-
- nec_8048_disable(dssdev);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int nec_8048_suspend(struct device *dev)
-{
- struct spi_device *spi = to_spi_device(dev);
-
- nec_8048_spi_send(spi, 2, 0x01);
- mdelay(40);
-
- return 0;
-}
-
-static int nec_8048_resume(struct device *dev)
-{
- struct spi_device *spi = to_spi_device(dev);
-
- /* reinitialize the panel */
- spi_setup(spi);
- nec_8048_spi_send(spi, 2, 0x00);
- init_nec_8048_wvga_lcd(spi);
-
- return 0;
-}
-static SIMPLE_DEV_PM_OPS(nec_8048_pm_ops, nec_8048_suspend,
- nec_8048_resume);
-#define NEC_8048_PM_OPS (&nec_8048_pm_ops)
-#else
-#define NEC_8048_PM_OPS NULL
-#endif
-
-static const struct of_device_id nec_8048_of_match[] = {
- { .compatible = "omapdss,nec,nl8048hl11", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, nec_8048_of_match);
-
-static struct spi_driver nec_8048_driver = {
- .driver = {
- .name = "panel-nec-nl8048hl11",
- .pm = NEC_8048_PM_OPS,
- .of_match_table = nec_8048_of_match,
- .suppress_bind_attrs = true,
- },
- .probe = nec_8048_probe,
- .remove = nec_8048_remove,
-};
-
-module_spi_driver(nec_8048_driver);
-
-MODULE_ALIAS("spi:nec,nl8048hl11");
-MODULE_AUTHOR("Erik Gilling <[email protected]>");
-MODULE_DESCRIPTION("NEC-NL8048HL11 Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
deleted file mode 100644
index 3ab50fd1f3f2..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ /dev/null
@@ -1,262 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * LCD panel driver for Sharp LS037V7DW01
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <[email protected]>
- */
-
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/regulator/consumer.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
- struct regulator *vcc;
-
- struct videomode vm;
-
- struct gpio_desc *resb_gpio; /* low = reset active min 20 us */
- struct gpio_desc *ini_gpio; /* high = power on */
- struct gpio_desc *mo_gpio; /* low = 480x640, high = 240x320 */
- struct gpio_desc *lr_gpio; /* high = conventional horizontal scanning */
- struct gpio_desc *ud_gpio; /* high = conventional vertical scanning */
-};
-
-static const struct videomode sharp_ls_vm = {
- .hactive = 480,
- .vactive = 640,
-
- .pixelclock = 19200000,
-
- .hsync_len = 2,
- .hfront_porch = 1,
- .hback_porch = 28,
-
- .vsync_len = 1,
- .vfront_porch = 1,
- .vback_porch = 1,
-
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
-};
-
-#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-
-static int sharp_ls_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void sharp_ls_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static void sharp_ls_pre_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- int r;
-
- if (ddata->vcc) {
- r = regulator_enable(ddata->vcc);
- if (r)
- dev_err(dssdev->dev, "%s: failed to enable regulator\n",
- __func__);
- }
-}
-
-static void sharp_ls_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- /* wait couple of vsyncs until enabling the LCD */
- msleep(50);
-
- if (ddata->resb_gpio)
- gpiod_set_value_cansleep(ddata->resb_gpio, 1);
-
- if (ddata->ini_gpio)
- gpiod_set_value_cansleep(ddata->ini_gpio, 1);
-}
-
-static void sharp_ls_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (ddata->ini_gpio)
- gpiod_set_value_cansleep(ddata->ini_gpio, 0);
-
- if (ddata->resb_gpio)
- gpiod_set_value_cansleep(ddata->resb_gpio, 0);
-
- /* wait at least 5 vsyncs after disabling the LCD */
- msleep(100);
-}
-
-static void sharp_ls_post_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (ddata->vcc)
- regulator_disable(ddata->vcc);
-}
-
-static int sharp_ls_get_modes(struct omap_dss_device *dssdev,
- struct drm_connector *connector)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- return omapdss_display_get_modes(connector, &ddata->vm);
-}
-
-static const struct omap_dss_device_ops sharp_ls_ops = {
- .connect = sharp_ls_connect,
- .disconnect = sharp_ls_disconnect,
-
- .pre_enable = sharp_ls_pre_enable,
- .enable = sharp_ls_enable,
- .disable = sharp_ls_disable,
- .post_disable = sharp_ls_post_disable,
-
- .get_modes = sharp_ls_get_modes,
-};
-
-static int sharp_ls_get_gpio_of(struct device *dev, int index, int val,
- const char *desc, struct gpio_desc **gpiod)
-{
- struct gpio_desc *gd;
-
- *gpiod = NULL;
-
- gd = devm_gpiod_get_index(dev, desc, index, GPIOD_OUT_LOW);
- if (IS_ERR(gd))
- return PTR_ERR(gd);
-
- *gpiod = gd;
- return 0;
-}
-
-static int sharp_ls_probe_of(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- int r;
-
- ddata->vcc = devm_regulator_get(&pdev->dev, "envdd");
- if (IS_ERR(ddata->vcc)) {
- dev_err(&pdev->dev, "failed to get regulator\n");
- return PTR_ERR(ddata->vcc);
- }
-
- /* lcd INI */
- r = sharp_ls_get_gpio_of(&pdev->dev, 0, 0, "enable", &ddata->ini_gpio);
- if (r)
- return r;
-
- /* lcd RESB */
- r = sharp_ls_get_gpio_of(&pdev->dev, 0, 0, "reset", &ddata->resb_gpio);
- if (r)
- return r;
-
- /* lcd MO */
- r = sharp_ls_get_gpio_of(&pdev->dev, 0, 0, "mode", &ddata->mo_gpio);
- if (r)
- return r;
-
- /* lcd LR */
- r = sharp_ls_get_gpio_of(&pdev->dev, 1, 1, "mode", &ddata->lr_gpio);
- if (r)
- return r;
-
- /* lcd UD */
- r = sharp_ls_get_gpio_of(&pdev->dev, 2, 1, "mode", &ddata->ud_gpio);
- if (r)
- return r;
-
- return 0;
-}
-
-static int sharp_ls_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- int r;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (ddata == NULL)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
-
- r = sharp_ls_probe_of(pdev);
- if (r)
- return r;
-
- ddata->vm = sharp_ls_vm;
-
- dssdev = &ddata->dssdev;
- dssdev->dev = &pdev->dev;
- dssdev->ops = &sharp_ls_ops;
- dssdev->type = OMAP_DISPLAY_TYPE_DPI;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
- dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
-
- /*
- * Note: According to the panel documentation:
- * DATA needs to be driven on the FALLING edge
- */
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
- | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE
- | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit sharp_ls_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- omapdss_device_unregister(dssdev);
-
- if (omapdss_device_is_enabled(dssdev)) {
- sharp_ls_disable(dssdev);
- sharp_ls_post_disable(dssdev);
- }
-
- return 0;
-}
-
-static const struct of_device_id sharp_ls_of_match[] = {
- { .compatible = "omapdss,sharp,ls037v7dw01", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, sharp_ls_of_match);
-
-static struct platform_driver sharp_ls_driver = {
- .probe = sharp_ls_probe,
- .remove = __exit_p(sharp_ls_remove),
- .driver = {
- .name = "panel-sharp-ls037v7dw01",
- .of_match_table = sharp_ls_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(sharp_ls_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
-MODULE_DESCRIPTION("Sharp LS037V7DW01 Panel Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
deleted file mode 100644
index 588a1a6bbcc3..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ /dev/null
@@ -1,755 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Sony ACX565AKM LCD Panel driver
- *
- * Copyright (C) 2010 Nokia Corporation
- *
- * Original Driver Author: Imre Deak <[email protected]>
- * Based on panel-generic.c by Tomi Valkeinen <[email protected]>
- * Adapted to new DSS2 framework: Roger Quadros <[email protected]>
- */
-
-#include <linux/backlight.h>
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/spi/spi.h>
-
-#include "../dss/omapdss.h"
-
-#define MIPID_CMD_READ_DISP_ID 0x04
-#define MIPID_CMD_READ_RED 0x06
-#define MIPID_CMD_READ_GREEN 0x07
-#define MIPID_CMD_READ_BLUE 0x08
-#define MIPID_CMD_READ_DISP_STATUS 0x09
-#define MIPID_CMD_RDDSDR 0x0F
-#define MIPID_CMD_SLEEP_IN 0x10
-#define MIPID_CMD_SLEEP_OUT 0x11
-#define MIPID_CMD_DISP_OFF 0x28
-#define MIPID_CMD_DISP_ON 0x29
-#define MIPID_CMD_WRITE_DISP_BRIGHTNESS 0x51
-#define MIPID_CMD_READ_DISP_BRIGHTNESS 0x52
-#define MIPID_CMD_WRITE_CTRL_DISP 0x53
-
-#define CTRL_DISP_BRIGHTNESS_CTRL_ON (1 << 5)
-#define CTRL_DISP_AMBIENT_LIGHT_CTRL_ON (1 << 4)
-#define CTRL_DISP_BACKLIGHT_ON (1 << 2)
-#define CTRL_DISP_AUTO_BRIGHTNESS_ON (1 << 1)
-
-#define MIPID_CMD_READ_CTRL_DISP 0x54
-#define MIPID_CMD_WRITE_CABC 0x55
-#define MIPID_CMD_READ_CABC 0x56
-
-#define MIPID_VER_LPH8923 3
-#define MIPID_VER_LS041Y3 4
-#define MIPID_VER_L4F00311 8
-#define MIPID_VER_ACX565AKM 9
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct gpio_desc *reset_gpio;
-
- struct videomode vm;
-
- char *name;
- int enabled;
- int model;
- int revision;
- u8 display_id[3];
- unsigned has_bc:1;
- unsigned has_cabc:1;
- unsigned cabc_mode;
- unsigned long hw_guard_end; /* next value of jiffies
- when we can issue the
- next sleep in/out command */
- unsigned long hw_guard_wait; /* max guard time in jiffies */
-
- struct spi_device *spi;
- struct mutex mutex;
-
- struct backlight_device *bl_dev;
-};
-
-static const struct videomode acx565akm_panel_vm = {
- .hactive = 800,
- .vactive = 480,
- .pixelclock = 24000000,
- .hfront_porch = 28,
- .hsync_len = 4,
- .hback_porch = 24,
- .vfront_porch = 3,
- .vsync_len = 3,
- .vback_porch = 4,
-
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
-};
-
-#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-
-static void acx565akm_transfer(struct panel_drv_data *ddata, int cmd,
- const u8 *wbuf, int wlen, u8 *rbuf, int rlen)
-{
- struct spi_message m;
- struct spi_transfer *x, xfer[5];
- int r;
-
- BUG_ON(ddata->spi == NULL);
-
- spi_message_init(&m);
-
- memset(xfer, 0, sizeof(xfer));
- x = &xfer[0];
-
- cmd &= 0xff;
- x->tx_buf = &cmd;
- x->bits_per_word = 9;
- x->len = 2;
-
- if (rlen > 1 && wlen == 0) {
- /*
- * Between the command and the response data there is a
- * dummy clock cycle. Add an extra bit after the command
- * word to account for this.
- */
- x->bits_per_word = 10;
- cmd <<= 1;
- }
- spi_message_add_tail(x, &m);
-
- if (wlen) {
- x++;
- x->tx_buf = wbuf;
- x->len = wlen;
- x->bits_per_word = 9;
- spi_message_add_tail(x, &m);
- }
-
- if (rlen) {
- x++;
- x->rx_buf = rbuf;
- x->len = rlen;
- spi_message_add_tail(x, &m);
- }
-
- r = spi_sync(ddata->spi, &m);
- if (r < 0)
- dev_dbg(&ddata->spi->dev, "spi_sync %d\n", r);
-}
-
-static inline void acx565akm_cmd(struct panel_drv_data *ddata, int cmd)
-{
- acx565akm_transfer(ddata, cmd, NULL, 0, NULL, 0);
-}
-
-static inline void acx565akm_write(struct panel_drv_data *ddata,
- int reg, const u8 *buf, int len)
-{
- acx565akm_transfer(ddata, reg, buf, len, NULL, 0);
-}
-
-static inline void acx565akm_read(struct panel_drv_data *ddata,
- int reg, u8 *buf, int len)
-{
- acx565akm_transfer(ddata, reg, NULL, 0, buf, len);
-}
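/*
 * A minimal sketch (the helper below is hypothetical, not part of the
 * driver) of the word framing used by acx565akm_transfer(): bit 8 is the
 * command/data flag -- 0 for a command opcode, 1 for a parameter word,
 * which is why callers below build values such as "level | (1 << 8)".
 * Multi-byte reads need one dummy clock between command and response, so
 * the command is sent as a 10-bit word shifted left by one.
 */
static u16 acx565akm_first_word(u8 cmd, bool long_read)
{
	u16 w = cmd;			/* bit 8 clear: command opcode */

	if (long_read)
		w <<= 1;		/* room for the dummy clock cycle */

	return w;
}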
-
-static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec)
-{
- ddata->hw_guard_wait = msecs_to_jiffies(guard_msec);
- ddata->hw_guard_end = jiffies + ddata->hw_guard_wait;
-}
-
-static void hw_guard_wait(struct panel_drv_data *ddata)
-{
- unsigned long wait = ddata->hw_guard_end - jiffies;
-
- if ((long)wait > 0 && wait <= ddata->hw_guard_wait) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(wait);
- }
-}
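/*
 * A usage sketch for the guard pair above (values hypothetical): with
 * HZ=100, a 120 ms guard becomes 12 jiffies; if only 5 have elapsed since
 * hw_guard_start(), wait == 7 and the caller sleeps the remaining ticks.
 * Once the deadline has passed, the unsigned subtraction wraps, so either
 * the signed test or the "wait <= hw_guard_wait" bound rejects it and the
 * caller proceeds immediately.
 */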
-
-static void set_sleep_mode(struct panel_drv_data *ddata, int on)
-{
- int cmd;
-
- if (on)
- cmd = MIPID_CMD_SLEEP_IN;
- else
- cmd = MIPID_CMD_SLEEP_OUT;
- /*
- * We have to keep 120msec between sleep in/out commands.
- * (8.2.15, 8.2.16).
- */
- hw_guard_wait(ddata);
- acx565akm_cmd(ddata, cmd);
- hw_guard_start(ddata, 120);
-}
-
-static void set_display_state(struct panel_drv_data *ddata, int enabled)
-{
- int cmd = enabled ? MIPID_CMD_DISP_ON : MIPID_CMD_DISP_OFF;
-
- acx565akm_cmd(ddata, cmd);
-}
-
-static int panel_enabled(struct panel_drv_data *ddata)
-{
- __be32 v;
- u32 disp_status;
- int enabled;
-
- acx565akm_read(ddata, MIPID_CMD_READ_DISP_STATUS, (u8 *)&v, 4);
- disp_status = __be32_to_cpu(v);
- enabled = (disp_status & (1 << 17)) && (disp_status & (1 << 10));
- dev_dbg(&ddata->spi->dev,
- "LCD panel %senabled by bootloader (status 0x%04x)\n",
- enabled ? "" : "not ", disp_status);
- return enabled;
-}
-
-static int panel_detect(struct panel_drv_data *ddata)
-{
- acx565akm_read(ddata, MIPID_CMD_READ_DISP_ID, ddata->display_id, 3);
- dev_dbg(&ddata->spi->dev, "MIPI display ID: %02x%02x%02x\n",
- ddata->display_id[0],
- ddata->display_id[1],
- ddata->display_id[2]);
-
- switch (ddata->display_id[0]) {
- case 0x10:
- ddata->model = MIPID_VER_ACX565AKM;
- ddata->name = "acx565akm";
- ddata->has_bc = 1;
- ddata->has_cabc = 1;
- break;
- case 0x29:
- ddata->model = MIPID_VER_L4F00311;
- ddata->name = "l4f00311";
- break;
- case 0x45:
- ddata->model = MIPID_VER_LPH8923;
- ddata->name = "lph8923";
- break;
- case 0x83:
- ddata->model = MIPID_VER_LS041Y3;
- ddata->name = "ls041y3";
- break;
- default:
- ddata->name = "unknown";
- dev_err(&ddata->spi->dev, "invalid display ID\n");
- return -ENODEV;
- }
-
- ddata->revision = ddata->display_id[1];
-
- dev_info(&ddata->spi->dev, "omapfb: %s rev %02x LCD detected\n",
- ddata->name, ddata->revision);
-
- return 0;
-}
-
-/*----------------------Backlight Control-------------------------*/
-
-static void enable_backlight_ctrl(struct panel_drv_data *ddata, int enable)
-{
- u16 ctrl;
-
- acx565akm_read(ddata, MIPID_CMD_READ_CTRL_DISP, (u8 *)&ctrl, 1);
- if (enable) {
- ctrl |= CTRL_DISP_BRIGHTNESS_CTRL_ON |
- CTRL_DISP_BACKLIGHT_ON;
- } else {
- ctrl &= ~(CTRL_DISP_BRIGHTNESS_CTRL_ON |
- CTRL_DISP_BACKLIGHT_ON);
- }
-
- ctrl |= 1 << 8;
- acx565akm_write(ddata, MIPID_CMD_WRITE_CTRL_DISP, (u8 *)&ctrl, 2);
-}
-
-static void set_cabc_mode(struct panel_drv_data *ddata, unsigned int mode)
-{
- u16 cabc_ctrl;
-
- ddata->cabc_mode = mode;
- if (!ddata->enabled)
- return;
- cabc_ctrl = 0;
- acx565akm_read(ddata, MIPID_CMD_READ_CABC, (u8 *)&cabc_ctrl, 1);
- cabc_ctrl &= ~3;
- cabc_ctrl |= (1 << 8) | (mode & 3);
- acx565akm_write(ddata, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2);
-}
-
-static unsigned int get_cabc_mode(struct panel_drv_data *ddata)
-{
- return ddata->cabc_mode;
-}
-
-static unsigned int get_hw_cabc_mode(struct panel_drv_data *ddata)
-{
- u8 cabc_ctrl;
-
- acx565akm_read(ddata, MIPID_CMD_READ_CABC, &cabc_ctrl, 1);
- return cabc_ctrl & 3;
-}
-
-static void acx565akm_set_brightness(struct panel_drv_data *ddata, int level)
-{
- int bv;
-
- bv = level | (1 << 8);
- acx565akm_write(ddata, MIPID_CMD_WRITE_DISP_BRIGHTNESS, (u8 *)&bv, 2);
-
- if (level)
- enable_backlight_ctrl(ddata, 1);
- else
- enable_backlight_ctrl(ddata, 0);
-}
-
-static int acx565akm_get_actual_brightness(struct panel_drv_data *ddata)
-{
- u8 bv;
-
- acx565akm_read(ddata, MIPID_CMD_READ_DISP_BRIGHTNESS, &bv, 1);
-
- return bv;
-}
-
-
-static int acx565akm_bl_update_status(struct backlight_device *dev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
- int level;
-
- dev_dbg(&ddata->spi->dev, "%s\n", __func__);
-
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK)
- level = dev->props.brightness;
- else
- level = 0;
-
- if (ddata->has_bc)
- acx565akm_set_brightness(ddata, level);
- else
- return -ENODEV;
-
- return 0;
-}
-
-static int acx565akm_bl_get_intensity(struct backlight_device *dev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
-
- dev_dbg(&dev->dev, "%s\n", __func__);
-
- if (!ddata->has_bc)
- return -ENODEV;
-
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK) {
- if (ddata->has_bc)
- return acx565akm_get_actual_brightness(ddata);
- else
- return dev->props.brightness;
- }
-
- return 0;
-}
-
-static int acx565akm_bl_update_status_locked(struct backlight_device *dev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
- int r;
-
- mutex_lock(&ddata->mutex);
- r = acx565akm_bl_update_status(dev);
- mutex_unlock(&ddata->mutex);
-
- return r;
-}
-
-static int acx565akm_bl_get_intensity_locked(struct backlight_device *dev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
- int r;
-
- mutex_lock(&ddata->mutex);
- r = acx565akm_bl_get_intensity(dev);
- mutex_unlock(&ddata->mutex);
-
- return r;
-}
-
-static const struct backlight_ops acx565akm_bl_ops = {
- .get_brightness = acx565akm_bl_get_intensity_locked,
- .update_status = acx565akm_bl_update_status_locked,
-};
-
-/*--------------------Auto Brightness control via Sysfs---------------------*/
-
-static const char * const cabc_modes[] = {
- "off", /* always used when CABC is not supported */
- "ui",
- "still-image",
- "moving-image",
-};
-
-static ssize_t show_cabc_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- const char *mode_str;
- int mode;
- int len;
-
- if (!ddata->has_cabc)
- mode = 0;
- else
- mode = get_cabc_mode(ddata);
- mode_str = "unknown";
- if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes))
- mode_str = cabc_modes[mode];
- len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str);
-
- return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1;
-}
-
-static ssize_t store_cabc_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) {
- const char *mode_str = cabc_modes[i];
- int cmp_len = strlen(mode_str);
-
- if (count > 0 && buf[count - 1] == '\n')
- count--;
- if (count != cmp_len)
- continue;
-
- if (strncmp(buf, mode_str, cmp_len) == 0)
- break;
- }
-
- if (i == ARRAY_SIZE(cabc_modes))
- return -EINVAL;
-
- if (!ddata->has_cabc && i != 0)
- return -EINVAL;
-
- mutex_lock(&ddata->mutex);
- set_cabc_mode(ddata, i);
- mutex_unlock(&ddata->mutex);
-
- return count;
-}
-
-static ssize_t show_cabc_available_modes(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- int len;
- int i;
-
- if (!ddata->has_cabc)
- return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]);
-
- for (i = 0, len = 0;
- len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++)
- len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s",
- i ? " " : "", cabc_modes[i],
- i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : "");
-
- return len < PAGE_SIZE ? len : PAGE_SIZE - 1;
-}
-
-static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR,
- show_cabc_mode, store_cabc_mode);
-static DEVICE_ATTR(cabc_available_modes, S_IRUGO,
- show_cabc_available_modes, NULL);
-
-static struct attribute *bldev_attrs[] = {
- &dev_attr_cabc_mode.attr,
- &dev_attr_cabc_available_modes.attr,
- NULL,
-};
-
-static const struct attribute_group bldev_attr_group = {
- .attrs = bldev_attrs,
-};
-
-static int acx565akm_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void acx565akm_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- dev_dbg(&ddata->spi->dev, "%s\n", __func__);
-
-	/* FIXME: tweak this delay */
- msleep(50);
-
- if (ddata->reset_gpio)
- gpiod_set_value(ddata->reset_gpio, 1);
-
- if (ddata->enabled) {
- dev_dbg(&ddata->spi->dev, "panel already enabled\n");
- return 0;
- }
-
- /*
- * We have to meet all the following delay requirements:
- * 1. tRW: reset pulse width 10usec (7.12.1)
- * 2. tRT: reset cancel time 5msec (7.12.1)
- * 3. Providing PCLK,HS,VS signals for 2 frames = ~50msec worst
- * case (7.6.2)
- * 4. 120msec before the sleep out command (7.12.1)
- */
- msleep(120);
-
- set_sleep_mode(ddata, 0);
- ddata->enabled = 1;
-
- /* 5msec between sleep out and the next command. (8.2.16) */
- usleep_range(5000, 10000);
- set_display_state(ddata, 1);
- set_cabc_mode(ddata, ddata->cabc_mode);
-
- return acx565akm_bl_update_status(ddata->bl_dev);
-}
-
-static void acx565akm_panel_power_off(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- dev_dbg(dssdev->dev, "%s\n", __func__);
-
- if (!ddata->enabled)
- return;
-
- set_display_state(ddata, 0);
- set_sleep_mode(ddata, 1);
- ddata->enabled = 0;
- /*
- * We have to provide PCLK,HS,VS signals for 2 frames (worst case
- * ~50msec) after sending the sleep in command and asserting the
- * reset signal. We probably could assert the reset w/o the delay
- * but we still delay to avoid possible artifacts. (7.6.1)
- */
- msleep(50);
-
- if (ddata->reset_gpio)
- gpiod_set_value(ddata->reset_gpio, 0);
-
- /* FIXME need to tweak this delay */
- msleep(100);
-}
-
-static void acx565akm_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->mutex);
- acx565akm_panel_power_on(dssdev);
- mutex_unlock(&ddata->mutex);
-}
-
-static void acx565akm_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->mutex);
- acx565akm_panel_power_off(dssdev);
- mutex_unlock(&ddata->mutex);
-}
-
-static int acx565akm_get_modes(struct omap_dss_device *dssdev,
- struct drm_connector *connector)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- return omapdss_display_get_modes(connector, &ddata->vm);
-}
-
-static const struct omap_dss_device_ops acx565akm_ops = {
- .connect = acx565akm_connect,
- .disconnect = acx565akm_disconnect,
-
- .enable = acx565akm_enable,
- .disable = acx565akm_disable,
-
- .get_modes = acx565akm_get_modes,
-};
-
-static int acx565akm_probe(struct spi_device *spi)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- struct backlight_device *bldev;
- int max_brightness, brightness;
- struct backlight_properties props;
- struct gpio_desc *gpio;
- int r;
-
- dev_dbg(&spi->dev, "%s\n", __func__);
-
- spi->mode = SPI_MODE_3;
-
- ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
- if (ddata == NULL)
- return -ENOMEM;
-
- dev_set_drvdata(&spi->dev, ddata);
-
- ddata->spi = spi;
-
- mutex_init(&ddata->mutex);
-
- gpio = devm_gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(gpio)) {
- dev_err(&spi->dev, "failed to parse reset gpio\n");
- return PTR_ERR(gpio);
- }
-
- ddata->reset_gpio = gpio;
-
- if (ddata->reset_gpio)
- gpiod_set_value(ddata->reset_gpio, 1);
-
- /*
- * After reset we have to wait 5 msec before the first
- * command can be sent.
- */
- usleep_range(5000, 10000);
-
- ddata->enabled = panel_enabled(ddata);
-
- r = panel_detect(ddata);
-
- if (!ddata->enabled && ddata->reset_gpio)
- gpiod_set_value(ddata->reset_gpio, 0);
-
- if (r) {
- dev_err(&spi->dev, "%s panel detect error\n", __func__);
- return r;
- }
-
- memset(&props, 0, sizeof(props));
- props.fb_blank = FB_BLANK_UNBLANK;
- props.power = FB_BLANK_UNBLANK;
- props.type = BACKLIGHT_RAW;
-
- bldev = backlight_device_register("acx565akm", &ddata->spi->dev,
- ddata, &acx565akm_bl_ops, &props);
- if (IS_ERR(bldev))
- return PTR_ERR(bldev);
- ddata->bl_dev = bldev;
- if (ddata->has_cabc) {
- r = sysfs_create_group(&bldev->dev.kobj, &bldev_attr_group);
- if (r) {
- dev_err(&bldev->dev,
- "%s failed to create sysfs files\n", __func__);
- goto err_backlight_unregister;
- }
- ddata->cabc_mode = get_hw_cabc_mode(ddata);
- }
-
- max_brightness = 255;
-
- if (ddata->has_bc)
- brightness = acx565akm_get_actual_brightness(ddata);
- else
- brightness = 0;
-
- bldev->props.max_brightness = max_brightness;
- bldev->props.brightness = brightness;
-
- acx565akm_bl_update_status(bldev);
-
-
- ddata->vm = acx565akm_panel_vm;
-
- dssdev = &ddata->dssdev;
- dssdev->dev = &spi->dev;
- dssdev->ops = &acx565akm_ops;
- dssdev->type = OMAP_DISPLAY_TYPE_SDI;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
- dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
- | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE
- | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-
-err_backlight_unregister:
- backlight_device_unregister(bldev);
- return r;
-}
-
-static int acx565akm_remove(struct spi_device *spi)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- dev_dbg(&ddata->spi->dev, "%s\n", __func__);
-
- sysfs_remove_group(&ddata->bl_dev->dev.kobj, &bldev_attr_group);
- backlight_device_unregister(ddata->bl_dev);
-
- omapdss_device_unregister(dssdev);
-
- if (omapdss_device_is_enabled(dssdev))
- acx565akm_disable(dssdev);
-
- return 0;
-}
-
-static const struct of_device_id acx565akm_of_match[] = {
- { .compatible = "omapdss,sony,acx565akm", },
- {},
-};
-MODULE_DEVICE_TABLE(of, acx565akm_of_match);
-
-static struct spi_driver acx565akm_driver = {
- .driver = {
- .name = "acx565akm",
- .of_match_table = acx565akm_of_match,
- .suppress_bind_attrs = true,
- },
- .probe = acx565akm_probe,
- .remove = acx565akm_remove,
-};
-
-module_spi_driver(acx565akm_driver);
-
-MODULE_ALIAS("spi:sony,acx565akm");
-MODULE_AUTHOR("Nokia Corporation");
-MODULE_DESCRIPTION("acx565akm LCD Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
deleted file mode 100644
index c885018ac6ce..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ /dev/null
@@ -1,390 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Toppoly TD028TTEC1 panel support
- *
- * Copyright (C) 2008 Nokia Corporation
- * Author: Tomi Valkeinen <[email protected]>
- *
- * Neo 1973 code (jbt6k74.c):
- * Copyright (C) 2006-2007 by OpenMoko, Inc.
- * Author: Harald Welte <[email protected]>
- *
- * Ported and adapted from Neo 1973 U-Boot by:
- * H. Nikolaus Schaller <[email protected]>
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/spi/spi.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct videomode vm;
-
- struct backlight_device *backlight;
-
- struct spi_device *spi_dev;
-};
-
-static const struct videomode td028ttec1_panel_vm = {
- .hactive = 480,
- .vactive = 640,
- .pixelclock = 22153000,
- .hfront_porch = 24,
- .hsync_len = 8,
- .hback_porch = 8,
- .vfront_porch = 4,
- .vsync_len = 2,
- .vback_porch = 2,
-
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
-};
-
-#define JBT_COMMAND 0x000
-#define JBT_DATA 0x100
-
-static int jbt_ret_write_0(struct panel_drv_data *ddata, u8 reg)
-{
- int rc;
- u16 tx_buf = JBT_COMMAND | reg;
-
- rc = spi_write(ddata->spi_dev, (u8 *)&tx_buf,
- 1*sizeof(u16));
- if (rc != 0)
- dev_err(&ddata->spi_dev->dev,
- "jbt_ret_write_0 spi_write ret %d\n", rc);
-
- return rc;
-}
-
-static int jbt_reg_write_1(struct panel_drv_data *ddata, u8 reg, u8 data)
-{
- int rc;
- u16 tx_buf[2];
-
- tx_buf[0] = JBT_COMMAND | reg;
- tx_buf[1] = JBT_DATA | data;
- rc = spi_write(ddata->spi_dev, (u8 *)tx_buf,
- 2*sizeof(u16));
- if (rc != 0)
- dev_err(&ddata->spi_dev->dev,
- "jbt_reg_write_1 spi_write ret %d\n", rc);
-
- return rc;
-}
-
-static int jbt_reg_write_2(struct panel_drv_data *ddata, u8 reg, u16 data)
-{
- int rc;
- u16 tx_buf[3];
-
- tx_buf[0] = JBT_COMMAND | reg;
- tx_buf[1] = JBT_DATA | (data >> 8);
- tx_buf[2] = JBT_DATA | (data & 0xff);
-
- rc = spi_write(ddata->spi_dev, (u8 *)tx_buf,
- 3*sizeof(u16));
-
- if (rc != 0)
- dev_err(&ddata->spi_dev->dev,
- "jbt_reg_write_2 spi_write ret %d\n", rc);
-
- return rc;
-}
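/*
 * A minimal sketch (hypothetical helper) of the 9-bit word layout shared by
 * the three writers above: bit 8 distinguishes a command (JBT_COMMAND, 0)
 * from a data word (JBT_DATA, 0x100); the low eight bits carry the payload.
 */
static u16 jbt_word(u16 type, u8 payload)
{
	return type | payload;		/* type is JBT_COMMAND or JBT_DATA */
}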
-
-enum jbt_register {
- JBT_REG_SLEEP_IN = 0x10,
- JBT_REG_SLEEP_OUT = 0x11,
-
- JBT_REG_DISPLAY_OFF = 0x28,
- JBT_REG_DISPLAY_ON = 0x29,
-
- JBT_REG_RGB_FORMAT = 0x3a,
- JBT_REG_QUAD_RATE = 0x3b,
-
- JBT_REG_POWER_ON_OFF = 0xb0,
- JBT_REG_BOOSTER_OP = 0xb1,
- JBT_REG_BOOSTER_MODE = 0xb2,
- JBT_REG_BOOSTER_FREQ = 0xb3,
- JBT_REG_OPAMP_SYSCLK = 0xb4,
- JBT_REG_VSC_VOLTAGE = 0xb5,
- JBT_REG_VCOM_VOLTAGE = 0xb6,
- JBT_REG_EXT_DISPL = 0xb7,
- JBT_REG_OUTPUT_CONTROL = 0xb8,
- JBT_REG_DCCLK_DCEV = 0xb9,
- JBT_REG_DISPLAY_MODE1 = 0xba,
- JBT_REG_DISPLAY_MODE2 = 0xbb,
- JBT_REG_DISPLAY_MODE = 0xbc,
- JBT_REG_ASW_SLEW = 0xbd,
- JBT_REG_DUMMY_DISPLAY = 0xbe,
- JBT_REG_DRIVE_SYSTEM = 0xbf,
-
- JBT_REG_SLEEP_OUT_FR_A = 0xc0,
- JBT_REG_SLEEP_OUT_FR_B = 0xc1,
- JBT_REG_SLEEP_OUT_FR_C = 0xc2,
- JBT_REG_SLEEP_IN_LCCNT_D = 0xc3,
- JBT_REG_SLEEP_IN_LCCNT_E = 0xc4,
- JBT_REG_SLEEP_IN_LCCNT_F = 0xc5,
- JBT_REG_SLEEP_IN_LCCNT_G = 0xc6,
-
- JBT_REG_GAMMA1_FINE_1 = 0xc7,
- JBT_REG_GAMMA1_FINE_2 = 0xc8,
- JBT_REG_GAMMA1_INCLINATION = 0xc9,
- JBT_REG_GAMMA1_BLUE_OFFSET = 0xca,
-
- JBT_REG_BLANK_CONTROL = 0xcf,
- JBT_REG_BLANK_TH_TV = 0xd0,
- JBT_REG_CKV_ON_OFF = 0xd1,
- JBT_REG_CKV_1_2 = 0xd2,
- JBT_REG_OEV_TIMING = 0xd3,
- JBT_REG_ASW_TIMING_1 = 0xd4,
- JBT_REG_ASW_TIMING_2 = 0xd5,
-
- JBT_REG_HCLOCK_VGA = 0xec,
- JBT_REG_HCLOCK_QVGA = 0xed,
-};
-
-#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-
-static int td028ttec1_panel_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void td028ttec1_panel_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static void td028ttec1_panel_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- int r = 0;
-
- dev_dbg(dssdev->dev, "%s: state %d\n", __func__, dssdev->state);
-
- /* three times command zero */
- r |= jbt_ret_write_0(ddata, 0x00);
- usleep_range(1000, 2000);
- r |= jbt_ret_write_0(ddata, 0x00);
- usleep_range(1000, 2000);
- r |= jbt_ret_write_0(ddata, 0x00);
- usleep_range(1000, 2000);
-
- if (r) {
- dev_warn(dssdev->dev, "%s: transfer error\n", __func__);
- return;
- }
-
- /* deep standby out */
- r |= jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x17);
-
- /* RGB I/F on, RAM write off, QVGA through, SIGCON enable */
- r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE, 0x80);
-
- /* Quad mode off */
- r |= jbt_reg_write_1(ddata, JBT_REG_QUAD_RATE, 0x00);
-
- /* AVDD on, XVDD on */
- r |= jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x16);
-
- /* Output control */
- r |= jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0xfff9);
-
- /* Sleep mode off */
- r |= jbt_ret_write_0(ddata, JBT_REG_SLEEP_OUT);
-
- /* at this point we have like 50% grey */
-
- /* initialize register set */
- r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE1, 0x01);
- r |= jbt_reg_write_1(ddata, JBT_REG_DISPLAY_MODE2, 0x00);
- r |= jbt_reg_write_1(ddata, JBT_REG_RGB_FORMAT, 0x60);
- r |= jbt_reg_write_1(ddata, JBT_REG_DRIVE_SYSTEM, 0x10);
- r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_OP, 0x56);
- r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_MODE, 0x33);
- r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_FREQ, 0x11);
- r |= jbt_reg_write_1(ddata, JBT_REG_BOOSTER_FREQ, 0x11);
- r |= jbt_reg_write_1(ddata, JBT_REG_OPAMP_SYSCLK, 0x02);
- r |= jbt_reg_write_1(ddata, JBT_REG_VSC_VOLTAGE, 0x2b);
- r |= jbt_reg_write_1(ddata, JBT_REG_VCOM_VOLTAGE, 0x40);
- r |= jbt_reg_write_1(ddata, JBT_REG_EXT_DISPL, 0x03);
- r |= jbt_reg_write_1(ddata, JBT_REG_DCCLK_DCEV, 0x04);
-	/*
-	 * The register default of 0x02 in JBT_REG_ASW_SLEW would require a
-	 * 72 Hz refresh to avoid red/blue flicker; write 0x04 instead.
-	 */
- r |= jbt_reg_write_1(ddata, JBT_REG_ASW_SLEW, 0x04);
- r |= jbt_reg_write_1(ddata, JBT_REG_DUMMY_DISPLAY, 0x00);
-
- r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_A, 0x11);
- r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_B, 0x11);
- r |= jbt_reg_write_1(ddata, JBT_REG_SLEEP_OUT_FR_C, 0x11);
- r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_D, 0x2040);
- r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_E, 0x60c0);
- r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_F, 0x1020);
- r |= jbt_reg_write_2(ddata, JBT_REG_SLEEP_IN_LCCNT_G, 0x60c0);
-
- r |= jbt_reg_write_2(ddata, JBT_REG_GAMMA1_FINE_1, 0x5533);
- r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_FINE_2, 0x00);
- r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_INCLINATION, 0x00);
- r |= jbt_reg_write_1(ddata, JBT_REG_GAMMA1_BLUE_OFFSET, 0x00);
-
- r |= jbt_reg_write_2(ddata, JBT_REG_HCLOCK_VGA, 0x1f0);
- r |= jbt_reg_write_1(ddata, JBT_REG_BLANK_CONTROL, 0x02);
- r |= jbt_reg_write_2(ddata, JBT_REG_BLANK_TH_TV, 0x0804);
-
- r |= jbt_reg_write_1(ddata, JBT_REG_CKV_ON_OFF, 0x01);
- r |= jbt_reg_write_2(ddata, JBT_REG_CKV_1_2, 0x0000);
-
- r |= jbt_reg_write_2(ddata, JBT_REG_OEV_TIMING, 0x0d0e);
- r |= jbt_reg_write_2(ddata, JBT_REG_ASW_TIMING_1, 0x11a4);
- r |= jbt_reg_write_1(ddata, JBT_REG_ASW_TIMING_2, 0x0e);
-
- r |= jbt_ret_write_0(ddata, JBT_REG_DISPLAY_ON);
-
- if (r)
- dev_err(dssdev->dev, "%s: write error\n", __func__);
-
- backlight_enable(ddata->backlight);
-}
-
-static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- backlight_disable(ddata->backlight);
-
- dev_dbg(dssdev->dev, "td028ttec1_panel_disable()\n");
-
- jbt_ret_write_0(ddata, JBT_REG_DISPLAY_OFF);
- jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0x8002);
- jbt_ret_write_0(ddata, JBT_REG_SLEEP_IN);
- jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x00);
-}
-
-static int td028ttec1_panel_get_modes(struct omap_dss_device *dssdev,
- struct drm_connector *connector)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- return omapdss_display_get_modes(connector, &ddata->vm);
-}
-
-static const struct omap_dss_device_ops td028ttec1_ops = {
- .connect = td028ttec1_panel_connect,
- .disconnect = td028ttec1_panel_disconnect,
-
- .enable = td028ttec1_panel_enable,
- .disable = td028ttec1_panel_disable,
-
- .get_modes = td028ttec1_panel_get_modes,
-};
-
-static int td028ttec1_panel_probe(struct spi_device *spi)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- int r;
-
- dev_dbg(&spi->dev, "%s\n", __func__);
-
- spi->bits_per_word = 9;
- spi->mode = SPI_MODE_3;
-
- r = spi_setup(spi);
- if (r < 0) {
- dev_err(&spi->dev, "spi_setup failed: %d\n", r);
- return r;
- }
-
- ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
- if (ddata == NULL)
- return -ENOMEM;
-
- ddata->backlight = devm_of_find_backlight(&spi->dev);
- if (IS_ERR(ddata->backlight))
- return PTR_ERR(ddata->backlight);
-
- dev_set_drvdata(&spi->dev, ddata);
-
- ddata->spi_dev = spi;
-
- ddata->vm = td028ttec1_panel_vm;
-
- dssdev = &ddata->dssdev;
- dssdev->dev = &spi->dev;
- dssdev->ops = &td028ttec1_ops;
- dssdev->type = OMAP_DISPLAY_TYPE_DPI;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
- dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
-
- /*
- * Note: According to the panel documentation:
- * SYNC needs to be driven on the FALLING edge
- */
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
- | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE
- | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int td028ttec1_panel_remove(struct spi_device *spi)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- dev_dbg(&ddata->spi_dev->dev, "%s\n", __func__);
-
- omapdss_device_unregister(dssdev);
-
- td028ttec1_panel_disable(dssdev);
-
- return 0;
-}
-
-static const struct of_device_id td028ttec1_of_match[] = {
- { .compatible = "omapdss,tpo,td028ttec1", },
-	/* kept to avoid breaking older DTBs */
- { .compatible = "omapdss,toppoly,td028ttec1", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, td028ttec1_of_match);
-
-static const struct spi_device_id td028ttec1_ids[] = {
- { "toppoly,td028ttec1", 0 },
- { "tpo,td028ttec1", 0},
- { /* sentinel */ }
-};
-
-MODULE_DEVICE_TABLE(spi, td028ttec1_ids);
-
-
-static struct spi_driver td028ttec1_spi_driver = {
- .probe = td028ttec1_panel_probe,
- .remove = td028ttec1_panel_remove,
- .id_table = td028ttec1_ids,
-
- .driver = {
- .name = "panel-tpo-td028ttec1",
- .of_match_table = td028ttec1_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_spi_driver(td028ttec1_spi_driver);
-
-MODULE_AUTHOR("H. Nikolaus Schaller <[email protected]>");
-MODULE_DESCRIPTION("Toppoly TD028TTEC1 panel driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
deleted file mode 100644
index ce09217da597..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ /dev/null
@@ -1,513 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * TPO TD043MTEA1 Panel driver
- *
- * Author: Gražvydas Ignotas <[email protected]>
- * Converted to new DSS device model: Tomi Valkeinen <[email protected]>
- */
-
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/regulator/consumer.h>
-#include <linux/slab.h>
-#include <linux/spi/spi.h>
-
-#include "../dss/omapdss.h"
-
-#define TPO_R02_MODE(x) ((x) & 7)
-#define TPO_R02_MODE_800x480 7
-#define TPO_R02_NCLK_RISING BIT(3)
-#define TPO_R02_HSYNC_HIGH BIT(4)
-#define TPO_R02_VSYNC_HIGH BIT(5)
-
-#define TPO_R03_NSTANDBY BIT(0)
-#define TPO_R03_EN_CP_CLK BIT(1)
-#define TPO_R03_EN_VGL_PUMP BIT(2)
-#define TPO_R03_EN_PWM BIT(3)
-#define TPO_R03_DRIVING_CAP_100 BIT(4)
-#define TPO_R03_EN_PRE_CHARGE BIT(6)
-#define TPO_R03_SOFTWARE_CTL BIT(7)
-
-#define TPO_R04_NFLIP_H BIT(0)
-#define TPO_R04_NFLIP_V BIT(1)
-#define TPO_R04_CP_CLK_FREQ_1H BIT(2)
-#define TPO_R04_VGL_FREQ_1H BIT(4)
-
-#define TPO_R03_VAL_NORMAL (TPO_R03_NSTANDBY | TPO_R03_EN_CP_CLK | \
- TPO_R03_EN_VGL_PUMP | TPO_R03_EN_PWM | \
- TPO_R03_DRIVING_CAP_100 | TPO_R03_EN_PRE_CHARGE | \
- TPO_R03_SOFTWARE_CTL)
-
-#define TPO_R03_VAL_STANDBY (TPO_R03_DRIVING_CAP_100 | \
- TPO_R03_EN_PRE_CHARGE | TPO_R03_SOFTWARE_CTL)
-
-static const u16 tpo_td043_def_gamma[12] = {
- 105, 315, 381, 431, 490, 537, 579, 686, 780, 837, 880, 1023
-};
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct videomode vm;
-
- struct spi_device *spi;
- struct regulator *vcc_reg;
- struct gpio_desc *reset_gpio;
- u16 gamma[12];
- u32 mode;
- u32 vmirror:1;
- u32 powered_on:1;
- u32 spi_suspended:1;
- u32 power_on_resume:1;
-};
-
-static const struct videomode tpo_td043_vm = {
- .hactive = 800,
- .vactive = 480,
-
- .pixelclock = 36000000,
-
- .hsync_len = 1,
- .hfront_porch = 68,
- .hback_porch = 214,
-
- .vsync_len = 1,
- .vfront_porch = 39,
- .vback_porch = 34,
-
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
-};
-
-#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-
-static int tpo_td043_write(struct spi_device *spi, u8 addr, u8 data)
-{
- struct spi_message m;
- struct spi_transfer xfer;
- u16 w;
- int r;
-
- spi_message_init(&m);
-
- memset(&xfer, 0, sizeof(xfer));
-
- w = ((u16)addr << 10) | (1 << 8) | data;
- xfer.tx_buf = &w;
- xfer.bits_per_word = 16;
- xfer.len = 2;
- spi_message_add_tail(&xfer, &m);
-
- r = spi_sync(spi, &m);
- if (r < 0)
- dev_warn(&spi->dev, "failed to write to LCD reg (%d)\n", r);
- return r;
-}
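/*
 * A minimal sketch (hypothetical helper) of the 16-bit frame built above:
 * bits 15:10 carry the register address, bit 8 is the write flag, and
 * bits 7:0 carry the data byte; bit 9 stays zero.
 */
static u16 tpo_td043_frame(u8 addr, u8 data)
{
	return ((u16)addr << 10) | (1 << 8) | data;
}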
-
-static void tpo_td043_write_gamma(struct spi_device *spi, u16 gamma[12])
-{
- u8 i, val;
-
- /* gamma bits [9:8] */
- for (val = i = 0; i < 4; i++)
- val |= (gamma[i] & 0x300) >> ((i + 1) * 2);
- tpo_td043_write(spi, 0x11, val);
-
- for (val = i = 0; i < 4; i++)
- val |= (gamma[i+4] & 0x300) >> ((i + 1) * 2);
- tpo_td043_write(spi, 0x12, val);
-
- for (val = i = 0; i < 4; i++)
- val |= (gamma[i+8] & 0x300) >> ((i + 1) * 2);
- tpo_td043_write(spi, 0x13, val);
-
- /* gamma bits [7:0] */
- for (val = i = 0; i < 12; i++)
- tpo_td043_write(spi, 0x14 + i, gamma[i] & 0xff);
-}
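/*
 * Packing note for the loops above: each gamma entry is 10 bits wide. For
 * g[0]..g[3], bits [9:8] are shifted down into one byte, MSB-first:
 * g[0] -> bits 7:6, g[1] -> bits 5:4, g[2] -> bits 3:2, g[3] -> bits 1:0.
 * Registers 0x12 and 0x13 repeat this for g[4..7] and g[8..11]; registers
 * 0x14..0x1f then take the low eight bits of each entry.
 */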
-
-static int tpo_td043_write_mirror(struct spi_device *spi, bool h, bool v)
-{
- u8 reg4 = TPO_R04_NFLIP_H | TPO_R04_NFLIP_V |
- TPO_R04_CP_CLK_FREQ_1H | TPO_R04_VGL_FREQ_1H;
- if (h)
- reg4 &= ~TPO_R04_NFLIP_H;
- if (v)
- reg4 &= ~TPO_R04_NFLIP_V;
-
- return tpo_td043_write(spi, 4, reg4);
-}
-
-static ssize_t tpo_td043_vmirror_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ddata->vmirror);
-}
-
-static ssize_t tpo_td043_vmirror_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- int val;
- int ret;
-
- ret = kstrtoint(buf, 0, &val);
- if (ret < 0)
- return ret;
-
- val = !!val;
-
- ret = tpo_td043_write_mirror(ddata->spi, false, val);
- if (ret < 0)
- return ret;
-
- ddata->vmirror = val;
-
- return count;
-}
-
-static ssize_t tpo_td043_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ddata->mode);
-}
-
-static ssize_t tpo_td043_mode_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- long val;
- int ret;
-
- ret = kstrtol(buf, 0, &val);
- if (ret != 0 || val & ~7)
- return -EINVAL;
-
- ddata->mode = val;
-
- val |= TPO_R02_NCLK_RISING;
- tpo_td043_write(ddata->spi, 2, val);
-
- return count;
-}
-
-static ssize_t tpo_td043_gamma_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- ssize_t len = 0;
- int ret;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ddata->gamma); i++) {
- ret = snprintf(buf + len, PAGE_SIZE - len, "%u ",
- ddata->gamma[i]);
- if (ret < 0)
- return ret;
- len += ret;
- }
- buf[len - 1] = '\n';
-
- return len;
-}
-
-static ssize_t tpo_td043_gamma_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- unsigned int g[12];
- int ret;
- int i;
-
- ret = sscanf(buf, "%u %u %u %u %u %u %u %u %u %u %u %u",
- &g[0], &g[1], &g[2], &g[3], &g[4], &g[5],
- &g[6], &g[7], &g[8], &g[9], &g[10], &g[11]);
-
- if (ret != 12)
- return -EINVAL;
-
- for (i = 0; i < 12; i++)
- ddata->gamma[i] = g[i];
-
- tpo_td043_write_gamma(ddata->spi, ddata->gamma);
-
- return count;
-}
-
-static DEVICE_ATTR(vmirror, S_IRUGO | S_IWUSR,
- tpo_td043_vmirror_show, tpo_td043_vmirror_store);
-static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
- tpo_td043_mode_show, tpo_td043_mode_store);
-static DEVICE_ATTR(gamma, S_IRUGO | S_IWUSR,
- tpo_td043_gamma_show, tpo_td043_gamma_store);
-
-static struct attribute *tpo_td043_attrs[] = {
- &dev_attr_vmirror.attr,
- &dev_attr_mode.attr,
- &dev_attr_gamma.attr,
- NULL,
-};
-
-static const struct attribute_group tpo_td043_attr_group = {
- .attrs = tpo_td043_attrs,
-};
-
-static int tpo_td043_power_on(struct panel_drv_data *ddata)
-{
- int r;
-
- if (ddata->powered_on)
- return 0;
-
- r = regulator_enable(ddata->vcc_reg);
- if (r != 0)
- return r;
-
- /* wait for panel to stabilize */
- msleep(160);
-
- gpiod_set_value(ddata->reset_gpio, 0);
-
- tpo_td043_write(ddata->spi, 2,
- TPO_R02_MODE(ddata->mode) | TPO_R02_NCLK_RISING);
- tpo_td043_write(ddata->spi, 3, TPO_R03_VAL_NORMAL);
- tpo_td043_write(ddata->spi, 0x20, 0xf0);
- tpo_td043_write(ddata->spi, 0x21, 0xf0);
- tpo_td043_write_mirror(ddata->spi, false, ddata->vmirror);
- tpo_td043_write_gamma(ddata->spi, ddata->gamma);
-
- ddata->powered_on = 1;
- return 0;
-}
-
-static void tpo_td043_power_off(struct panel_drv_data *ddata)
-{
- if (!ddata->powered_on)
- return;
-
- tpo_td043_write(ddata->spi, 3,
- TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM);
-
- gpiod_set_value(ddata->reset_gpio, 1);
-
- /* wait for at least 2 vsyncs before cutting off power */
- msleep(50);
-
- tpo_td043_write(ddata->spi, 3, TPO_R03_VAL_STANDBY);
-
- regulator_disable(ddata->vcc_reg);
-
- ddata->powered_on = 0;
-}
-
-static int tpo_td043_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void tpo_td043_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static void tpo_td043_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- int r;
-
- /*
- * If we are resuming from system suspend, SPI clocks might not be
-	 * enabled yet, so we program the LCD from the SPI PM resume callback.
- */
- if (!ddata->spi_suspended) {
- r = tpo_td043_power_on(ddata);
- if (r) {
- dev_err(&ddata->spi->dev, "%s: power on failed (%d)\n",
- __func__, r);
- return;
- }
- }
-}
-
-static void tpo_td043_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (!ddata->spi_suspended)
- tpo_td043_power_off(ddata);
-}
-
-static int tpo_td043_get_modes(struct omap_dss_device *dssdev,
- struct drm_connector *connector)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- return omapdss_display_get_modes(connector, &ddata->vm);
-}
-
-static const struct omap_dss_device_ops tpo_td043_ops = {
- .connect = tpo_td043_connect,
- .disconnect = tpo_td043_disconnect,
-
- .enable = tpo_td043_enable,
- .disable = tpo_td043_disable,
-
- .get_modes = tpo_td043_get_modes,
-};
-
-static int tpo_td043_probe(struct spi_device *spi)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- struct gpio_desc *gpio;
- int r;
-
- dev_dbg(&spi->dev, "%s\n", __func__);
-
- spi->bits_per_word = 16;
- spi->mode = SPI_MODE_0;
-
- r = spi_setup(spi);
- if (r < 0) {
- dev_err(&spi->dev, "spi_setup failed: %d\n", r);
- return r;
- }
-
- ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
- if (ddata == NULL)
- return -ENOMEM;
-
- dev_set_drvdata(&spi->dev, ddata);
-
- ddata->spi = spi;
-
- ddata->mode = TPO_R02_MODE_800x480;
- memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma));
-
- ddata->vcc_reg = devm_regulator_get(&spi->dev, "vcc");
- if (IS_ERR(ddata->vcc_reg)) {
- dev_err(&spi->dev, "failed to get LCD VCC regulator\n");
- return PTR_ERR(ddata->vcc_reg);
- }
-
- gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(gpio)) {
- dev_err(&spi->dev, "failed to get reset gpio\n");
- return PTR_ERR(gpio);
- }
-
- ddata->reset_gpio = gpio;
-
- r = sysfs_create_group(&spi->dev.kobj, &tpo_td043_attr_group);
- if (r) {
- dev_err(&spi->dev, "failed to create sysfs files\n");
- return r;
- }
-
- ddata->vm = tpo_td043_vm;
-
- dssdev = &ddata->dssdev;
- dssdev->dev = &spi->dev;
- dssdev->ops = &tpo_td043_ops;
- dssdev->type = OMAP_DISPLAY_TYPE_DPI;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
- dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
-
- /*
- * Note: According to the panel documentation:
- * SYNC needs to be driven on the FALLING edge
- */
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
- | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE
- | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int tpo_td043_remove(struct spi_device *spi)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- dev_dbg(&ddata->spi->dev, "%s\n", __func__);
-
- omapdss_device_unregister(dssdev);
-
- if (omapdss_device_is_enabled(dssdev))
- tpo_td043_disable(dssdev);
-
- sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int tpo_td043_spi_suspend(struct device *dev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
-
- dev_dbg(dev, "tpo_td043_spi_suspend, tpo %p\n", ddata);
-
- ddata->power_on_resume = ddata->powered_on;
- tpo_td043_power_off(ddata);
- ddata->spi_suspended = 1;
-
- return 0;
-}
-
-static int tpo_td043_spi_resume(struct device *dev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dev);
- int ret;
-
- dev_dbg(dev, "tpo_td043_spi_resume\n");
-
- if (ddata->power_on_resume) {
- ret = tpo_td043_power_on(ddata);
- if (ret)
- return ret;
- }
- ddata->spi_suspended = 0;
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(tpo_td043_spi_pm,
- tpo_td043_spi_suspend, tpo_td043_spi_resume);
-
-static const struct of_device_id tpo_td043_of_match[] = {
- { .compatible = "omapdss,tpo,td043mtea1", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, tpo_td043_of_match);
-
-static struct spi_driver tpo_td043_spi_driver = {
- .driver = {
- .name = "panel-tpo-td043mtea1",
- .pm = &tpo_td043_spi_pm,
- .of_match_table = tpo_td043_of_match,
- .suppress_bind_attrs = true,
- },
- .probe = tpo_td043_probe,
- .remove = tpo_td043_remove,
-};
-
-module_spi_driver(tpo_td043_spi_driver);
-
-MODULE_ALIAS("spi:tpo,td043mtea1");
-MODULE_AUTHOR("Gražvydas Ignotas <[email protected]>");
-MODULE_DESCRIPTION("TPO TD043MTEA1 LCD Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 5711b7a720e6..e226324adb69 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -923,7 +923,6 @@ dss_debugfs_create_file(struct dss_device *dss, const char *name,
void *data)
{
struct dss_debugfs_entry *entry;
- struct dentry *d;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
@@ -931,15 +930,9 @@ dss_debugfs_create_file(struct dss_device *dss, const char *name,
entry->show_fn = show_fn;
entry->data = data;
+ entry->dentry = debugfs_create_file(name, 0444, dss->debugfs.root,
+ entry, &dss_debug_fops);
- d = debugfs_create_file(name, 0444, dss->debugfs.root, entry,
- &dss_debug_fops);
- if (IS_ERR(d)) {
- kfree(entry);
- return ERR_CAST(d);
- }
-
- entry->dentry = d;
return entry;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
index a140de79c50e..31502857f013 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
@@ -176,17 +176,10 @@ static const struct of_device_id omapdss_of_match[] __initconst = {
static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = {
{ .compatible = "composite-video-connector" },
{ .compatible = "hdmi-connector" },
- { .compatible = "lgphilips,lb035q02" },
- { .compatible = "nec,nl8048hl11" },
{ .compatible = "panel-dsi-cm" },
- { .compatible = "sharp,ls037v7dw01" },
- { .compatible = "sony,acx565akm" },
{ .compatible = "svideo-connector" },
{ .compatible = "ti,opa362" },
{ .compatible = "ti,tpd12s015" },
- { .compatible = "toppoly,td028ttec1" },
- { .compatible = "tpo,td028ttec1" },
- { .compatible = "tpo,td043mtea1" },
{},
};
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 84e1be981cfe..73ec99819a3d 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -53,8 +53,12 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
memset(&info, 0, sizeof(info));
info.rotation_type = OMAP_DSS_ROT_NONE;
info.rotation = DRM_MODE_ROTATE_0;
- info.global_alpha = 0xff;
+ info.global_alpha = state->alpha >> 8;
info.zorder = state->normalized_zpos;
+ if (state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
+ info.pre_mult_alpha = 1;
+ else
+ info.pre_mult_alpha = 0;
/* update scanout: */
omap_framebuffer_update_scanout(state->fb, state, &info);
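/*
 * A note on the conversion above: DRM plane alpha is a 16-bit value
 * (0x0000..0xffff) while the overlay hardware takes an 8-bit global alpha,
 * so the high byte is kept: 0xffff >> 8 == 0xff (opaque) and
 * 0x8000 >> 8 == 0x80 (about 50%).
 */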
@@ -285,6 +289,9 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
omap_plane_install_properties(plane, &plane->base);
drm_plane_create_zpos_property(plane, 0, 0, num_planes - 1);
+ drm_plane_create_alpha_property(plane);
+ drm_plane_create_blend_mode_property(plane, BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
return plane;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index eaecd40cc32e..f152bc4eeb53 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -103,6 +103,14 @@ config DRM_PANEL_SAMSUNG_LD9040
depends on OF && SPI
select VIDEOMODE_HELPERS
+config DRM_PANEL_LG_LB035Q02
+ tristate "LG LB035Q024573 RGB panel"
+ depends on GPIOLIB && OF && SPI
+ help
+ Say Y here if you want to enable support for the LB035Q02 RGB panel
+ (found on the Gumstix Overo Palo35 board). To compile this driver as
+ a module, choose M here.
+
config DRM_PANEL_LG_LG4573
tristate "LG4573 RGB/SPI panel"
depends on OF && SPI
@@ -111,6 +119,14 @@ config DRM_PANEL_LG_LG4573
Say Y here if you want to enable support for LG4573 RGB panel.
To compile this driver as a module, choose M here.
+config DRM_PANEL_NEC_NL8048HL11
+ tristate "NEC NL8048HL11 RGB panel"
+ depends on GPIOLIB && OF && SPI
+ help
+ Say Y here if you want to enable support for the NEC NL8048HL11 RGB
+ panel (found on the Zoom2/3/3630 SDP boards). To compile this driver
+ as a module, choose M here.
+
config DRM_PANEL_NOVATEK_NT39016
tristate "Novatek NT39016 RGB/SPI panel"
depends on OF && SPI
@@ -266,6 +282,13 @@ config DRM_PANEL_SHARP_LQ101R1SX01
To compile this driver as a module, choose M here: the module
will be called panel-sharp-lq101r1sx01.
+config DRM_PANEL_SHARP_LS037V7DW01
+ tristate "Sharp LS037V7DW01 VGA LCD panel"
+ depends on GPIOLIB && OF && REGULATOR
+ help
+	  Say Y here if you want to enable support for the Sharp LS037V7DW01 VGA
+ (480x640) LCD panel (found on the TI SDP3430 board).
+
config DRM_PANEL_SHARP_LS043T1LE01
tristate "Sharp LS043T1LE01 qHD video mode panel"
depends on OF
@@ -293,6 +316,29 @@ config DRM_PANEL_SITRONIX_ST7789V
Say Y here if you want to enable support for the Sitronix
ST7789V controller for 240x320 LCD panels
+config DRM_PANEL_SONY_ACX565AKM
+ tristate "Sony ACX565AKM panel"
+ depends on GPIOLIB && OF && SPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Sony ACX565AKM
+	  800x480 3.5" panel (found on the Nokia N900).
+
+config DRM_PANEL_TPO_TD028TTEC1
+ tristate "Toppoly (TPO) TD028TTEC1 panel driver"
+ depends on OF && SPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+	  Say Y here if you want to enable support for the TPO TD028TTEC1 480x640
+ 2.8" panel (found on the OpenMoko Neo FreeRunner and Neo 1973).
+
+config DRM_PANEL_TPO_TD043MTEA1
+ tristate "Toppoly (TPO) TD043MTEA1 panel driver"
+ depends on GPIOLIB && OF && REGULATOR && SPI
+ help
+	  Say Y here if you want to enable support for the TPO TD043MTEA1 800x480
+ 4.3" panel (found on the OMAP3 Pandora board).
+
config DRM_PANEL_TPO_TPG110
tristate "TPO TPG 800x400 panel"
depends on OF && SPI && GPIOLIB
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 62dae45f8f74..b6cd39fe0f20 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -8,7 +8,9 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o
+obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
+obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
@@ -27,8 +29,12 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
+obj-$(CONFIG_DRM_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7701) += panel-sitronix-st7701.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
+obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
+obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
+obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 53dd1e128795..3c58f63adbf7 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -349,7 +349,6 @@ static const struct regmap_config ili9322_regmap_config = {
static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
{
- struct drm_connector *connector = panel->connector;
u8 reg;
int ret;
int i;
@@ -407,23 +406,11 @@ static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
* Polarity and inverted color order for RGB input.
* None of this applies in the BT.656 mode.
*/
- if (ili->conf->dclk_active_high) {
+ reg = 0;
+ if (ili->conf->dclk_active_high)
reg = ILI9322_POL_DCLK;
- connector->display_info.bus_flags |=
- DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
- } else {
- reg = 0;
- connector->display_info.bus_flags |=
- DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
- }
- if (ili->conf->de_active_high) {
+ if (ili->conf->de_active_high)
reg |= ILI9322_POL_DE;
- connector->display_info.bus_flags |=
- DRM_BUS_FLAG_DE_HIGH;
- } else {
- connector->display_info.bus_flags |=
- DRM_BUS_FLAG_DE_LOW;
- }
if (ili->conf->hsync_active_high)
reg |= ILI9322_POL_HSYNC;
if (ili->conf->vsync_active_high)
@@ -659,9 +646,20 @@ static int ili9322_get_modes(struct drm_panel *panel)
struct drm_connector *connector = panel->connector;
struct ili9322 *ili = panel_to_ili9322(panel);
struct drm_display_mode *mode;
+ struct drm_display_info *info;
+
+ info = &connector->display_info;
+ info->width_mm = ili->conf->width_mm;
+ info->height_mm = ili->conf->height_mm;
+ if (ili->conf->dclk_active_high)
+ info->bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
+ else
+ info->bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
- connector->display_info.width_mm = ili->conf->width_mm;
- connector->display_info.height_mm = ili->conf->height_mm;
+ if (ili->conf->de_active_high)
+ info->bus_flags |= DRM_BUS_FLAG_DE_HIGH;
+ else
+ info->bus_flags |= DRM_BUS_FLAG_DE_LOW;
switch (ili->input) {
case ILI9322_INPUT_SRGB_DUMMY_320X240:
diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
new file mode 100644
index 000000000000..fc82a525b071
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LG.Philips LB035Q02 LCD Panel Driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated
+ *
+ * Based on the omapdrm-specific panel-lgphilips-lb035q02 driver
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ * Author: Tomi Valkeinen <[email protected]>
+ *
+ * Based on a driver by: Steve Sakoman <[email protected]>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct lb035q02_device {
+ struct drm_panel panel;
+
+ struct spi_device *spi;
+ struct gpio_desc *enable_gpio;
+};
+
+#define to_lb035q02_device(p) container_of(p, struct lb035q02_device, panel)
+
+static int lb035q02_write(struct lb035q02_device *lcd, u16 reg, u16 val)
+{
+ struct spi_message msg;
+ struct spi_transfer index_xfer = {
+ .len = 3,
+ .cs_change = 1,
+ };
+ struct spi_transfer value_xfer = {
+ .len = 3,
+ };
+ u8 buffer[16];
+
+ spi_message_init(&msg);
+
+ /* register index */
+ buffer[0] = 0x70;
+ buffer[1] = 0x00;
+ buffer[2] = reg & 0x7f;
+ index_xfer.tx_buf = buffer;
+ spi_message_add_tail(&index_xfer, &msg);
+
+ /* register value */
+ buffer[4] = 0x72;
+ buffer[5] = val >> 8;
+ buffer[6] = val;
+ value_xfer.tx_buf = buffer + 4;
+ spi_message_add_tail(&value_xfer, &msg);
+
+ return spi_sync(lcd->spi, &msg);
+}
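/*
 * A sketch of the wire format used above: each 3-byte transfer begins with
 * a start byte, 0x70 to load the 7-bit register index and 0x72 to write the
 * 16-bit register value; cs_change deasserts chip-select between the two so
 * the controller latches the index before the value arrives. (Reading the
 * start bytes as an ID plus RS/RW convention is an assumption, not stated
 * in the driver itself.)
 */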
+
+static int lb035q02_init(struct lb035q02_device *lcd)
+{
+ /* Init sequence from page 28 of the lb035q02 spec. */
+ static const struct {
+ u16 index;
+ u16 value;
+ } init_data[] = {
+ { 0x01, 0x6300 },
+ { 0x02, 0x0200 },
+ { 0x03, 0x0177 },
+ { 0x04, 0x04c7 },
+ { 0x05, 0xffc0 },
+ { 0x06, 0xe806 },
+ { 0x0a, 0x4008 },
+ { 0x0b, 0x0000 },
+ { 0x0d, 0x0030 },
+ { 0x0e, 0x2800 },
+ { 0x0f, 0x0000 },
+ { 0x16, 0x9f80 },
+ { 0x17, 0x0a0f },
+ { 0x1e, 0x00c1 },
+ { 0x30, 0x0300 },
+ { 0x31, 0x0007 },
+ { 0x32, 0x0000 },
+ { 0x33, 0x0000 },
+ { 0x34, 0x0707 },
+ { 0x35, 0x0004 },
+ { 0x36, 0x0302 },
+ { 0x37, 0x0202 },
+ { 0x3a, 0x0a0d },
+ { 0x3b, 0x0806 },
+ };
+
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(init_data); ++i) {
+ ret = lb035q02_write(lcd, init_data[i].index,
+ init_data[i].value);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lb035q02_disable(struct drm_panel *panel)
+{
+ struct lb035q02_device *lcd = to_lb035q02_device(panel);
+
+ gpiod_set_value_cansleep(lcd->enable_gpio, 0);
+
+ return 0;
+}
+
+static int lb035q02_enable(struct drm_panel *panel)
+{
+ struct lb035q02_device *lcd = to_lb035q02_device(panel);
+
+ gpiod_set_value_cansleep(lcd->enable_gpio, 1);
+
+ return 0;
+}
+
+static const struct drm_display_mode lb035q02_mode = {
+ .clock = 6500,
+ .hdisplay = 320,
+ .hsync_start = 320 + 20,
+ .hsync_end = 320 + 20 + 2,
+ .htotal = 320 + 20 + 2 + 68,
+ .vdisplay = 240,
+ .vsync_start = 240 + 4,
+ .vsync_end = 240 + 4 + 2,
+ .vtotal = 240 + 4 + 2 + 18,
+ .vrefresh = 60,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 70,
+ .height_mm = 53,
+};
+
+static int lb035q02_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &lb035q02_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = lb035q02_mode.width_mm;
+ connector->display_info.height_mm = lb035q02_mode.height_mm;
+ /*
+ * FIXME: According to the datasheet pixel data is sampled on the
+ * rising edge of the clock, but the code running on the Gumstix Overo
+ * Palo35 indicates sampling on the negative edge. This should be
+ * tested on a real device.
+ */
+ connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs lb035q02_funcs = {
+ .disable = lb035q02_disable,
+ .enable = lb035q02_enable,
+ .get_modes = lb035q02_get_modes,
+};
+
+static int lb035q02_probe(struct spi_device *spi)
+{
+ struct lb035q02_device *lcd;
+ int ret;
+
+ lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
+ if (!lcd)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, lcd);
+ lcd->spi = spi;
+
+ lcd->enable_gpio = devm_gpiod_get(&spi->dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->enable_gpio)) {
+ dev_err(&spi->dev, "failed to parse enable gpio\n");
+ return PTR_ERR(lcd->enable_gpio);
+ }
+
+ ret = lb035q02_init(lcd);
+ if (ret < 0)
+ return ret;
+
+ drm_panel_init(&lcd->panel);
+ lcd->panel.dev = &lcd->spi->dev;
+ lcd->panel.funcs = &lb035q02_funcs;
+
+ return drm_panel_add(&lcd->panel);
+}
+
+static int lb035q02_remove(struct spi_device *spi)
+{
+ struct lb035q02_device *lcd = spi_get_drvdata(spi);
+
+ drm_panel_remove(&lcd->panel);
+ drm_panel_disable(&lcd->panel);
+
+ return 0;
+}
+
+static const struct of_device_id lb035q02_of_match[] = {
+ { .compatible = "lgphilips,lb035q02", },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, lb035q02_of_match);
+
+static struct spi_driver lb035q02_driver = {
+ .probe = lb035q02_probe,
+ .remove = lb035q02_remove,
+ .driver = {
+ .name = "panel-lg-lb035q02",
+ .of_match_table = lb035q02_of_match,
+ },
+};
+
+module_spi_driver(lb035q02_driver);
+
+MODULE_ALIAS("spi:lgphilips,lb035q02");
+MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
+MODULE_DESCRIPTION("LG.Philips LB035Q02 LCD Panel driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
new file mode 100644
index 000000000000..299b217c83e1
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NEC NL8048HL11 Panel Driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated
+ *
+ * Based on the omapdrm-specific panel-nec-nl8048hl11 driver
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated
+ * Author: Erik Gilling <[email protected]>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct nl8048_panel {
+ struct drm_panel panel;
+
+ struct spi_device *spi;
+ struct gpio_desc *reset_gpio;
+};
+
+#define to_nl8048_device(p) container_of(p, struct nl8048_panel, panel)
+
+static int nl8048_write(struct nl8048_panel *lcd, unsigned char addr,
+ unsigned char value)
+{
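+	/*
+	 * The bus is configured for 32-bit words in probe, so these four
+	 * bytes form a single SPI word; on a little-endian CPU that word is
+	 * (addr << 16) | (1 << 8) | value, i.e. the register address, a flag
+	 * bit (presumably marking a write), then the value.
+	 */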
+ u8 data[4] = { value, 0x01, addr, 0x00 };
+ int ret;
+
+ ret = spi_write(lcd->spi, data, sizeof(data));
+ if (ret)
+ dev_err(&lcd->spi->dev, "SPI write to %u failed: %d\n",
+ addr, ret);
+
+ return ret;
+}
+
+static int nl8048_init(struct nl8048_panel *lcd)
+{
+ static const struct {
+ unsigned char addr;
+ unsigned char data;
+ } nl8048_init_seq[] = {
+ { 3, 0x01 }, { 0, 0x00 }, { 1, 0x01 }, { 4, 0x00 },
+ { 5, 0x14 }, { 6, 0x24 }, { 16, 0xd7 }, { 17, 0x00 },
+ { 18, 0x00 }, { 19, 0x55 }, { 20, 0x01 }, { 21, 0x70 },
+ { 22, 0x1e }, { 23, 0x25 }, { 24, 0x25 }, { 25, 0x02 },
+ { 26, 0x02 }, { 27, 0xa0 }, { 32, 0x2f }, { 33, 0x0f },
+ { 34, 0x0f }, { 35, 0x0f }, { 36, 0x0f }, { 37, 0x0f },
+ { 38, 0x0f }, { 39, 0x00 }, { 40, 0x02 }, { 41, 0x02 },
+ { 42, 0x02 }, { 43, 0x0f }, { 44, 0x0f }, { 45, 0x0f },
+ { 46, 0x0f }, { 47, 0x0f }, { 48, 0x0f }, { 49, 0x0f },
+ { 50, 0x00 }, { 51, 0x02 }, { 52, 0x02 }, { 53, 0x02 },
+ { 80, 0x0c }, { 83, 0x42 }, { 84, 0x42 }, { 85, 0x41 },
+ { 86, 0x14 }, { 89, 0x88 }, { 90, 0x01 }, { 91, 0x00 },
+ { 92, 0x02 }, { 93, 0x0c }, { 94, 0x1c }, { 95, 0x27 },
+ { 98, 0x49 }, { 99, 0x27 }, { 102, 0x76 }, { 103, 0x27 },
+ { 112, 0x01 }, { 113, 0x0e }, { 114, 0x02 }, { 115, 0x0c },
+ { 118, 0x0c }, { 121, 0x30 }, { 130, 0x00 }, { 131, 0x00 },
+ { 132, 0xfc }, { 134, 0x00 }, { 136, 0x00 }, { 138, 0x00 },
+ { 139, 0x00 }, { 140, 0x00 }, { 141, 0xfc }, { 143, 0x00 },
+ { 145, 0x00 }, { 147, 0x00 }, { 148, 0x00 }, { 149, 0x00 },
+ { 150, 0xfc }, { 152, 0x00 }, { 154, 0x00 }, { 156, 0x00 },
+ { 157, 0x00 },
+ };
+
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(nl8048_init_seq); ++i) {
+ ret = nl8048_write(lcd, nl8048_init_seq[i].addr,
+ nl8048_init_seq[i].data);
+ if (ret < 0)
+ return ret;
+ }
+
+ udelay(20);
+
+ return nl8048_write(lcd, 2, 0x00);
+}
+
+static int nl8048_disable(struct drm_panel *panel)
+{
+ struct nl8048_panel *lcd = to_nl8048_device(panel);
+
+ gpiod_set_value_cansleep(lcd->reset_gpio, 0);
+
+ return 0;
+}
+
+static int nl8048_enable(struct drm_panel *panel)
+{
+ struct nl8048_panel *lcd = to_nl8048_device(panel);
+
+ gpiod_set_value_cansleep(lcd->reset_gpio, 1);
+
+ return 0;
+}
+
+static const struct drm_display_mode nl8048_mode = {
+	/* NEC pixel clock ratings: MIN 21.8 MHz, TYP 23.8 MHz, MAX 25.7 MHz */
+ .clock = 23800,
+ .hdisplay = 800,
+ .hsync_start = 800 + 6,
+ .hsync_end = 800 + 6 + 1,
+ .htotal = 800 + 6 + 1 + 4,
+ .vdisplay = 480,
+ .vsync_start = 480 + 3,
+ .vsync_end = 480 + 3 + 1,
+ .vtotal = 480 + 3 + 1 + 4,
+ .vrefresh = 60,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 89,
+ .height_mm = 53,
+};
+
+static int nl8048_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &nl8048_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = nl8048_mode.width_mm;
+ connector->display_info.height_mm = nl8048_mode.height_mm;
+ connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs nl8048_funcs = {
+ .disable = nl8048_disable,
+ .enable = nl8048_enable,
+ .get_modes = nl8048_get_modes,
+};
+
+static int __maybe_unused nl8048_suspend(struct device *dev)
+{
+ struct nl8048_panel *lcd = dev_get_drvdata(dev);
+
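+	/* Register 2 presumably controls standby: 0x01 enters, 0x00 exits. */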
+ nl8048_write(lcd, 2, 0x01);
+ msleep(40);
+
+ return 0;
+}
+
+static int __maybe_unused nl8048_resume(struct device *dev)
+{
+ struct nl8048_panel *lcd = dev_get_drvdata(dev);
+
+ /* Reinitialize the panel. */
+ spi_setup(lcd->spi);
+ nl8048_write(lcd, 2, 0x00);
+ nl8048_init(lcd);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(nl8048_pm_ops, nl8048_suspend, nl8048_resume);
+
+static int nl8048_probe(struct spi_device *spi)
+{
+ struct nl8048_panel *lcd;
+ int ret;
+
+ lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
+ if (!lcd)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, lcd);
+ lcd->spi = spi;
+
+ lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->reset_gpio)) {
+ dev_err(&spi->dev, "failed to parse reset gpio\n");
+ return PTR_ERR(lcd->reset_gpio);
+ }
+
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 32;
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "failed to setup SPI: %d\n", ret);
+ return ret;
+ }
+
+ ret = nl8048_init(lcd);
+ if (ret < 0)
+ return ret;
+
+ drm_panel_init(&lcd->panel);
+ lcd->panel.dev = &lcd->spi->dev;
+ lcd->panel.funcs = &nl8048_funcs;
+
+ return drm_panel_add(&lcd->panel);
+}
+
+static int nl8048_remove(struct spi_device *spi)
+{
+ struct nl8048_panel *lcd = spi_get_drvdata(spi);
+
+ drm_panel_remove(&lcd->panel);
+ drm_panel_disable(&lcd->panel);
+ drm_panel_unprepare(&lcd->panel);
+
+ return 0;
+}
+
+static const struct of_device_id nl8048_of_match[] = {
+ { .compatible = "nec,nl8048hl11", },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, nl8048_of_match);
+
+static struct spi_driver nl8048_driver = {
+ .probe = nl8048_probe,
+ .remove = nl8048_remove,
+ .driver = {
+ .name = "panel-nec-nl8048hl11",
+ .pm = &nl8048_pm_ops,
+ .of_match_table = nl8048_of_match,
+ },
+};
+
+module_spi_driver(nl8048_driver);
+
+MODULE_ALIAS("spi:nec,nl8048hl11");
+MODULE_AUTHOR("Erik Gilling <[email protected]>");
+MODULE_DESCRIPTION("NEC-NL8048HL11 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
new file mode 100644
index 000000000000..46cd9a250129
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sharp LS037V7DW01 LCD Panel Driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated
+ *
+ * Based on the omapdrm-specific panel-sharp-ls037v7dw01 driver
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct ls037v7dw01_panel {
+ struct drm_panel panel;
+ struct platform_device *pdev;
+
+ struct regulator *vdd;
+ struct gpio_desc *resb_gpio; /* low = reset active min 20 us */
+ struct gpio_desc *ini_gpio; /* high = power on */
+ struct gpio_desc *mo_gpio; /* low = 480x640, high = 240x320 */
+ struct gpio_desc *lr_gpio; /* high = conventional horizontal scanning */
+ struct gpio_desc *ud_gpio; /* high = conventional vertical scanning */
+};
+
+#define to_ls037v7dw01_device(p) \
+ container_of(p, struct ls037v7dw01_panel, panel)
+
+static int ls037v7dw01_disable(struct drm_panel *panel)
+{
+ struct ls037v7dw01_panel *lcd = to_ls037v7dw01_device(panel);
+
+ gpiod_set_value_cansleep(lcd->ini_gpio, 0);
+ gpiod_set_value_cansleep(lcd->resb_gpio, 0);
+
+ /* Wait at least 5 vsyncs after disabling the LCD. */
+ msleep(100);
+
+ return 0;
+}
+
+static int ls037v7dw01_unprepare(struct drm_panel *panel)
+{
+ struct ls037v7dw01_panel *lcd = to_ls037v7dw01_device(panel);
+
+ regulator_disable(lcd->vdd);
+ return 0;
+}
+
+static int ls037v7dw01_prepare(struct drm_panel *panel)
+{
+ struct ls037v7dw01_panel *lcd = to_ls037v7dw01_device(panel);
+ int ret;
+
+ ret = regulator_enable(lcd->vdd);
+ if (ret < 0)
+ dev_err(&lcd->pdev->dev, "%s: failed to enable regulator\n",
+ __func__);
+
+ return ret;
+}
+
+static int ls037v7dw01_enable(struct drm_panel *panel)
+{
+ struct ls037v7dw01_panel *lcd = to_ls037v7dw01_device(panel);
+
+	/* Wait a couple of vsyncs before enabling the LCD. */
+ msleep(50);
+
+ gpiod_set_value_cansleep(lcd->resb_gpio, 1);
+ gpiod_set_value_cansleep(lcd->ini_gpio, 1);
+
+ return 0;
+}
+
+static const struct drm_display_mode ls037v7dw01_mode = {
+ .clock = 19200,
+ .hdisplay = 480,
+ .hsync_start = 480 + 1,
+ .hsync_end = 480 + 1 + 2,
+ .htotal = 480 + 1 + 2 + 28,
+ .vdisplay = 640,
+ .vsync_start = 640 + 1,
+ .vsync_end = 640 + 1 + 1,
+ .vtotal = 640 + 1 + 1 + 1,
+ .vrefresh = 58,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 56,
+ .height_mm = 75,
+};
+
+static int ls037v7dw01_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &ls037v7dw01_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = ls037v7dw01_mode.width_mm;
+ connector->display_info.height_mm = ls037v7dw01_mode.height_mm;
+ /*
+ * FIXME: According to the datasheet pixel data is sampled on the
+ * rising edge of the clock, but the code running on the SDP3430
+ * indicates sampling on the negative edge. This should be tested on a
+ * real device.
+ */
+ connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs ls037v7dw01_funcs = {
+ .disable = ls037v7dw01_disable,
+ .unprepare = ls037v7dw01_unprepare,
+ .prepare = ls037v7dw01_prepare,
+ .enable = ls037v7dw01_enable,
+ .get_modes = ls037v7dw01_get_modes,
+};
+
+static int ls037v7dw01_probe(struct platform_device *pdev)
+{
+ struct ls037v7dw01_panel *lcd;
+
+ lcd = devm_kzalloc(&pdev->dev, sizeof(*lcd), GFP_KERNEL);
+ if (!lcd)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, lcd);
+ lcd->pdev = pdev;
+
+ lcd->vdd = devm_regulator_get(&pdev->dev, "envdd");
+ if (IS_ERR(lcd->vdd)) {
+ dev_err(&pdev->dev, "failed to get regulator\n");
+ return PTR_ERR(lcd->vdd);
+ }
+
+ lcd->ini_gpio = devm_gpiod_get(&pdev->dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->ini_gpio)) {
+ dev_err(&pdev->dev, "failed to get enable gpio\n");
+ return PTR_ERR(lcd->ini_gpio);
+ }
+
+ lcd->resb_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->resb_gpio)) {
+ dev_err(&pdev->dev, "failed to get reset gpio\n");
+ return PTR_ERR(lcd->resb_gpio);
+ }
+
+ lcd->mo_gpio = devm_gpiod_get_index(&pdev->dev, "mode", 0,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->mo_gpio)) {
+ dev_err(&pdev->dev, "failed to get mode[0] gpio\n");
+ return PTR_ERR(lcd->mo_gpio);
+ }
+
+ lcd->lr_gpio = devm_gpiod_get_index(&pdev->dev, "mode", 1,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->lr_gpio)) {
+ dev_err(&pdev->dev, "failed to get mode[1] gpio\n");
+ return PTR_ERR(lcd->lr_gpio);
+ }
+
+ lcd->ud_gpio = devm_gpiod_get_index(&pdev->dev, "mode", 2,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->ud_gpio)) {
+ dev_err(&pdev->dev, "failed to get mode[2] gpio\n");
+ return PTR_ERR(lcd->ud_gpio);
+ }
+
+ drm_panel_init(&lcd->panel);
+ lcd->panel.dev = &pdev->dev;
+ lcd->panel.funcs = &ls037v7dw01_funcs;
+
+ return drm_panel_add(&lcd->panel);
+}
+
+static int ls037v7dw01_remove(struct platform_device *pdev)
+{
+ struct ls037v7dw01_panel *lcd = platform_get_drvdata(pdev);
+
+ drm_panel_remove(&lcd->panel);
+ drm_panel_disable(&lcd->panel);
+ drm_panel_unprepare(&lcd->panel);
+
+ return 0;
+}
+
+static const struct of_device_id ls037v7dw01_of_match[] = {
+ { .compatible = "sharp,ls037v7dw01", },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, ls037v7dw01_of_match);
+
+static struct platform_driver ls037v7dw01_driver = {
+ .probe = ls037v7dw01_probe,
+ .remove = ls037v7dw01_remove,
+ .driver = {
+ .name = "panel-sharp-ls037v7dw01",
+ .of_match_table = ls037v7dw01_of_match,
+ },
+};
+
+module_platform_driver(ls037v7dw01_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
+MODULE_DESCRIPTION("Sharp LS037V7DW01 Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index bff7578f84dd..28fa6ba7b767 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2833,6 +2833,64 @@ static const struct panel_desc tianma_tm070rvhg71 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
};
+static const struct drm_display_mode ti_nspire_cx_lcd_mode[] = {
+ {
+ .clock = 10000,
+ .hdisplay = 320,
+ .hsync_start = 320 + 50,
+ .hsync_end = 320 + 50 + 6,
+ .htotal = 320 + 50 + 6 + 38,
+ .vdisplay = 240,
+ .vsync_start = 240 + 3,
+ .vsync_end = 240 + 3 + 1,
+ .vtotal = 240 + 3 + 1 + 17,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ },
+};
+
+static const struct panel_desc ti_nspire_cx_lcd_panel = {
+ .modes = ti_nspire_cx_lcd_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 65,
+ .height = 49,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+};
+
+static const struct drm_display_mode ti_nspire_classic_lcd_mode[] = {
+ {
+ .clock = 10000,
+ .hdisplay = 320,
+ .hsync_start = 320 + 6,
+ .hsync_end = 320 + 6 + 6,
+ .htotal = 320 + 6 + 6 + 6,
+ .vdisplay = 240,
+ .vsync_start = 240 + 0,
+ .vsync_end = 240 + 0 + 1,
+ .vtotal = 240 + 0 + 1 + 0,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+ },
+};
+
+static const struct panel_desc ti_nspire_classic_lcd_panel = {
+ .modes = ti_nspire_classic_lcd_mode,
+ .num_modes = 1,
+	/* The grayscale panel has 8 bits for its single color component (Y). */
+ .bpc = 8,
+ .size = {
+ .width = 71,
+ .height = 53,
+ },
+ /* This is the grayscale bus format */
+ .bus_format = MEDIA_BUS_FMT_Y8_1X8,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
static const struct drm_display_mode toshiba_lt089ac29000_mode = {
.clock = 79500,
.hdisplay = 1280,
@@ -3303,6 +3361,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "tianma,tm070rvhg71",
.data = &tianma_tm070rvhg71,
}, {
+ .compatible = "ti,nspire-cx-lcd-panel",
+ .data = &ti_nspire_cx_lcd_panel,
+ }, {
+ .compatible = "ti,nspire-classic-lcd-panel",
+ .data = &ti_nspire_classic_lcd_panel,
+ }, {
.compatible = "toshiba,lt089ac29000",
.data = &toshiba_lt089ac29000,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
new file mode 100644
index 000000000000..305259b58767
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -0,0 +1,701 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sony ACX565AKM LCD Panel driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated
+ *
+ * Based on the omapdrm-specific panel-sony-acx565akm driver
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Author: Imre Deak <[email protected]>
+ */
+
+/*
+ * TODO (to be addressed with hardware access to test the changes):
+ *
+ * - Update backlight support to use backlight_update_status() etc.
+ * - Use prepare/unprepare for the basic power on/off of the backlight
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/spi/spi.h>
+#include <video/mipi_display.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define CTRL_DISP_BRIGHTNESS_CTRL_ON BIT(5)
+#define CTRL_DISP_AMBIENT_LIGHT_CTRL_ON BIT(4)
+#define CTRL_DISP_BACKLIGHT_ON BIT(2)
+#define CTRL_DISP_AUTO_BRIGHTNESS_ON BIT(1)
+
+#define MIPID_CMD_WRITE_CABC 0x55
+#define MIPID_CMD_READ_CABC 0x56
+
+#define MIPID_VER_LPH8923 3
+#define MIPID_VER_LS041Y3 4
+#define MIPID_VER_L4F00311 8
+#define MIPID_VER_ACX565AKM 9
+
+struct acx565akm_panel {
+ struct drm_panel panel;
+
+ struct spi_device *spi;
+ struct gpio_desc *reset_gpio;
+ struct backlight_device *backlight;
+
+ struct mutex mutex;
+
+ const char *name;
+ u8 display_id[3];
+ int model;
+ int revision;
+ bool has_bc;
+ bool has_cabc;
+
+ bool enabled;
+ unsigned int cabc_mode;
+ /*
+ * Next value of jiffies when we can issue the next sleep in/out
+ * command.
+ */
+ unsigned long hw_guard_end;
+ unsigned long hw_guard_wait; /* max guard time in jiffies */
+};
+
+#define to_acx565akm_device(p) container_of(p, struct acx565akm_panel, panel)
+
+static void acx565akm_transfer(struct acx565akm_panel *lcd, int cmd,
+ const u8 *wbuf, int wlen, u8 *rbuf, int rlen)
+{
+ struct spi_message m;
+ struct spi_transfer *x, xfer[5];
+ int ret;
+
+ spi_message_init(&m);
+
+ memset(xfer, 0, sizeof(xfer));
+ x = &xfer[0];
+
+ cmd &= 0xff;
+ x->tx_buf = &cmd;
+ x->bits_per_word = 9;
+ x->len = 2;
+
+ if (rlen > 1 && wlen == 0) {
+ /*
+ * Between the command and the response data there is a
+ * dummy clock cycle. Add an extra bit after the command
+ * word to account for this.
+ */
+ x->bits_per_word = 10;
+ cmd <<= 1;
+ }
+ spi_message_add_tail(x, &m);
+
+ if (wlen) {
+ x++;
+ x->tx_buf = wbuf;
+ x->len = wlen;
+ x->bits_per_word = 9;
+ spi_message_add_tail(x, &m);
+ }
+
+ if (rlen) {
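+		/*
+		 * No bits_per_word override for the read transfer, so the
+		 * response is clocked in as standard 8-bit bytes.
+		 */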
+ x++;
+ x->rx_buf = rbuf;
+ x->len = rlen;
+ spi_message_add_tail(x, &m);
+ }
+
+ ret = spi_sync(lcd->spi, &m);
+ if (ret < 0)
+ dev_dbg(&lcd->spi->dev, "spi_sync %d\n", ret);
+}
+
+static inline void acx565akm_cmd(struct acx565akm_panel *lcd, int cmd)
+{
+ acx565akm_transfer(lcd, cmd, NULL, 0, NULL, 0);
+}
+
+static inline void acx565akm_write(struct acx565akm_panel *lcd,
+ int reg, const u8 *buf, int len)
+{
+ acx565akm_transfer(lcd, reg, buf, len, NULL, 0);
+}
+
+static inline void acx565akm_read(struct acx565akm_panel *lcd,
+ int reg, u8 *buf, int len)
+{
+ acx565akm_transfer(lcd, reg, NULL, 0, buf, len);
+}
+
+/* -----------------------------------------------------------------------------
+ * Auto Brightness Control via sysfs
+ */
+
+static unsigned int acx565akm_get_cabc_mode(struct acx565akm_panel *lcd)
+{
+ return lcd->cabc_mode;
+}
+
+static void acx565akm_set_cabc_mode(struct acx565akm_panel *lcd,
+ unsigned int mode)
+{
+ u16 cabc_ctrl;
+
+ lcd->cabc_mode = mode;
+ if (!lcd->enabled)
+ return;
+ cabc_ctrl = 0;
+ acx565akm_read(lcd, MIPID_CMD_READ_CABC, (u8 *)&cabc_ctrl, 1);
+ cabc_ctrl &= ~3;
+ cabc_ctrl |= (1 << 8) | (mode & 3);
+ acx565akm_write(lcd, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2);
+}
+
+static unsigned int acx565akm_get_hw_cabc_mode(struct acx565akm_panel *lcd)
+{
+ u8 cabc_ctrl;
+
+ acx565akm_read(lcd, MIPID_CMD_READ_CABC, &cabc_ctrl, 1);
+ return cabc_ctrl & 3;
+}
+
+static const char * const acx565akm_cabc_modes[] = {
+ "off", /* always used when CABC is not supported */
+ "ui",
+ "still-image",
+ "moving-image",
+};
+
+static ssize_t cabc_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acx565akm_panel *lcd = dev_get_drvdata(dev);
+ const char *mode_str;
+ int mode;
+
+ if (!lcd->has_cabc)
+ mode = 0;
+ else
+ mode = acx565akm_get_cabc_mode(lcd);
+
+ mode_str = "unknown";
+ if (mode >= 0 && mode < ARRAY_SIZE(acx565akm_cabc_modes))
+ mode_str = acx565akm_cabc_modes[mode];
+
+ return sprintf(buf, "%s\n", mode_str);
+}
+
+static ssize_t cabc_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acx565akm_panel *lcd = dev_get_drvdata(dev);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(acx565akm_cabc_modes); i++) {
+ const char *mode_str = acx565akm_cabc_modes[i];
+ int cmp_len = strlen(mode_str);
+
+ if (count > 0 && buf[count - 1] == '\n')
+ count--;
+ if (count != cmp_len)
+ continue;
+
+ if (strncmp(buf, mode_str, cmp_len) == 0)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(acx565akm_cabc_modes))
+ return -EINVAL;
+
+ if (!lcd->has_cabc && i != 0)
+ return -EINVAL;
+
+ mutex_lock(&lcd->mutex);
+ acx565akm_set_cabc_mode(lcd, i);
+ mutex_unlock(&lcd->mutex);
+
+ return count;
+}
+
+static ssize_t cabc_available_modes_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acx565akm_panel *lcd = dev_get_drvdata(dev);
+ unsigned int i;
+ size_t len = 0;
+
+ if (!lcd->has_cabc)
+ return sprintf(buf, "%s\n", acx565akm_cabc_modes[0]);
+
+ for (i = 0; i < ARRAY_SIZE(acx565akm_cabc_modes); i++)
+ len += sprintf(&buf[len], "%s%s", i ? " " : "",
+ acx565akm_cabc_modes[i]);
+
+ buf[len++] = '\n';
+
+ return len;
+}
+
+static DEVICE_ATTR_RW(cabc_mode);
+static DEVICE_ATTR_RO(cabc_available_modes);
+
+static struct attribute *acx565akm_cabc_attrs[] = {
+ &dev_attr_cabc_mode.attr,
+ &dev_attr_cabc_available_modes.attr,
+ NULL,
+};
+
+static const struct attribute_group acx565akm_cabc_attr_group = {
+ .attrs = acx565akm_cabc_attrs,
+};
+
+/* -----------------------------------------------------------------------------
+ * Backlight Device
+ */
+
+static int acx565akm_get_actual_brightness(struct acx565akm_panel *lcd)
+{
+ u8 bv;
+
+ acx565akm_read(lcd, MIPI_DCS_GET_DISPLAY_BRIGHTNESS, &bv, 1);
+
+ return bv;
+}
+
+static void acx565akm_set_brightness(struct acx565akm_panel *lcd, int level)
+{
+ u16 ctrl;
+ int bv;
+
+ bv = level | (1 << 8);
+ acx565akm_write(lcd, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, (u8 *)&bv, 2);
+
+ acx565akm_read(lcd, MIPI_DCS_GET_CONTROL_DISPLAY, (u8 *)&ctrl, 1);
+ if (level)
+ ctrl |= CTRL_DISP_BRIGHTNESS_CTRL_ON |
+ CTRL_DISP_BACKLIGHT_ON;
+ else
+ ctrl &= ~(CTRL_DISP_BRIGHTNESS_CTRL_ON |
+ CTRL_DISP_BACKLIGHT_ON);
+
+ ctrl |= 1 << 8;
+ acx565akm_write(lcd, MIPI_DCS_WRITE_CONTROL_DISPLAY, (u8 *)&ctrl, 2);
+}
+
+static int acx565akm_bl_update_status_locked(struct backlight_device *dev)
+{
+ struct acx565akm_panel *lcd = dev_get_drvdata(&dev->dev);
+ int level;
+
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK)
+ level = dev->props.brightness;
+ else
+ level = 0;
+
+ acx565akm_set_brightness(lcd, level);
+
+ return 0;
+}
+
+static int acx565akm_bl_update_status(struct backlight_device *dev)
+{
+ struct acx565akm_panel *lcd = dev_get_drvdata(&dev->dev);
+ int ret;
+
+ mutex_lock(&lcd->mutex);
+ ret = acx565akm_bl_update_status_locked(dev);
+ mutex_unlock(&lcd->mutex);
+
+ return ret;
+}
+
+static int acx565akm_bl_get_intensity(struct backlight_device *dev)
+{
+ struct acx565akm_panel *lcd = dev_get_drvdata(&dev->dev);
+ unsigned int intensity;
+
+ mutex_lock(&lcd->mutex);
+
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK)
+ intensity = acx565akm_get_actual_brightness(lcd);
+ else
+ intensity = 0;
+
+ mutex_unlock(&lcd->mutex);
+
+ return intensity;
+}
+
+static const struct backlight_ops acx565akm_bl_ops = {
+ .get_brightness = acx565akm_bl_get_intensity,
+ .update_status = acx565akm_bl_update_status,
+};
+
+static int acx565akm_backlight_init(struct acx565akm_panel *lcd)
+{
+ struct backlight_properties props = {
+ .fb_blank = FB_BLANK_UNBLANK,
+ .power = FB_BLANK_UNBLANK,
+ .type = BACKLIGHT_RAW,
+ };
+ int ret;
+
+ lcd->backlight = backlight_device_register(lcd->name, &lcd->spi->dev,
+ lcd, &acx565akm_bl_ops,
+ &props);
+ if (IS_ERR(lcd->backlight)) {
+ ret = PTR_ERR(lcd->backlight);
+ lcd->backlight = NULL;
+ return ret;
+ }
+
+ if (lcd->has_cabc) {
+ ret = sysfs_create_group(&lcd->backlight->dev.kobj,
+ &acx565akm_cabc_attr_group);
+ if (ret < 0) {
+ dev_err(&lcd->spi->dev,
+ "%s failed to create sysfs files\n", __func__);
+ backlight_device_unregister(lcd->backlight);
+ return ret;
+ }
+
+ lcd->cabc_mode = acx565akm_get_hw_cabc_mode(lcd);
+ }
+
+ lcd->backlight->props.max_brightness = 255;
+ lcd->backlight->props.brightness = acx565akm_get_actual_brightness(lcd);
+
+ acx565akm_bl_update_status_locked(lcd->backlight);
+
+ return 0;
+}
+
+static void acx565akm_backlight_cleanup(struct acx565akm_panel *lcd)
+{
+ if (lcd->has_cabc)
+ sysfs_remove_group(&lcd->backlight->dev.kobj,
+ &acx565akm_cabc_attr_group);
+
+ backlight_device_unregister(lcd->backlight);
+}
+
+/* -----------------------------------------------------------------------------
+ * DRM Panel Operations
+ */
+
+static void acx565akm_set_sleep_mode(struct acx565akm_panel *lcd, int on)
+{
+ int cmd = on ? MIPI_DCS_ENTER_SLEEP_MODE : MIPI_DCS_EXIT_SLEEP_MODE;
+ unsigned long wait;
+
+ /*
+ * We have to keep 120msec between sleep in/out commands.
+ * (8.2.15, 8.2.16).
+ */
+ wait = lcd->hw_guard_end - jiffies;
+ if ((long)wait > 0 && wait <= lcd->hw_guard_wait) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(wait);
+ }
+
+ acx565akm_cmd(lcd, cmd);
+
+ lcd->hw_guard_wait = msecs_to_jiffies(120);
+ lcd->hw_guard_end = jiffies + lcd->hw_guard_wait;
+}
+
+static void acx565akm_set_display_state(struct acx565akm_panel *lcd,
+ int enabled)
+{
+ int cmd = enabled ? MIPI_DCS_SET_DISPLAY_ON : MIPI_DCS_SET_DISPLAY_OFF;
+
+ acx565akm_cmd(lcd, cmd);
+}
+
+static int acx565akm_power_on(struct acx565akm_panel *lcd)
+{
+	/* FIXME: tweak this delay */
+ msleep(50);
+
+ gpiod_set_value(lcd->reset_gpio, 1);
+
+ if (lcd->enabled) {
+ dev_dbg(&lcd->spi->dev, "panel already enabled\n");
+ return 0;
+ }
+
+ /*
+ * We have to meet all the following delay requirements:
+ * 1. tRW: reset pulse width 10usec (7.12.1)
+ * 2. tRT: reset cancel time 5msec (7.12.1)
+ * 3. Providing PCLK,HS,VS signals for 2 frames = ~50msec worst
+ * case (7.6.2)
+ * 4. 120msec before the sleep out command (7.12.1)
+ */
+ msleep(120);
+
+ acx565akm_set_sleep_mode(lcd, 0);
+ lcd->enabled = true;
+
+ /* 5msec between sleep out and the next command. (8.2.16) */
+ usleep_range(5000, 10000);
+ acx565akm_set_display_state(lcd, 1);
+ acx565akm_set_cabc_mode(lcd, lcd->cabc_mode);
+
+ return acx565akm_bl_update_status_locked(lcd->backlight);
+}
+
+static void acx565akm_power_off(struct acx565akm_panel *lcd)
+{
+ if (!lcd->enabled)
+ return;
+
+ acx565akm_set_display_state(lcd, 0);
+ acx565akm_set_sleep_mode(lcd, 1);
+ lcd->enabled = false;
+ /*
+ * We have to provide PCLK,HS,VS signals for 2 frames (worst case
+ * ~50msec) after sending the sleep in command and asserting the
+ * reset signal. We probably could assert the reset w/o the delay
+ * but we still delay to avoid possible artifacts. (7.6.1)
+ */
+ msleep(50);
+
+ gpiod_set_value(lcd->reset_gpio, 0);
+
+ /* FIXME need to tweak this delay */
+ msleep(100);
+}
+
+static int acx565akm_disable(struct drm_panel *panel)
+{
+ struct acx565akm_panel *lcd = to_acx565akm_device(panel);
+
+ mutex_lock(&lcd->mutex);
+ acx565akm_power_off(lcd);
+ mutex_unlock(&lcd->mutex);
+
+ return 0;
+}
+
+static int acx565akm_enable(struct drm_panel *panel)
+{
+ struct acx565akm_panel *lcd = to_acx565akm_device(panel);
+
+ mutex_lock(&lcd->mutex);
+ acx565akm_power_on(lcd);
+ mutex_unlock(&lcd->mutex);
+
+ return 0;
+}
+
+static const struct drm_display_mode acx565akm_mode = {
+ .clock = 24000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 28,
+ .hsync_end = 800 + 28 + 4,
+ .htotal = 800 + 28 + 4 + 24,
+ .vdisplay = 480,
+ .vsync_start = 480 + 3,
+ .vsync_end = 480 + 3 + 3,
+ .vtotal = 480 + 3 + 3 + 4,
+ .vrefresh = 57,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 77,
+ .height_mm = 46,
+};
+
+static int acx565akm_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &acx565akm_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = acx565akm_mode.width_mm;
+ connector->display_info.height_mm = acx565akm_mode.height_mm;
+ connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs acx565akm_funcs = {
+ .disable = acx565akm_disable,
+ .enable = acx565akm_enable,
+ .get_modes = acx565akm_get_modes,
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe, Detect and Remove
+ */
+
+static int acx565akm_detect(struct acx565akm_panel *lcd)
+{
+ __be32 value;
+ u32 status;
+ int ret = 0;
+
+ /*
+ * After being taken out of reset the panel needs 5ms before the first
+ * command can be sent.
+ */
+ gpiod_set_value(lcd->reset_gpio, 1);
+ usleep_range(5000, 10000);
+
+ acx565akm_read(lcd, MIPI_DCS_GET_DISPLAY_STATUS, (u8 *)&value, 4);
+ status = __be32_to_cpu(value);
+ lcd->enabled = (status & (1 << 17)) && (status & (1 << 10));
+
+ dev_dbg(&lcd->spi->dev,
+ "LCD panel %s by bootloader (status 0x%04x)\n",
+ lcd->enabled ? "enabled" : "disabled ", status);
+
+ acx565akm_read(lcd, MIPI_DCS_GET_DISPLAY_ID, lcd->display_id, 3);
+ dev_dbg(&lcd->spi->dev, "MIPI display ID: %02x%02x%02x\n",
+ lcd->display_id[0], lcd->display_id[1], lcd->display_id[2]);
+
+ switch (lcd->display_id[0]) {
+ case 0x10:
+ lcd->model = MIPID_VER_ACX565AKM;
+ lcd->name = "acx565akm";
+ lcd->has_bc = 1;
+ lcd->has_cabc = 1;
+ break;
+ case 0x29:
+ lcd->model = MIPID_VER_L4F00311;
+ lcd->name = "l4f00311";
+ break;
+ case 0x45:
+ lcd->model = MIPID_VER_LPH8923;
+ lcd->name = "lph8923";
+ break;
+ case 0x83:
+ lcd->model = MIPID_VER_LS041Y3;
+ lcd->name = "ls041y3";
+ break;
+ default:
+ lcd->name = "unknown";
+ dev_err(&lcd->spi->dev, "unknown display ID\n");
+ ret = -ENODEV;
+ goto done;
+ }
+
+ lcd->revision = lcd->display_id[1];
+
+ dev_info(&lcd->spi->dev, "%s rev %02x panel detected\n",
+ lcd->name, lcd->revision);
+
+done:
+ if (!lcd->enabled)
+ gpiod_set_value(lcd->reset_gpio, 0);
+
+ return ret;
+}
+
+static int acx565akm_probe(struct spi_device *spi)
+{
+ struct acx565akm_panel *lcd;
+ int ret;
+
+ lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
+ if (!lcd)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, lcd);
+ spi->mode = SPI_MODE_3;
+
+ lcd->spi = spi;
+ mutex_init(&lcd->mutex);
+
+ lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->reset_gpio)) {
+ dev_err(&spi->dev, "failed to get reset GPIO\n");
+ return PTR_ERR(lcd->reset_gpio);
+ }
+
+ ret = acx565akm_detect(lcd);
+ if (ret < 0) {
+ dev_err(&spi->dev, "panel detection failed\n");
+ return ret;
+ }
+
+ if (lcd->has_bc) {
+ ret = acx565akm_backlight_init(lcd);
+ if (ret < 0)
+ return ret;
+ }
+
+ drm_panel_init(&lcd->panel);
+ lcd->panel.dev = &lcd->spi->dev;
+ lcd->panel.funcs = &acx565akm_funcs;
+
+ ret = drm_panel_add(&lcd->panel);
+ if (ret < 0) {
+ if (lcd->has_bc)
+ acx565akm_backlight_cleanup(lcd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int acx565akm_remove(struct spi_device *spi)
+{
+ struct acx565akm_panel *lcd = spi_get_drvdata(spi);
+
+ drm_panel_remove(&lcd->panel);
+
+ if (lcd->has_bc)
+ acx565akm_backlight_cleanup(lcd);
+
+ drm_panel_disable(&lcd->panel);
+ drm_panel_unprepare(&lcd->panel);
+
+ return 0;
+}
+
+static const struct of_device_id acx565akm_of_match[] = {
+ { .compatible = "sony,acx565akm", },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, acx565akm_of_match);
+
+static struct spi_driver acx565akm_driver = {
+ .probe = acx565akm_probe,
+ .remove = acx565akm_remove,
+ .driver = {
+ .name = "panel-sony-acx565akm",
+ .of_match_table = acx565akm_of_match,
+ },
+};
+
+module_spi_driver(acx565akm_driver);
+
+MODULE_ALIAS("spi:sony,acx565akm");
+MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("Sony ACX565AKM LCD Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
new file mode 100644
index 000000000000..d7b2e34626ef
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Toppoly TD028TTEC1 Panel Driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated
+ *
+ * Based on the omapdrm-specific panel-tpo-td028ttec1 driver
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <[email protected]>
+ *
+ * Neo 1973 code (jbt6k74.c):
+ * Copyright (C) 2006-2007 OpenMoko, Inc.
+ * Author: Harald Welte <[email protected]>
+ *
+ * Ported and adapted from Neo 1973 U-Boot by:
+ * H. Nikolaus Schaller <[email protected]>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define JBT_COMMAND 0x000
+#define JBT_DATA 0x100
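+
+/*
+ * The controller is driven with 9-bit SPI words (bits_per_word is set to 9
+ * in probe); bit 8 distinguishes parameter data from commands.
+ */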
+
+#define JBT_REG_SLEEP_IN 0x10
+#define JBT_REG_SLEEP_OUT 0x11
+
+#define JBT_REG_DISPLAY_OFF 0x28
+#define JBT_REG_DISPLAY_ON 0x29
+
+#define JBT_REG_RGB_FORMAT 0x3a
+#define JBT_REG_QUAD_RATE 0x3b
+
+#define JBT_REG_POWER_ON_OFF 0xb0
+#define JBT_REG_BOOSTER_OP 0xb1
+#define JBT_REG_BOOSTER_MODE 0xb2
+#define JBT_REG_BOOSTER_FREQ 0xb3
+#define JBT_REG_OPAMP_SYSCLK 0xb4
+#define JBT_REG_VSC_VOLTAGE 0xb5
+#define JBT_REG_VCOM_VOLTAGE 0xb6
+#define JBT_REG_EXT_DISPL 0xb7
+#define JBT_REG_OUTPUT_CONTROL 0xb8
+#define JBT_REG_DCCLK_DCEV 0xb9
+#define JBT_REG_DISPLAY_MODE1 0xba
+#define JBT_REG_DISPLAY_MODE2 0xbb
+#define JBT_REG_DISPLAY_MODE 0xbc
+#define JBT_REG_ASW_SLEW 0xbd
+#define JBT_REG_DUMMY_DISPLAY 0xbe
+#define JBT_REG_DRIVE_SYSTEM 0xbf
+
+#define JBT_REG_SLEEP_OUT_FR_A 0xc0
+#define JBT_REG_SLEEP_OUT_FR_B 0xc1
+#define JBT_REG_SLEEP_OUT_FR_C 0xc2
+#define JBT_REG_SLEEP_IN_LCCNT_D 0xc3
+#define JBT_REG_SLEEP_IN_LCCNT_E 0xc4
+#define JBT_REG_SLEEP_IN_LCCNT_F 0xc5
+#define JBT_REG_SLEEP_IN_LCCNT_G 0xc6
+
+#define JBT_REG_GAMMA1_FINE_1 0xc7
+#define JBT_REG_GAMMA1_FINE_2 0xc8
+#define JBT_REG_GAMMA1_INCLINATION 0xc9
+#define JBT_REG_GAMMA1_BLUE_OFFSET 0xca
+
+#define JBT_REG_BLANK_CONTROL 0xcf
+#define JBT_REG_BLANK_TH_TV 0xd0
+#define JBT_REG_CKV_ON_OFF 0xd1
+#define JBT_REG_CKV_1_2 0xd2
+#define JBT_REG_OEV_TIMING 0xd3
+#define JBT_REG_ASW_TIMING_1 0xd4
+#define JBT_REG_ASW_TIMING_2 0xd5
+
+#define JBT_REG_HCLOCK_VGA 0xec
+#define JBT_REG_HCLOCK_QVGA 0xed
+
+struct td028ttec1_panel {
+ struct drm_panel panel;
+
+ struct spi_device *spi;
+ struct backlight_device *backlight;
+};
+
+#define to_td028ttec1_device(p) container_of(p, struct td028ttec1_panel, panel)
+
+static int jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
+{
+ struct spi_device *spi = lcd->spi;
+ u16 tx_buf = JBT_COMMAND | reg;
+ int ret;
+
+ if (err && *err)
+ return *err;
+
+ ret = spi_write(spi, (u8 *)&tx_buf, sizeof(tx_buf));
+ if (ret < 0) {
+ dev_err(&spi->dev, "%s: SPI write failed: %d\n", __func__, ret);
+ if (err)
+ *err = ret;
+ }
+
+ return ret;
+}
+
+static int jbt_reg_write_1(struct td028ttec1_panel *lcd,
+ u8 reg, u8 data, int *err)
+{
+ struct spi_device *spi = lcd->spi;
+ u16 tx_buf[2];
+ int ret;
+
+ if (err && *err)
+ return *err;
+
+ tx_buf[0] = JBT_COMMAND | reg;
+ tx_buf[1] = JBT_DATA | data;
+
+ ret = spi_write(spi, (u8 *)tx_buf, sizeof(tx_buf));
+ if (ret < 0) {
+ dev_err(&spi->dev, "%s: SPI write failed: %d\n", __func__, ret);
+ if (err)
+ *err = ret;
+ }
+
+ return ret;
+}
+
+static int jbt_reg_write_2(struct td028ttec1_panel *lcd,
+ u8 reg, u16 data, int *err)
+{
+ struct spi_device *spi = lcd->spi;
+ u16 tx_buf[3];
+ int ret;
+
+ if (err && *err)
+ return *err;
+
+ tx_buf[0] = JBT_COMMAND | reg;
+ tx_buf[1] = JBT_DATA | (data >> 8);
+ tx_buf[2] = JBT_DATA | (data & 0xff);
+
+ ret = spi_write(spi, (u8 *)tx_buf, sizeof(tx_buf));
+ if (ret < 0) {
+ dev_err(&spi->dev, "%s: SPI write failed: %d\n", __func__, ret);
+ if (err)
+ *err = ret;
+ }
+
+ return ret;
+}
+
+static int td028ttec1_prepare(struct drm_panel *panel)
+{
+ struct td028ttec1_panel *lcd = to_td028ttec1_device(panel);
+ unsigned int i;
+ int ret = 0;
+
+ /* Three times command zero */
+ for (i = 0; i < 3; ++i) {
+ jbt_ret_write_0(lcd, 0x00, &ret);
+ usleep_range(1000, 2000);
+ }
+
+ /* deep standby out */
+ jbt_reg_write_1(lcd, JBT_REG_POWER_ON_OFF, 0x17, &ret);
+
+ /* RGB I/F on, RAM write off, QVGA through, SIGCON enable */
+ jbt_reg_write_1(lcd, JBT_REG_DISPLAY_MODE, 0x80, &ret);
+
+ /* Quad mode off */
+ jbt_reg_write_1(lcd, JBT_REG_QUAD_RATE, 0x00, &ret);
+
+ /* AVDD on, XVDD on */
+ jbt_reg_write_1(lcd, JBT_REG_POWER_ON_OFF, 0x16, &ret);
+
+ /* Output control */
+ jbt_reg_write_2(lcd, JBT_REG_OUTPUT_CONTROL, 0xfff9, &ret);
+
+ /* Sleep mode off */
+ jbt_ret_write_0(lcd, JBT_REG_SLEEP_OUT, &ret);
+
+	/* At this point the panel shows roughly 50% grey. */
+
+ /* initialize register set */
+ jbt_reg_write_1(lcd, JBT_REG_DISPLAY_MODE1, 0x01, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_DISPLAY_MODE2, 0x00, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_RGB_FORMAT, 0x60, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_DRIVE_SYSTEM, 0x10, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_BOOSTER_OP, 0x56, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_BOOSTER_MODE, 0x33, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_BOOSTER_FREQ, 0x11, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_BOOSTER_FREQ, 0x11, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_OPAMP_SYSCLK, 0x02, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_VSC_VOLTAGE, 0x2b, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_VCOM_VOLTAGE, 0x40, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_EXT_DISPL, 0x03, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_DCCLK_DCEV, 0x04, &ret);
+ /*
+	 * The default of 0x02 in JBT_REG_ASW_SLEW is responsible for the 72 Hz
+	 * requirement to avoid red/blue flicker.
+ */
+ jbt_reg_write_1(lcd, JBT_REG_ASW_SLEW, 0x04, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_DUMMY_DISPLAY, 0x00, &ret);
+
+ jbt_reg_write_1(lcd, JBT_REG_SLEEP_OUT_FR_A, 0x11, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_SLEEP_OUT_FR_B, 0x11, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_SLEEP_OUT_FR_C, 0x11, &ret);
+ jbt_reg_write_2(lcd, JBT_REG_SLEEP_IN_LCCNT_D, 0x2040, &ret);
+ jbt_reg_write_2(lcd, JBT_REG_SLEEP_IN_LCCNT_E, 0x60c0, &ret);
+ jbt_reg_write_2(lcd, JBT_REG_SLEEP_IN_LCCNT_F, 0x1020, &ret);
+ jbt_reg_write_2(lcd, JBT_REG_SLEEP_IN_LCCNT_G, 0x60c0, &ret);
+
+ jbt_reg_write_2(lcd, JBT_REG_GAMMA1_FINE_1, 0x5533, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_GAMMA1_FINE_2, 0x00, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_GAMMA1_INCLINATION, 0x00, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_GAMMA1_BLUE_OFFSET, 0x00, &ret);
+
+ jbt_reg_write_2(lcd, JBT_REG_HCLOCK_VGA, 0x1f0, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_BLANK_CONTROL, 0x02, &ret);
+ jbt_reg_write_2(lcd, JBT_REG_BLANK_TH_TV, 0x0804, &ret);
+
+ jbt_reg_write_1(lcd, JBT_REG_CKV_ON_OFF, 0x01, &ret);
+ jbt_reg_write_2(lcd, JBT_REG_CKV_1_2, 0x0000, &ret);
+
+ jbt_reg_write_2(lcd, JBT_REG_OEV_TIMING, 0x0d0e, &ret);
+ jbt_reg_write_2(lcd, JBT_REG_ASW_TIMING_1, 0x11a4, &ret);
+ jbt_reg_write_1(lcd, JBT_REG_ASW_TIMING_2, 0x0e, &ret);
+
+ return ret;
+}
+
+static int td028ttec1_enable(struct drm_panel *panel)
+{
+ struct td028ttec1_panel *lcd = to_td028ttec1_device(panel);
+ int ret;
+
+ ret = jbt_ret_write_0(lcd, JBT_REG_DISPLAY_ON, NULL);
+ if (ret)
+ return ret;
+
+ backlight_enable(lcd->backlight);
+
+ return 0;
+}
+
+static int td028ttec1_disable(struct drm_panel *panel)
+{
+ struct td028ttec1_panel *lcd = to_td028ttec1_device(panel);
+
+ backlight_disable(lcd->backlight);
+
+ jbt_ret_write_0(lcd, JBT_REG_DISPLAY_OFF, NULL);
+
+ return 0;
+}
+
+static int td028ttec1_unprepare(struct drm_panel *panel)
+{
+ struct td028ttec1_panel *lcd = to_td028ttec1_device(panel);
+
+ jbt_reg_write_2(lcd, JBT_REG_OUTPUT_CONTROL, 0x8002, NULL);
+ jbt_ret_write_0(lcd, JBT_REG_SLEEP_IN, NULL);
+ jbt_reg_write_1(lcd, JBT_REG_POWER_ON_OFF, 0x00, NULL);
+
+ return 0;
+}
+
+static const struct drm_display_mode td028ttec1_mode = {
+ .clock = 22153,
+ .hdisplay = 480,
+ .hsync_start = 480 + 24,
+ .hsync_end = 480 + 24 + 8,
+ .htotal = 480 + 24 + 8 + 8,
+ .vdisplay = 640,
+ .vsync_start = 640 + 4,
+ .vsync_end = 640 + 4 + 2,
+ .vtotal = 640 + 4 + 2 + 2,
+ .vrefresh = 66,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 43,
+ .height_mm = 58,
+};
+
+static int td028ttec1_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &td028ttec1_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = td028ttec1_mode.width_mm;
+ connector->display_info.height_mm = td028ttec1_mode.height_mm;
+ /*
+ * FIXME: According to the datasheet sync signals are sampled on the
+ * rising edge of the clock, but the code running on the OpenMoko Neo
+ * FreeRunner and Neo 1973 indicates sampling on the falling edge. This
+ * should be tested on a real device.
+ */
+ connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs td028ttec1_funcs = {
+ .prepare = td028ttec1_prepare,
+ .enable = td028ttec1_enable,
+ .disable = td028ttec1_disable,
+ .unprepare = td028ttec1_unprepare,
+ .get_modes = td028ttec1_get_modes,
+};
+
+static int td028ttec1_probe(struct spi_device *spi)
+{
+ struct td028ttec1_panel *lcd;
+ int ret;
+
+ lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
+ if (!lcd)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, lcd);
+ lcd->spi = spi;
+
+ lcd->backlight = devm_of_find_backlight(&spi->dev);
+ if (IS_ERR(lcd->backlight))
+ return PTR_ERR(lcd->backlight);
+
+ spi->mode = SPI_MODE_3;
+ spi->bits_per_word = 9;
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "failed to setup SPI: %d\n", ret);
+ return ret;
+ }
+
+ drm_panel_init(&lcd->panel);
+ lcd->panel.dev = &lcd->spi->dev;
+ lcd->panel.funcs = &td028ttec1_funcs;
+
+ return drm_panel_add(&lcd->panel);
+}
+
+static int td028ttec1_remove(struct spi_device *spi)
+{
+ struct td028ttec1_panel *lcd = spi_get_drvdata(spi);
+
+ drm_panel_remove(&lcd->panel);
+ drm_panel_disable(&lcd->panel);
+ drm_panel_unprepare(&lcd->panel);
+
+ return 0;
+}
+
+static const struct of_device_id td028ttec1_of_match[] = {
+ { .compatible = "tpo,td028ttec1", },
+ /* DT backward compatibility. */
+ { .compatible = "toppoly,td028ttec1", },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, td028ttec1_of_match);
+
+static const struct spi_device_id td028ttec1_ids[] = {
+ { "tpo,td028ttec1", 0},
+ { "toppoly,td028ttec1", 0 },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, td028ttec1_ids);
+
+static struct spi_driver td028ttec1_driver = {
+ .probe = td028ttec1_probe,
+ .remove = td028ttec1_remove,
+ .id_table = td028ttec1_ids,
+ .driver = {
+ .name = "panel-tpo-td028ttec1",
+ .of_match_table = td028ttec1_of_match,
+ },
+};
+
+module_spi_driver(td028ttec1_driver);
+
+MODULE_AUTHOR("H. Nikolaus Schaller <[email protected]>");
+MODULE_DESCRIPTION("Toppoly TD028TTEC1 panel driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
new file mode 100644
index 000000000000..84370562910f
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -0,0 +1,509 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Toppoly TD043MTEA1 Panel Driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated
+ *
+ * Based on the omapdrm-specific panel-tpo-td043mtea1 driver
+ *
+ * Author: Gražvydas Ignotas <[email protected]>
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define TPO_R02_MODE(x) ((x) & 7)
+#define TPO_R02_MODE_800x480 7
+#define TPO_R02_NCLK_RISING BIT(3)
+#define TPO_R02_HSYNC_HIGH BIT(4)
+#define TPO_R02_VSYNC_HIGH BIT(5)
+
+#define TPO_R03_NSTANDBY BIT(0)
+#define TPO_R03_EN_CP_CLK BIT(1)
+#define TPO_R03_EN_VGL_PUMP BIT(2)
+#define TPO_R03_EN_PWM BIT(3)
+#define TPO_R03_DRIVING_CAP_100 BIT(4)
+#define TPO_R03_EN_PRE_CHARGE BIT(6)
+#define TPO_R03_SOFTWARE_CTL BIT(7)
+
+#define TPO_R04_NFLIP_H BIT(0)
+#define TPO_R04_NFLIP_V BIT(1)
+#define TPO_R04_CP_CLK_FREQ_1H BIT(2)
+#define TPO_R04_VGL_FREQ_1H BIT(4)
+
+#define TPO_R03_VAL_NORMAL \
+ (TPO_R03_NSTANDBY | TPO_R03_EN_CP_CLK | TPO_R03_EN_VGL_PUMP | \
+ TPO_R03_EN_PWM | TPO_R03_DRIVING_CAP_100 | TPO_R03_EN_PRE_CHARGE | \
+ TPO_R03_SOFTWARE_CTL)
+
+#define TPO_R03_VAL_STANDBY \
+ (TPO_R03_DRIVING_CAP_100 | TPO_R03_EN_PRE_CHARGE | \
+ TPO_R03_SOFTWARE_CTL)
+
+static const u16 td043mtea1_def_gamma[12] = {
+ 105, 315, 381, 431, 490, 537, 579, 686, 780, 837, 880, 1023
+};
+
+struct td043mtea1_panel {
+ struct drm_panel panel;
+
+ struct spi_device *spi;
+ struct regulator *vcc_reg;
+ struct gpio_desc *reset_gpio;
+
+ unsigned int mode;
+ u16 gamma[12];
+ bool vmirror;
+ bool powered_on;
+ bool spi_suspended;
+ bool power_on_resume;
+};
+
+#define to_td043mtea1_device(p) container_of(p, struct td043mtea1_panel, panel)
+
+/* -----------------------------------------------------------------------------
+ * Hardware Access
+ */
+
+static int td043mtea1_write(struct td043mtea1_panel *lcd, u8 addr, u8 value)
+{
+ struct spi_message msg;
+ struct spi_transfer xfer;
+ u16 data;
+ int ret;
+
+ spi_message_init(&msg);
+
+ memset(&xfer, 0, sizeof(xfer));
+
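+	/*
+	 * Each register write is one 16-bit frame: the register address in
+	 * bits [15:10], a flag at bit 8 (presumably the write strobe) and
+	 * the 8-bit value in bits [7:0].
+	 */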
+ data = ((u16)addr << 10) | (1 << 8) | value;
+ xfer.tx_buf = &data;
+ xfer.bits_per_word = 16;
+ xfer.len = 2;
+ spi_message_add_tail(&xfer, &msg);
+
+ ret = spi_sync(lcd->spi, &msg);
+ if (ret < 0)
+ dev_warn(&lcd->spi->dev, "failed to write to LCD reg (%d)\n",
+ ret);
+
+ return ret;
+}
+
+static void td043mtea1_write_gamma(struct td043mtea1_panel *lcd)
+{
+ const u16 *gamma = lcd->gamma;
+ unsigned int i;
+ u8 val;
+
+ /* gamma bits [9:8] */
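+	/*
+	 * Registers 0x11-0x13 each pack the two MSBs of four consecutive
+	 * gamma entries; entry N of a group lands at bits [7 - 2 * N:6 - 2 * N].
+	 */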
+ for (val = i = 0; i < 4; i++)
+ val |= (gamma[i] & 0x300) >> ((i + 1) * 2);
+ td043mtea1_write(lcd, 0x11, val);
+
+ for (val = i = 0; i < 4; i++)
+ val |= (gamma[i + 4] & 0x300) >> ((i + 1) * 2);
+ td043mtea1_write(lcd, 0x12, val);
+
+ for (val = i = 0; i < 4; i++)
+ val |= (gamma[i + 8] & 0x300) >> ((i + 1) * 2);
+ td043mtea1_write(lcd, 0x13, val);
+
+ /* gamma bits [7:0] */
+ for (i = 0; i < 12; i++)
+ td043mtea1_write(lcd, 0x14 + i, gamma[i] & 0xff);
+}
+
+static int td043mtea1_write_mirror(struct td043mtea1_panel *lcd)
+{
+ u8 reg4 = TPO_R04_NFLIP_H | TPO_R04_NFLIP_V |
+		  TPO_R04_CP_CLK_FREQ_1H | TPO_R04_VGL_FREQ_1H;
+
+	if (lcd->vmirror)
+ reg4 &= ~TPO_R04_NFLIP_V;
+
+ return td043mtea1_write(lcd, 4, reg4);
+}
+
+static int td043mtea1_power_on(struct td043mtea1_panel *lcd)
+{
+ int ret;
+
+ if (lcd->powered_on)
+ return 0;
+
+ ret = regulator_enable(lcd->vcc_reg);
+ if (ret < 0)
+ return ret;
+
+ /* Wait for the panel to stabilize. */
+ msleep(160);
+
+ gpiod_set_value(lcd->reset_gpio, 0);
+
+ td043mtea1_write(lcd, 2, TPO_R02_MODE(lcd->mode) | TPO_R02_NCLK_RISING);
+ td043mtea1_write(lcd, 3, TPO_R03_VAL_NORMAL);
+ td043mtea1_write(lcd, 0x20, 0xf0);
+ td043mtea1_write(lcd, 0x21, 0xf0);
+ td043mtea1_write_mirror(lcd);
+ td043mtea1_write_gamma(lcd);
+
+ lcd->powered_on = true;
+
+ return 0;
+}
+
+static void td043mtea1_power_off(struct td043mtea1_panel *lcd)
+{
+ if (!lcd->powered_on)
+ return;
+
+ td043mtea1_write(lcd, 3, TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM);
+
+ gpiod_set_value(lcd->reset_gpio, 1);
+
+ /* wait for at least 2 vsyncs before cutting off power */
+ msleep(50);
+
+ td043mtea1_write(lcd, 3, TPO_R03_VAL_STANDBY);
+
+ regulator_disable(lcd->vcc_reg);
+
+ lcd->powered_on = false;
+}
+
+/* -----------------------------------------------------------------------------
+ * sysfs
+ */
+
+static ssize_t vmirror_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", lcd->vmirror);
+}
+
+static ssize_t vmirror_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
+ int val;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ lcd->vmirror = !!val;
+
+ ret = td043mtea1_write_mirror(lcd);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", lcd->mode);
+}
+
+static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 0, &val);
+ if (ret != 0 || val & ~7)
+ return -EINVAL;
+
+ lcd->mode = val;
+
+ val |= TPO_R02_NCLK_RISING;
+ td043mtea1_write(lcd, 2, val);
+
+ return count;
+}
+
+static ssize_t gamma_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
+ ssize_t len = 0;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(lcd->gamma); i++) {
+ ret = snprintf(buf + len, PAGE_SIZE - len, "%u ",
+ lcd->gamma[i]);
+ if (ret < 0)
+ return ret;
+ len += ret;
+ }
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t gamma_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
+ unsigned int g[12];
+ unsigned int i;
+ int ret;
+
+ ret = sscanf(buf, "%u %u %u %u %u %u %u %u %u %u %u %u",
+ &g[0], &g[1], &g[2], &g[3], &g[4], &g[5],
+ &g[6], &g[7], &g[8], &g[9], &g[10], &g[11]);
+ if (ret != 12)
+ return -EINVAL;
+
+ for (i = 0; i < 12; i++)
+ lcd->gamma[i] = g[i];
+
+ td043mtea1_write_gamma(lcd);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(vmirror);
+static DEVICE_ATTR_RW(mode);
+static DEVICE_ATTR_RW(gamma);
+
+static struct attribute *td043mtea1_attrs[] = {
+ &dev_attr_vmirror.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_gamma.attr,
+ NULL,
+};
+
+static const struct attribute_group td043mtea1_attr_group = {
+ .attrs = td043mtea1_attrs,
+};
+
+/* -----------------------------------------------------------------------------
+ * Panel Operations
+ */
+
+static int td043mtea1_unprepare(struct drm_panel *panel)
+{
+ struct td043mtea1_panel *lcd = to_td043mtea1_device(panel);
+
+ if (!lcd->spi_suspended)
+ td043mtea1_power_off(lcd);
+
+ return 0;
+}
+
+static int td043mtea1_prepare(struct drm_panel *panel)
+{
+ struct td043mtea1_panel *lcd = to_td043mtea1_device(panel);
+ int ret;
+
+ /*
+ * If we are resuming from system suspend, SPI might not be enabled
+ * yet, so we'll program the LCD from SPI PM resume callback.
+ */
+ if (lcd->spi_suspended)
+ return 0;
+
+ ret = td043mtea1_power_on(lcd);
+ if (ret) {
+ dev_err(&lcd->spi->dev, "%s: power on failed (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct drm_display_mode td043mtea1_mode = {
+ .clock = 36000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 68,
+ .hsync_end = 800 + 68 + 1,
+ .htotal = 800 + 68 + 1 + 214,
+ .vdisplay = 480,
+ .vsync_start = 480 + 39,
+ .vsync_end = 480 + 39 + 1,
+ .vtotal = 480 + 39 + 1 + 34,
+ .vrefresh = 60,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 94,
+ .height_mm = 56,
+};
+
+static int td043mtea1_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &td043mtea1_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = td043mtea1_mode.width_mm;
+ connector->display_info.height_mm = td043mtea1_mode.height_mm;
+ /*
+ * FIXME: According to the datasheet sync signals are sampled on the
+ * rising edge of the clock, but the code running on the OMAP3 Pandora
+ * indicates sampling on the falling edge. This should be tested on a
+ * real device.
+ */
+ connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs td043mtea1_funcs = {
+ .unprepare = td043mtea1_unprepare,
+ .prepare = td043mtea1_prepare,
+ .get_modes = td043mtea1_get_modes,
+};
+
+/* -----------------------------------------------------------------------------
+ * Power Management, Probe and Remove
+ */
+
+static int __maybe_unused td043mtea1_suspend(struct device *dev)
+{
+ struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
+
+ if (lcd->powered_on) {
+ td043mtea1_power_off(lcd);
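+		/*
+		 * Leave powered_on set so that the resume handler knows to
+		 * power the panel back up.
+		 */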
+ lcd->powered_on = true;
+ }
+
+ lcd->spi_suspended = true;
+
+ return 0;
+}
+
+static int __maybe_unused td043mtea1_resume(struct device *dev)
+{
+ struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
+ int ret;
+
+ lcd->spi_suspended = false;
+
+ if (lcd->powered_on) {
+ lcd->powered_on = false;
+ ret = td043mtea1_power_on(lcd);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(td043mtea1_pm_ops, td043mtea1_suspend,
+ td043mtea1_resume);
+
+static int td043mtea1_probe(struct spi_device *spi)
+{
+ struct td043mtea1_panel *lcd;
+ int ret;
+
+ lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
+ if (lcd == NULL)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, lcd);
+ lcd->spi = spi;
+ lcd->mode = TPO_R02_MODE_800x480;
+ memcpy(lcd->gamma, td043mtea1_def_gamma, sizeof(lcd->gamma));
+
+ lcd->vcc_reg = devm_regulator_get(&spi->dev, "vcc");
+ if (IS_ERR(lcd->vcc_reg)) {
+ dev_err(&spi->dev, "failed to get VCC regulator\n");
+ return PTR_ERR(lcd->vcc_reg);
+ }
+
+ lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(lcd->reset_gpio)) {
+ dev_err(&spi->dev, "failed to get reset GPIO\n");
+ return PTR_ERR(lcd->reset_gpio);
+ }
+
+ spi->bits_per_word = 16;
+ spi->mode = SPI_MODE_0;
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "failed to setup SPI: %d\n", ret);
+ return ret;
+ }
+
+ ret = sysfs_create_group(&spi->dev.kobj, &td043mtea1_attr_group);
+ if (ret < 0) {
+ dev_err(&spi->dev, "failed to create sysfs files\n");
+ return ret;
+ }
+
+ drm_panel_init(&lcd->panel);
+ lcd->panel.dev = &lcd->spi->dev;
+ lcd->panel.funcs = &td043mtea1_funcs;
+
+ ret = drm_panel_add(&lcd->panel);
+ if (ret < 0) {
+ sysfs_remove_group(&spi->dev.kobj, &td043mtea1_attr_group);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int td043mtea1_remove(struct spi_device *spi)
+{
+ struct td043mtea1_panel *lcd = spi_get_drvdata(spi);
+
+ drm_panel_remove(&lcd->panel);
+ drm_panel_disable(&lcd->panel);
+ drm_panel_unprepare(&lcd->panel);
+
+ sysfs_remove_group(&spi->dev.kobj, &td043mtea1_attr_group);
+
+ return 0;
+}
+
+static const struct of_device_id td043mtea1_of_match[] = {
+ { .compatible = "tpo,td043mtea1", },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, td043mtea1_of_match);
+
+static struct spi_driver td043mtea1_driver = {
+ .probe = td043mtea1_probe,
+ .remove = td043mtea1_remove,
+ .driver = {
+ .name = "panel-tpo-td043mtea1",
+ .pm = &td043mtea1_pm_ops,
+ .of_match_table = td043mtea1_of_match,
+ },
+};
+
+module_spi_driver(td043mtea1_driver);
+
+MODULE_ALIAS("spi:tpo,td043mtea1");
+MODULE_AUTHOR("Gražvydas Ignotas <[email protected]>");
+MODULE_DESCRIPTION("TPO TD043MTEA1 Panel Driver");
+MODULE_LICENSE("GPL");
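
The fixed mode above encodes its refresh rate implicitly; a standalone sketch (not part of the patch) shows that the declared 60 Hz follows from the pixel clock and the blanking totals:

#include <stdio.h>

/* Recompute the TD043MTEA1 refresh rate from the timings above. */
int main(void)
{
        unsigned int htotal = 800 + 68 + 1 + 214;       /* 1083 pixels */
        unsigned int vtotal = 480 + 39 + 1 + 34;        /* 554 lines */
        double clock_hz = 36000 * 1000.0;               /* .clock is in kHz */

        /* 36,000,000 / (1083 * 554) = ~60.0 Hz */
        printf("refresh = %.2f Hz\n", clock_hz / (htotal * vtotal));
        return 0;
}
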
diff --git a/drivers/gpu/drm/panfrost/Makefile b/drivers/gpu/drm/panfrost/Makefile
index ecf0864cb515..b71935862417 100644
--- a/drivers/gpu/drm/panfrost/Makefile
+++ b/drivers/gpu/drm/panfrost/Makefile
@@ -5,6 +5,7 @@ panfrost-y := \
panfrost_device.o \
panfrost_devfreq.o \
panfrost_gem.o \
+ panfrost_gem_shrinker.o \
panfrost_gpu.o \
panfrost_job.o \
panfrost_mmu.o \
diff --git a/drivers/gpu/drm/panfrost/TODO b/drivers/gpu/drm/panfrost/TODO
index c2e44add37d8..536a0d4f8d29 100644
--- a/drivers/gpu/drm/panfrost/TODO
+++ b/drivers/gpu/drm/panfrost/TODO
@@ -6,22 +6,7 @@
- Bifrost specific feature and issue handling
- Coherent DMA support
-- Support for 2MB pages. The io-pgtable code already supports this. Finishing
- support involves either copying or adapting the iommu API to handle passing
- aligned addresses and sizes to the io-pgtable code.
-
-- Per FD address space support. The h/w supports multiple addresses spaces.
- The hard part is handling when more address spaces are needed than what
- the h/w provides.
-
-- Support pinning pages on demand (GPU page faults).
-
- Support userspace controlled GPU virtual addresses. Needed for Vulkan. (Tomeu)
-- Support for madvise and a shrinker.
-
- Compute job support. So called 'compute only' jobs need to be plumbed up to
userspace.
-
-- Performance counter support. (Boris)
-
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index a7c18bceb7fd..710d903f8e0d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -39,7 +39,7 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
* If frequency scaling from low to high, adjust voltage first.
* If frequency scaling from high to low, adjust frequency first.
*/
- if (old_clk_rate < target_rate) {
+ if (old_clk_rate < target_rate && pfdev->regulator) {
err = regulator_set_voltage(pfdev->regulator, target_volt,
target_volt);
if (err) {
@@ -58,7 +58,7 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
return err;
}
- if (old_clk_rate > target_rate) {
+ if (old_clk_rate > target_rate && pfdev->regulator) {
err = regulator_set_voltage(pfdev->regulator, target_volt,
target_volt);
if (err)
@@ -136,9 +136,6 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
int ret;
struct dev_pm_opp *opp;
- if (!pfdev->regulator)
- return 0;
-
ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
if (ret == -ENODEV) /* Optional, continue without devfreq */
return 0;
@@ -163,12 +160,18 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
DRM_DEV_ERROR(&pfdev->pdev->dev, "Couldn't initialize GPU devfreq\n");
ret = PTR_ERR(pfdev->devfreq.devfreq);
pfdev->devfreq.devfreq = NULL;
+ dev_pm_opp_of_remove_table(&pfdev->pdev->dev);
return ret;
}
return 0;
}
+void panfrost_devfreq_fini(struct panfrost_device *pfdev)
+{
+ dev_pm_opp_of_remove_table(&pfdev->pdev->dev);
+}
+
void panfrost_devfreq_resume(struct panfrost_device *pfdev)
{
int i;
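
The hunks above make dev_pm_opp_of_add_table() and dev_pm_opp_of_remove_table() strictly paired: the table is now removed both in the new panfrost_devfreq_fini() and on the devfreq-registration failure path. A minimal sketch of the pattern, with setup_devfreq() as a hypothetical stand-in for the driver-specific part:

#include <linux/device.h>
#include <linux/pm_opp.h>

static int setup_devfreq(struct device *dev);   /* hypothetical helper */

static int example_devfreq_init(struct device *dev)
{
        int ret = dev_pm_opp_of_add_table(dev);

        if (ret == -ENODEV)     /* no OPPs in DT: devfreq is optional */
                return 0;
        if (ret)
                return ret;

        if (setup_devfreq(dev)) {
                /* Undo the table on any later failure... */
                dev_pm_opp_of_remove_table(dev);
                return -ENODEV;
        }
        return 0;
}

static void example_devfreq_fini(struct device *dev)
{
        /* ...and again, unconditionally, at teardown. */
        dev_pm_opp_of_remove_table(dev);
}
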
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
index eb999531ed90..e3bc63e82843 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.h
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
@@ -5,6 +5,7 @@
#define __PANFROST_DEVFREQ_H__
int panfrost_devfreq_init(struct panfrost_device *pfdev);
+void panfrost_devfreq_fini(struct panfrost_device *pfdev);
void panfrost_devfreq_resume(struct panfrost_device *pfdev);
void panfrost_devfreq_suspend(struct panfrost_device *pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 8a111d7c0200..4da71bb56c20 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -123,8 +123,10 @@ int panfrost_device_init(struct panfrost_device *pfdev)
mutex_init(&pfdev->sched_lock);
mutex_init(&pfdev->reset_lock);
INIT_LIST_HEAD(&pfdev->scheduled_jobs);
+ INIT_LIST_HEAD(&pfdev->as_lru_list);
spin_lock_init(&pfdev->hwaccess_lock);
+ spin_lock_init(&pfdev->as_lock);
err = panfrost_clk_init(pfdev);
if (err) {
@@ -254,18 +256,22 @@ const char *panfrost_exception_name(struct panfrost_device *pfdev, u32 exception
return "UNKNOWN";
}
+void panfrost_device_reset(struct panfrost_device *pfdev)
+{
+ panfrost_gpu_soft_reset(pfdev);
+
+ panfrost_gpu_power_on(pfdev);
+ panfrost_mmu_reset(pfdev);
+ panfrost_job_enable_interrupts(pfdev);
+}
+
#ifdef CONFIG_PM
int panfrost_device_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct panfrost_device *pfdev = platform_get_drvdata(pdev);
- panfrost_gpu_soft_reset(pfdev);
-
- /* TODO: Re-enable all other address spaces */
- panfrost_gpu_power_on(pfdev);
- panfrost_mmu_enable(pfdev, 0);
- panfrost_job_enable_interrupts(pfdev);
+ panfrost_device_reset(pfdev);
panfrost_devfreq_resume(pfdev);
return 0;
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index ea5948ff3647..f503c566e99f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -5,6 +5,8 @@
#ifndef __PANFROST_DEVICE_H__
#define __PANFROST_DEVICE_H__
+#include <linux/atomic.h>
+#include <linux/io-pgtable.h>
#include <linux/spinlock.h>
#include <drm/drm_device.h>
#include <drm/drm_mm.h>
@@ -63,9 +65,6 @@ struct panfrost_device {
spinlock_t hwaccess_lock;
- struct drm_mm mm;
- spinlock_t mm_lock;
-
void __iomem *iomem;
struct clk *clock;
struct clk *bus_clock;
@@ -74,7 +73,11 @@ struct panfrost_device {
struct panfrost_features features;
- struct panfrost_mmu *mmu;
+ spinlock_t as_lock;
+ unsigned long as_in_use_mask;
+ unsigned long as_alloc_mask;
+ struct list_head as_lru_list;
+
struct panfrost_job_slot *js;
struct panfrost_job *jobs[NUM_JOB_SLOTS];
@@ -85,6 +88,10 @@ struct panfrost_device {
struct mutex sched_lock;
struct mutex reset_lock;
+ struct mutex shrinker_lock;
+ struct list_head shrinker_list;
+ struct shrinker shrinker;
+
struct {
struct devfreq *devfreq;
struct thermal_cooling_device *cooling;
@@ -94,10 +101,23 @@ struct panfrost_device {
} devfreq;
};
+struct panfrost_mmu {
+ struct io_pgtable_cfg pgtbl_cfg;
+ struct io_pgtable_ops *pgtbl_ops;
+ struct mutex lock;
+ int as;
+ atomic_t as_count;
+ struct list_head list;
+};
+
struct panfrost_file_priv {
struct panfrost_device *pfdev;
struct drm_sched_entity sched_entity[NUM_JOB_SLOTS];
+
+ struct panfrost_mmu mmu;
+ struct drm_mm mm;
+ spinlock_t mm_lock;
};
static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev)
@@ -128,6 +148,7 @@ int panfrost_unstable_ioctl_check(void);
int panfrost_device_init(struct panfrost_device *pfdev);
void panfrost_device_fini(struct panfrost_device *pfdev);
+void panfrost_device_reset(struct panfrost_device *pfdev);
int panfrost_device_resume(struct device *dev);
int panfrost_device_suspend(struct device *dev);
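
With struct panfrost_mmu now embedded in struct panfrost_file_priv, code that only holds an mmu pointer (the fault path later in this series) recovers the owning file context via container_of(). A userspace sketch of that recovery, with the types stripped down for illustration:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct mmu { int as; };
struct file_priv { void *pfdev; struct mmu mmu; };

int main(void)
{
        struct file_priv priv = { .mmu = { .as = 3 } };
        struct mmu *m = &priv.mmu;

        /* Recover the per-FD context from the embedded member. */
        struct file_priv *owner = container_of(m, struct file_priv, mmu);

        printf("as=%d, owner ok=%d\n", m->as, owner == &priv);
        return 0;
}
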
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index b187daa4da85..44a558c6e17e 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -78,29 +78,26 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
struct drm_file *file)
{
- int ret;
- struct drm_gem_shmem_object *shmem;
+ struct panfrost_gem_object *bo;
struct drm_panfrost_create_bo *args = data;
- if (!args->size || args->flags || args->pad)
+ if (!args->size || args->pad ||
+ (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
return -EINVAL;
- shmem = drm_gem_shmem_create_with_handle(file, dev, args->size,
- &args->handle);
- if (IS_ERR(shmem))
- return PTR_ERR(shmem);
+ /* Heaps should never be executable */
+ if ((args->flags & PANFROST_BO_HEAP) &&
+ !(args->flags & PANFROST_BO_NOEXEC))
+ return -EINVAL;
- ret = panfrost_mmu_map(to_panfrost_bo(&shmem->base));
- if (ret)
- goto err_free;
+ bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
+ &args->handle);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
- args->offset = to_panfrost_bo(&shmem->base)->node.start << PAGE_SHIFT;
+ args->offset = bo->node.start << PAGE_SHIFT;
return 0;
-
-err_free:
- drm_gem_handle_delete(file, args->handle);
- return ret;
}
/**
@@ -277,7 +274,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
if (!gem_obj)
return -ENOENT;
- ret = reservation_object_wait_timeout_rcu(gem_obj->resv, true,
+ ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
true, timeout);
if (!ret)
ret = timeout ? -ETIMEDOUT : -EBUSY;
@@ -305,6 +302,10 @@ static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
return -ENOENT;
}
+ /* Don't allow mmapping of heap objects as pages are not pinned. */
+ if (to_panfrost_bo(gem_obj)->is_heap)
+ return -EINVAL;
+
ret = drm_gem_create_mmap_offset(gem_obj);
if (ret == 0)
args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
@@ -333,6 +334,38 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
return 0;
}
+static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_panfrost_madvise *args = data;
+ struct panfrost_device *pfdev = dev->dev_private;
+ struct drm_gem_object *gem_obj;
+
+ gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!gem_obj) {
+ DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
+ return -ENOENT;
+ }
+
+ args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
+
+ if (args->retained) {
+ struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
+
+ mutex_lock(&pfdev->shrinker_lock);
+
+ if (args->madv == PANFROST_MADV_DONTNEED)
+ list_add_tail(&bo->base.madv_list, &pfdev->shrinker_list);
+ else if (args->madv == PANFROST_MADV_WILLNEED)
+ list_del_init(&bo->base.madv_list);
+
+ mutex_unlock(&pfdev->shrinker_lock);
+ }
+
+ drm_gem_object_put_unlocked(gem_obj);
+ return 0;
+}
+
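
From userspace the new ioctl marks idle buffers purgeable and revalidates them before reuse. A hedged usage sketch, assuming the uapi definitions added by this series (error handling reduced to returns):

#include <sys/ioctl.h>
#include <drm/panfrost_drm.h>

/* Mark a BO purgeable while idle, then reclaim it before the next use. */
static int example_madvise(int fd, unsigned int handle)
{
        struct drm_panfrost_madvise m = {
                .handle = handle,
                .madv = PANFROST_MADV_DONTNEED,
        };

        if (ioctl(fd, DRM_IOCTL_PANFROST_MADVISE, &m))
                return -1;

        /* ... later, before touching the BO again ... */
        m.madv = PANFROST_MADV_WILLNEED;
        if (ioctl(fd, DRM_IOCTL_PANFROST_MADVISE, &m))
                return -1;

        /* retained == 0 means the shrinker purged the pages. */
        return m.retained ? 0 : 1; /* 1: caller must reinit the contents */
}
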
int panfrost_unstable_ioctl_check(void)
{
if (!unstable_ioctls)
@@ -341,9 +374,36 @@ int panfrost_unstable_ioctl_check(void)
return 0;
}
+#define PFN_4G (SZ_4G >> PAGE_SHIFT)
+#define PFN_4G_MASK (PFN_4G - 1)
+#define PFN_16M (SZ_16M >> PAGE_SHIFT)
+
+static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
+ unsigned long color,
+ u64 *start, u64 *end)
+{
+ /* Executable buffers can't start or end on a 4GB boundary */
+ if (!(color & PANFROST_BO_NOEXEC)) {
+ u64 next_seg;
+
+ if ((*start & PFN_4G_MASK) == 0)
+ (*start)++;
+
+ if ((*end & PFN_4G_MASK) == 0)
+ (*end)--;
+
+ next_seg = ALIGN(*start, PFN_4G);
+ if (next_seg - *start <= PFN_16M)
+ *start = next_seg + 1;
+
+ *end = min(*end, ALIGN(*start, PFN_4G) - 1);
+ }
+}
+
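
With 4KB pages, PFN_4G is 0x100000 and PFN_16M is 0x1000, so the color_adjust above can be replayed with concrete numbers; a standalone sketch (ALIGN_UP mirrors the kernel's ALIGN):

#include <stdio.h>
#include <stdint.h>

#define PFN_4G          0x100000ULL             /* SZ_4G >> 12 */
#define PFN_4G_MASK     (PFN_4G - 1)
#define PFN_16M         0x1000ULL               /* SZ_16M >> 12 */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))
#define MIN(a, b)       ((a) < (b) ? (a) : (b))

/* Replays panfrost_drm_mm_color_adjust() for an executable buffer. */
static void adjust(uint64_t *start, uint64_t *end)
{
        uint64_t next_seg;

        if ((*start & PFN_4G_MASK) == 0)
                (*start)++;
        if ((*end & PFN_4G_MASK) == 0)
                (*end)--;

        /* Too close to the next 4GB boundary for a 16MB jump target. */
        next_seg = ALIGN_UP(*start, PFN_4G);
        if (next_seg - *start <= PFN_16M)
                *start = next_seg + 1;

        *end = MIN(*end, ALIGN_UP(*start, PFN_4G) - 1);
}

int main(void)
{
        uint64_t s = 0x0fff80, e = 0x180000;

        adjust(&s, &e);
        /* Prints [0x100001, 0x180000]: pushed past the 4GB boundary. */
        printf("[%#llx, %#llx]\n", (unsigned long long)s,
               (unsigned long long)e);
        return 0;
}
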
static int
panfrost_open(struct drm_device *dev, struct drm_file *file)
{
+ int ret;
struct panfrost_device *pfdev = dev->dev_private;
struct panfrost_file_priv *panfrost_priv;
@@ -354,7 +414,28 @@ panfrost_open(struct drm_device *dev, struct drm_file *file)
panfrost_priv->pfdev = pfdev;
file->driver_priv = panfrost_priv;
- return panfrost_job_open(panfrost_priv);
+ spin_lock_init(&panfrost_priv->mm_lock);
+
+ /* 4GB of VA is enough for now; this can be extended to 48 bits later. */
+ drm_mm_init(&panfrost_priv->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
+ panfrost_priv->mm.color_adjust = panfrost_drm_mm_color_adjust;
+
+ ret = panfrost_mmu_pgtable_alloc(panfrost_priv);
+ if (ret)
+ goto err_pgtable;
+
+ ret = panfrost_job_open(panfrost_priv);
+ if (ret)
+ goto err_job;
+
+ return 0;
+
+err_job:
+ panfrost_mmu_pgtable_free(panfrost_priv);
+err_pgtable:
+ drm_mm_takedown(&panfrost_priv->mm);
+ kfree(panfrost_priv);
+ return ret;
}
static void
@@ -365,6 +446,8 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file)
panfrost_perfcnt_close(panfrost_priv);
panfrost_job_close(panfrost_priv);
+ panfrost_mmu_pgtable_free(panfrost_priv);
+ drm_mm_takedown(&panfrost_priv->mm);
kfree(panfrost_priv);
}
@@ -384,10 +467,16 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
PANFROST_IOCTL(GET_BO_OFFSET, get_bo_offset, DRM_RENDER_ALLOW),
PANFROST_IOCTL(PERFCNT_ENABLE, perfcnt_enable, DRM_RENDER_ALLOW),
PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
};
DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops);
+/*
+ * Panfrost driver version:
+ * - 1.0 - initial interface
+ * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
+ */
static struct drm_driver panfrost_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
.open = panfrost_open,
@@ -399,7 +488,7 @@ static struct drm_driver panfrost_drm_driver = {
.desc = "panfrost DRM",
.date = "20180908",
.major = 1,
- .minor = 0,
+ .minor = 1,
.gem_create_object = panfrost_gem_create_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -431,10 +520,8 @@ static int panfrost_probe(struct platform_device *pdev)
ddev->dev_private = pfdev;
pfdev->ddev = ddev;
- spin_lock_init(&pfdev->mm_lock);
-
- /* 4G enough for now. can be 48-bit */
- drm_mm_init(&pfdev->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
+ mutex_init(&pfdev->shrinker_lock);
+ INIT_LIST_HEAD(&pfdev->shrinker_list);
pm_runtime_use_autosuspend(pfdev->dev);
pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
@@ -460,10 +547,14 @@ static int panfrost_probe(struct platform_device *pdev)
*/
err = drm_dev_register(ddev, 0);
if (err < 0)
- goto err_out1;
+ goto err_out2;
+
+ panfrost_gem_shrinker_init(ddev);
return 0;
+err_out2:
+ panfrost_devfreq_fini(pfdev);
err_out1:
panfrost_device_fini(pfdev);
err_out0:
@@ -478,9 +569,11 @@ static int panfrost_remove(struct platform_device *pdev)
struct drm_device *ddev = pfdev->ddev;
drm_dev_unregister(ddev);
+ panfrost_gem_shrinker_cleanup(ddev);
pm_runtime_get_sync(pfdev->dev);
pm_runtime_put_sync_autosuspend(pfdev->dev);
pm_runtime_disable(pfdev->dev);
+ panfrost_devfreq_fini(pfdev);
panfrost_device_fini(pfdev);
drm_dev_put(ddev);
return 0;
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 543ab1b81bd5..acb07fe06580 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -19,20 +19,95 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
struct panfrost_device *pfdev = obj->dev->dev_private;
+ if (bo->sgts) {
+ int i;
+ int n_sgt = bo->base.base.size / SZ_2M;
+
+ for (i = 0; i < n_sgt; i++) {
+ if (bo->sgts[i].sgl) {
+ dma_unmap_sg(pfdev->dev, bo->sgts[i].sgl,
+ bo->sgts[i].nents, DMA_BIDIRECTIONAL);
+ sg_free_table(&bo->sgts[i]);
+ }
+ }
+ kfree(bo->sgts);
+ }
+
+ mutex_lock(&pfdev->shrinker_lock);
+ if (!list_empty(&bo->base.madv_list))
+ list_del(&bo->base.madv_list);
+ mutex_unlock(&pfdev->shrinker_lock);
+
+ drm_gem_shmem_free_object(obj);
+}
+
+static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+ int ret;
+ size_t size = obj->size;
+ u64 align;
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
+
+ /*
+ * Executable buffers cannot cross a 16MB boundary as the program
+ * counter is 24 bits wide. We assume executable buffers will be less than
+ * 16MB and aligning executable buffers to their size will avoid
+ * crossing a 16MB boundary.
+ */
+ if (!bo->noexec)
+ align = size >> PAGE_SHIFT;
+ else
+ align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
+
+ bo->mmu = &priv->mmu;
+ spin_lock(&priv->mm_lock);
+ ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
+ size >> PAGE_SHIFT, align, color, 0);
+ spin_unlock(&priv->mm_lock);
+ if (ret)
+ return ret;
+
+ if (!bo->is_heap) {
+ ret = panfrost_mmu_map(bo);
+ if (ret) {
+ spin_lock(&priv->mm_lock);
+ drm_mm_remove_node(&bo->node);
+ spin_unlock(&priv->mm_lock);
+ }
+ }
+ return ret;
+}
+
+static void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
+
if (bo->is_mapped)
panfrost_mmu_unmap(bo);
- spin_lock(&pfdev->mm_lock);
- drm_mm_remove_node(&bo->node);
- spin_unlock(&pfdev->mm_lock);
+ spin_lock(&priv->mm_lock);
+ if (drm_mm_node_allocated(&bo->node))
+ drm_mm_remove_node(&bo->node);
+ spin_unlock(&priv->mm_lock);
+}
- drm_gem_shmem_free_object(obj);
+static int panfrost_gem_pin(struct drm_gem_object *obj)
+{
+ if (to_panfrost_bo(obj)->is_heap)
+ return -EINVAL;
+
+ return drm_gem_shmem_pin(obj);
}
static const struct drm_gem_object_funcs panfrost_gem_funcs = {
.free = panfrost_gem_free_object,
+ .open = panfrost_gem_open,
+ .close = panfrost_gem_close,
.print_info = drm_gem_shmem_print_info,
- .pin = drm_gem_shmem_pin,
+ .pin = panfrost_gem_pin,
.unpin = drm_gem_shmem_unpin,
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
@@ -50,10 +125,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
*/
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
- int ret;
- struct panfrost_device *pfdev = dev->dev_private;
struct panfrost_gem_object *obj;
- u64 align;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
@@ -61,21 +133,42 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
obj->base.base.funcs = &panfrost_gem_funcs;
- size = roundup(size, PAGE_SIZE);
- align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
+ return &obj->base.base;
+}
- spin_lock(&pfdev->mm_lock);
- ret = drm_mm_insert_node_generic(&pfdev->mm, &obj->node,
- size >> PAGE_SHIFT, align, 0, 0);
- spin_unlock(&pfdev->mm_lock);
+struct panfrost_gem_object *
+panfrost_gem_create_with_handle(struct drm_file *file_priv,
+ struct drm_device *dev, size_t size,
+ u32 flags,
+ uint32_t *handle)
+{
+ int ret;
+ struct drm_gem_shmem_object *shmem;
+ struct panfrost_gem_object *bo;
+
+ /* Round up heap allocations to 2MB to keep fault handling simple */
+ if (flags & PANFROST_BO_HEAP)
+ size = roundup(size, SZ_2M);
+
+ shmem = drm_gem_shmem_create(dev, size);
+ if (IS_ERR(shmem))
+ return ERR_CAST(shmem);
+
+ bo = to_panfrost_bo(&shmem->base);
+ bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
+ bo->is_heap = !!(flags & PANFROST_BO_HEAP);
+
+ /*
+ * Allocate an ID in the IDR table where the object is registered;
+ * the returned handle is the ID that userspace sees.
+ */
+ ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_put_unlocked(&shmem->base);
if (ret)
- goto free_obj;
+ return ERR_PTR(ret);
- return &obj->base.base;
-
-free_obj:
- kfree(obj);
- return ERR_PTR(ret);
+ return bo;
}
struct drm_gem_object *
@@ -84,15 +177,14 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt)
{
struct drm_gem_object *obj;
- struct panfrost_gem_object *pobj;
+ struct panfrost_gem_object *bo;
obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj))
return ERR_CAST(obj);
- pobj = to_panfrost_bo(obj);
-
- panfrost_mmu_map(pobj);
+ bo = to_panfrost_bo(obj);
+ bo->noexec = true;
return obj;
}
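
On the allocation side, the new flags map straight onto the ioctl; heap BOs must also carry NOEXEC, per the check in panfrost_ioctl_create_bo() above. A sketch assuming the uapi header from this series:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/panfrost_drm.h>

/* Allocate a growable (on-demand faulted) heap BO. */
static int example_create_heap(int fd, uint32_t size, uint32_t *handle)
{
        struct drm_panfrost_create_bo c = {
                .size = size,   /* rounded up to 2MB by the kernel */
                /* HEAP without NOEXEC is rejected with -EINVAL. */
                .flags = PANFROST_BO_HEAP | PANFROST_BO_NOEXEC,
        };

        if (ioctl(fd, DRM_IOCTL_PANFROST_CREATE_BO, &c))
                return -1;

        *handle = c.handle;
        /* Note: heap BOs cannot be mmapped; their pages are not pinned. */
        return 0;
}
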
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 6dbcaba020fc..50920819cc16 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -7,11 +7,17 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mm.h>
+struct panfrost_mmu;
+
struct panfrost_gem_object {
struct drm_gem_shmem_object base;
+ struct sg_table *sgts;
+ struct panfrost_mmu *mmu;
struct drm_mm_node node;
- bool is_mapped;
+ bool is_mapped :1;
+ bool noexec :1;
+ bool is_heap :1;
};
static inline
@@ -20,6 +26,12 @@ struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
}
+static inline
+struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
+{
+ return container_of(node, struct panfrost_gem_object, node);
+}
+
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
struct drm_gem_object *
@@ -27,4 +39,13 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
+struct panfrost_gem_object *
+panfrost_gem_create_with_handle(struct drm_file *file_priv,
+ struct drm_device *dev, size_t size,
+ u32 flags,
+ uint32_t *handle);
+
+void panfrost_gem_shrinker_init(struct drm_device *dev);
+void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
+
#endif /* __PANFROST_GEM_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
new file mode 100644
index 000000000000..d191632b6197
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019 Arm Ltd.
+ *
+ * Based on msm_gem_freedreno.c:
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <[email protected]>
+ */
+
+#include <linux/list.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem_shmem_helper.h>
+
+#include "panfrost_device.h"
+#include "panfrost_gem.h"
+#include "panfrost_mmu.h"
+
+static unsigned long
+panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct panfrost_device *pfdev =
+ container_of(shrinker, struct panfrost_device, shrinker);
+ struct drm_gem_shmem_object *shmem;
+ unsigned long count = 0;
+
+ if (!mutex_trylock(&pfdev->shrinker_lock))
+ return 0;
+
+ list_for_each_entry(shmem, &pfdev->shrinker_list, madv_list) {
+ if (drm_gem_shmem_is_purgeable(shmem))
+ count += shmem->base.size >> PAGE_SHIFT;
+ }
+
+ mutex_unlock(&pfdev->shrinker_lock);
+
+ return count;
+}
+
+static void panfrost_gem_purge(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ mutex_lock(&shmem->pages_lock);
+
+ panfrost_mmu_unmap(to_panfrost_bo(obj));
+ drm_gem_shmem_purge_locked(obj);
+
+ mutex_unlock(&shmem->pages_lock);
+}
+
+static unsigned long
+panfrost_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct panfrost_device *pfdev =
+ container_of(shrinker, struct panfrost_device, shrinker);
+ struct drm_gem_shmem_object *shmem, *tmp;
+ unsigned long freed = 0;
+
+ if (!mutex_trylock(&pfdev->shrinker_lock))
+ return SHRINK_STOP;
+
+ list_for_each_entry_safe(shmem, tmp, &pfdev->shrinker_list, madv_list) {
+ if (freed >= sc->nr_to_scan)
+ break;
+ if (drm_gem_shmem_is_purgeable(shmem)) {
+ panfrost_gem_purge(&shmem->base);
+ freed += shmem->base.size >> PAGE_SHIFT;
+ list_del_init(&shmem->madv_list);
+ }
+ }
+
+ mutex_unlock(&pfdev->shrinker_lock);
+
+ if (freed > 0)
+ pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);
+
+ return freed;
+}
+
+/**
+ * panfrost_gem_shrinker_init - Initialize panfrost shrinker
+ * @dev: DRM device
+ *
+ * This function registers and sets up the panfrost shrinker.
+ */
+void panfrost_gem_shrinker_init(struct drm_device *dev)
+{
+ struct panfrost_device *pfdev = dev->dev_private;
+
+ pfdev->shrinker.count_objects = panfrost_gem_shrinker_count;
+ pfdev->shrinker.scan_objects = panfrost_gem_shrinker_scan;
+ pfdev->shrinker.seeks = DEFAULT_SEEKS;
+ WARN_ON(register_shrinker(&pfdev->shrinker));
+}
+
+/**
+ * panfrost_gem_shrinker_cleanup - Clean up panfrost shrinker
+ * @dev: DRM device
+ *
+ * This function unregisters the panfrost shrinker.
+ */
+void panfrost_gem_shrinker_cleanup(struct drm_device *dev)
+{
+ struct panfrost_device *pfdev = dev->dev_private;
+
+ if (pfdev->shrinker.nr_deferred)
+ unregister_shrinker(&pfdev->shrinker);
+}
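
count_objects() reports a cheap estimate (here: purgeable pages) and scan_objects() does the freeing, returning the number reclaimed or SHRINK_STOP when the lock is contended. Roughly how the mm core drives the pair (heavily simplified; the real loop lives in shrink_slab()); `echo 2 > /proc/sys/vm/drop_caches` is a convenient way to exercise it:

#include <linux/shrinker.h>

static unsigned long example_drive(struct shrinker *s,
                                   struct shrink_control *sc)
{
        unsigned long count = s->count_objects(s, sc);
        unsigned long freed = 0;

        if (count) {
                sc->nr_to_scan = count;         /* the core batches this */
                freed = s->scan_objects(s, sc); /* may be SHRINK_STOP */
        }
        return freed;
}
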
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 9bb9260d9181..05c85f45a0de 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -6,7 +6,7 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>
@@ -153,6 +153,8 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js))))
goto end;
+ cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
+
panfrost_devfreq_record_transition(pfdev, js);
spin_lock_irqsave(&pfdev->hwaccess_lock, flags);
@@ -163,8 +165,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
/* start MMU, medium priority, cache clean/flush on end, clean/flush on
* start */
- /* TODO: different address spaces */
- cfg = JS_CONFIG_THREAD_PRI(8) |
+ cfg |= JS_CONFIG_THREAD_PRI(8) |
JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;
@@ -199,7 +200,7 @@ static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
int i;
for (i = 0; i < bo_count; i++)
- implicit_fences[i] = reservation_object_get_excl_rcu(bos[i]->resv);
+ implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
}
static void panfrost_attach_object_fences(struct drm_gem_object **bos,
@@ -209,7 +210,7 @@ static void panfrost_attach_object_fences(struct drm_gem_object **bos,
int i;
for (i = 0; i < bo_count; i++)
- reservation_object_add_excl_fence(bos[i]->resv, fence);
+ dma_resv_add_excl_fence(bos[i]->resv, fence);
}
int panfrost_job_push(struct panfrost_job *job)
@@ -377,8 +378,9 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
if (dma_fence_is_signaled(job->done_fence))
return;
- dev_err(pfdev->dev, "gpu sched timeout, js=%d, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
+ dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
js,
+ job_read(pfdev, JS_CONFIG(js)),
job_read(pfdev, JS_STATUS(js)),
job_read(pfdev, JS_HEAD_LO(js)),
job_read(pfdev, JS_TAIL_LO(js)),
@@ -395,12 +397,7 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
/* panfrost_core_dump(pfdev); */
panfrost_devfreq_record_transition(pfdev, js);
- panfrost_gpu_soft_reset(pfdev);
-
- /* TODO: Re-enable all other address spaces */
- panfrost_mmu_enable(pfdev, 0);
- panfrost_gpu_power_on(pfdev);
- panfrost_job_enable_interrupts(pfdev);
+ panfrost_device_reset(pfdev);
for (i = 0; i < NUM_JOB_SLOTS; i++)
drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
@@ -453,8 +450,12 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
}
if (status & JOB_INT_MASK_DONE(j)) {
+ struct panfrost_job *job = pfdev->jobs[j];
+
+ pfdev->jobs[j] = NULL;
+ panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
panfrost_devfreq_record_transition(pfdev, j);
- dma_fence_signal(pfdev->jobs[j]->done_fence);
+ dma_fence_signal(job->done_fence);
}
status &= ~mask;
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 92ac995dd9c6..842bdd7cf6be 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <[email protected]> */
+#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -9,6 +11,7 @@
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/shmem_fs.h>
#include <linux/sizes.h>
#include "panfrost_device.h"
@@ -20,12 +23,6 @@
#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)
-struct panfrost_mmu {
- struct io_pgtable_cfg pgtbl_cfg;
- struct io_pgtable_ops *pgtbl_ops;
- struct mutex lock;
-};
-
static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
int ret;
@@ -83,13 +80,19 @@ static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
}
-static int mmu_hw_do_operation(struct panfrost_device *pfdev, u32 as_nr,
- u64 iova, size_t size, u32 op)
+static int mmu_hw_do_operation(struct panfrost_device *pfdev,
+ struct panfrost_mmu *mmu,
+ u64 iova, size_t size, u32 op)
{
- unsigned long flags;
- int ret;
+ int ret, as_nr;
- spin_lock_irqsave(&pfdev->hwaccess_lock, flags);
+ spin_lock(&pfdev->as_lock);
+ as_nr = mmu->as;
+
+ if (as_nr < 0) {
+ spin_unlock(&pfdev->as_lock);
+ return 0;
+ }
if (op != AS_COMMAND_UNLOCK)
lock_region(pfdev, as_nr, iova, size);
@@ -100,20 +103,18 @@ static int mmu_hw_do_operation(struct panfrost_device *pfdev, u32 as_nr,
/* Wait for the flush to complete */
ret = wait_ready(pfdev, as_nr);
- spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags);
+ spin_unlock(&pfdev->as_lock);
return ret;
}
-void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr)
+static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
- struct io_pgtable_cfg *cfg = &pfdev->mmu->pgtbl_cfg;
+ int as_nr = mmu->as;
+ struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
- mmu_write(pfdev, MMU_INT_CLEAR, ~0);
- mmu_write(pfdev, MMU_INT_MASK, ~0);
-
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
@@ -137,6 +138,80 @@ static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
+u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
+{
+ int as;
+
+ spin_lock(&pfdev->as_lock);
+
+ as = mmu->as;
+ if (as >= 0) {
+ int en = atomic_inc_return(&mmu->as_count);
+ WARN_ON(en >= NUM_JOB_SLOTS);
+
+ list_move(&mmu->list, &pfdev->as_lru_list);
+ goto out;
+ }
+
+ /* Check for a free AS */
+ as = ffz(pfdev->as_alloc_mask);
+ if (!(BIT(as) & pfdev->features.as_present)) {
+ struct panfrost_mmu *lru_mmu;
+
+ list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
+ if (!atomic_read(&lru_mmu->as_count))
+ break;
+ }
+ WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);
+
+ list_del_init(&lru_mmu->list);
+ as = lru_mmu->as;
+
+ WARN_ON(as < 0);
+ lru_mmu->as = -1;
+ }
+
+ /* Assign the free or reclaimed AS to the FD */
+ mmu->as = as;
+ set_bit(as, &pfdev->as_alloc_mask);
+ atomic_set(&mmu->as_count, 1);
+ list_add(&mmu->list, &pfdev->as_lru_list);
+
+ dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);
+
+ panfrost_mmu_enable(pfdev, mmu);
+
+out:
+ spin_unlock(&pfdev->as_lock);
+ return as;
+}
+
+void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
+{
+ atomic_dec(&mmu->as_count);
+ WARN_ON(atomic_read(&mmu->as_count) < 0);
+}
+
+void panfrost_mmu_reset(struct panfrost_device *pfdev)
+{
+ struct panfrost_mmu *mmu, *mmu_tmp;
+
+ spin_lock(&pfdev->as_lock);
+
+ pfdev->as_alloc_mask = 0;
+
+ list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
+ mmu->as = -1;
+ atomic_set(&mmu->as_count, 0);
+ list_del_init(&mmu->list);
+ }
+
+ spin_unlock(&pfdev->as_lock);
+
+ mmu_write(pfdev, MMU_INT_CLEAR, ~0);
+ mmu_write(pfdev, MMU_INT_MASK, ~0);
+}
+
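
panfrost_mmu_as_get() above hands out hardware address-space slots with ffz() on as_alloc_mask and falls back to evicting the least-recently-used idle context once every slot in as_present is taken. A userspace model of the bitmask half (ffz() approximated with a GCC builtin):

#include <stdio.h>

/* Model of the AS slot bitmask used by panfrost_mmu_as_get(). */
static int ffz_demo(unsigned long mask)
{
        return __builtin_ctzl(~mask);   /* first zero bit, like ffz() */
}

int main(void)
{
        unsigned long as_alloc_mask = 0;
        unsigned long as_present = 0xff;        /* 8 hardware slots */
        int i;

        for (i = 0; i < 10; i++) {
                int as = ffz_demo(as_alloc_mask);

                if (!((1UL << as) & as_present)) {
                        /* Slot pool exhausted: reclaim the LRU entry. */
                        printf("alloc %d: would evict LRU AS\n", i);
                        continue;
                }
                as_alloc_mask |= 1UL << as;
                printf("alloc %d: got AS%d\n", i, as);
        }
        return 0;
}
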
static size_t get_pgsize(u64 addr, size_t size)
{
if (addr & (SZ_2M - 1) || size < SZ_2M)
@@ -145,50 +220,63 @@ static size_t get_pgsize(u64 addr, size_t size)
return SZ_2M;
}
-int panfrost_mmu_map(struct panfrost_gem_object *bo)
+static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
+ u64 iova, int prot, struct sg_table *sgt)
{
- struct drm_gem_object *obj = &bo->base.base;
- struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
- struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
- u64 iova = bo->node.start << PAGE_SHIFT;
unsigned int count;
struct scatterlist *sgl;
- struct sg_table *sgt;
- int ret;
+ struct io_pgtable_ops *ops = mmu->pgtbl_ops;
+ u64 start_iova = iova;
- if (WARN_ON(bo->is_mapped))
- return 0;
-
- sgt = drm_gem_shmem_get_pages_sgt(obj);
- if (WARN_ON(IS_ERR(sgt)))
- return PTR_ERR(sgt);
-
- ret = pm_runtime_get_sync(pfdev->dev);
- if (ret < 0)
- return ret;
-
- mutex_lock(&pfdev->mmu->lock);
+ mutex_lock(&mmu->lock);
for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
unsigned long paddr = sg_dma_address(sgl);
size_t len = sg_dma_len(sgl);
- dev_dbg(pfdev->dev, "map: iova=%llx, paddr=%lx, len=%zx", iova, paddr, len);
+ dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
while (len) {
size_t pgsize = get_pgsize(iova | paddr, len);
- ops->map(ops, iova, paddr, pgsize, IOMMU_WRITE | IOMMU_READ);
+ ops->map(ops, iova, paddr, pgsize, prot);
iova += pgsize;
paddr += pgsize;
len -= pgsize;
}
}
- mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
- bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
+ mmu_hw_do_operation(pfdev, mmu, start_iova, iova - start_iova,
+ AS_COMMAND_FLUSH_PT);
- mutex_unlock(&pfdev->mmu->lock);
+ mutex_unlock(&mmu->lock);
+
+ return 0;
+}
+
+int panfrost_mmu_map(struct panfrost_gem_object *bo)
+{
+ struct drm_gem_object *obj = &bo->base.base;
+ struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
+ struct sg_table *sgt;
+ int ret;
+ int prot = IOMMU_READ | IOMMU_WRITE;
+
+ if (WARN_ON(bo->is_mapped))
+ return 0;
+
+ if (bo->noexec)
+ prot |= IOMMU_NOEXEC;
+
+ sgt = drm_gem_shmem_get_pages_sgt(obj);
+ if (WARN_ON(IS_ERR(sgt)))
+ return PTR_ERR(sgt);
+
+ ret = pm_runtime_get_sync(pfdev->dev);
+ if (ret < 0)
+ return ret;
+
+ mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
@@ -201,7 +289,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
{
struct drm_gem_object *obj = &bo->base.base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
- struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
+ struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
u64 iova = bo->node.start << PAGE_SHIFT;
size_t len = bo->node.size << PAGE_SHIFT;
size_t unmapped_len = 0;
@@ -210,30 +298,30 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
if (WARN_ON(!bo->is_mapped))
return;
- dev_dbg(pfdev->dev, "unmap: iova=%llx, len=%zx", iova, len);
+ dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);
ret = pm_runtime_get_sync(pfdev->dev);
if (ret < 0)
return;
- mutex_lock(&pfdev->mmu->lock);
+ mutex_lock(&bo->mmu->lock);
while (unmapped_len < len) {
size_t unmapped_page;
size_t pgsize = get_pgsize(iova, len - unmapped_len);
- unmapped_page = ops->unmap(ops, iova, pgsize);
- if (!unmapped_page)
- break;
-
- iova += unmapped_page;
- unmapped_len += unmapped_page;
+ if (ops->iova_to_phys(ops, iova)) {
+ unmapped_page = ops->unmap(ops, iova, pgsize);
+ WARN_ON(unmapped_page != pgsize);
+ }
+ iova += pgsize;
+ unmapped_len += pgsize;
}
- mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
+ mmu_hw_do_operation(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT,
bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
- mutex_unlock(&pfdev->mmu->lock);
+ mutex_unlock(&bo->mmu->lock);
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
@@ -242,9 +330,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
static void mmu_tlb_inv_context_s1(void *cookie)
{
- struct panfrost_device *pfdev = cookie;
+ struct panfrost_file_priv *priv = cookie;
- mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
+ mmu_hw_do_operation(priv->pfdev, &priv->mmu, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
}
static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
@@ -263,6 +351,163 @@ static const struct iommu_gather_ops mmu_tlb_ops = {
.tlb_sync = mmu_tlb_sync_context,
};
+int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
+{
+ struct panfrost_mmu *mmu = &priv->mmu;
+ struct panfrost_device *pfdev = priv->pfdev;
+
+ mutex_init(&mmu->lock);
+ INIT_LIST_HEAD(&mmu->list);
+ mmu->as = -1;
+
+ mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
+ .pgsize_bitmap = SZ_4K | SZ_2M,
+ .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
+ .oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
+ .tlb = &mmu_tlb_ops,
+ .iommu_dev = pfdev->dev,
+ };
+
+ mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
+ priv);
+ if (!mmu->pgtbl_ops)
+ return -EINVAL;
+
+ return 0;
+}
+
+void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
+{
+ struct panfrost_device *pfdev = priv->pfdev;
+ struct panfrost_mmu *mmu = &priv->mmu;
+
+ spin_lock(&pfdev->as_lock);
+ if (mmu->as >= 0) {
+ clear_bit(mmu->as, &pfdev->as_alloc_mask);
+ clear_bit(mmu->as, &pfdev->as_in_use_mask);
+ list_del(&mmu->list);
+ }
+ spin_unlock(&pfdev->as_lock);
+
+ free_io_pgtable_ops(mmu->pgtbl_ops);
+}
+
+static struct drm_mm_node *addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
+{
+ struct drm_mm_node *node = NULL;
+ u64 offset = addr >> PAGE_SHIFT;
+ struct panfrost_mmu *mmu;
+
+ spin_lock(&pfdev->as_lock);
+ list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
+ struct panfrost_file_priv *priv;
+ if (as != mmu->as)
+ continue;
+
+ priv = container_of(mmu, struct panfrost_file_priv, mmu);
+ drm_mm_for_each_node(node, &priv->mm) {
+ if (offset >= node->start && offset < (node->start + node->size))
+ goto out;
+ }
+ }
+
+out:
+ spin_unlock(&pfdev->as_lock);
+ return node;
+}
+
+#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
+
+int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
+{
+ int ret, i;
+ struct drm_mm_node *node;
+ struct panfrost_gem_object *bo;
+ struct address_space *mapping;
+ pgoff_t page_offset;
+ struct sg_table *sgt;
+ struct page **pages;
+
+ node = addr_to_drm_mm_node(pfdev, as, addr);
+ if (!node)
+ return -ENOENT;
+
+ bo = drm_mm_node_to_panfrost_bo(node);
+ if (!bo->is_heap) {
+ dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
+ node->start << PAGE_SHIFT);
+ return -EINVAL;
+ }
+ WARN_ON(bo->mmu->as != as);
+
+ /* Assume 2MB alignment and size multiple */
+ addr &= ~((u64)SZ_2M - 1);
+ page_offset = addr >> PAGE_SHIFT;
+ page_offset -= node->start;
+
+ mutex_lock(&bo->base.pages_lock);
+
+ if (!bo->base.pages) {
+ bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
+ sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
+ if (!bo->sgts) {
+ mutex_unlock(&bo->base.pages_lock);
+ return -ENOMEM;
+ }
+
+ pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
+ sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
+ if (!pages) {
+ kfree(bo->sgts);
+ bo->sgts = NULL;
+ mutex_unlock(&bo->base.pages_lock);
+ return -ENOMEM;
+ }
+ bo->base.pages = pages;
+ bo->base.pages_use_count = 1;
+ } else {
+ pages = bo->base.pages;
+ }
+
+ mapping = bo->base.base.filp->f_mapping;
+ mapping_set_unevictable(mapping);
+
+ for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
+ pages[i] = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(pages[i])) {
+ mutex_unlock(&bo->base.pages_lock);
+ ret = PTR_ERR(pages[i]);
+ goto err_pages;
+ }
+ }
+
+ mutex_unlock(&bo->base.pages_lock);
+
+ sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
+ ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
+ NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
+ if (ret)
+ goto err_pages;
+
+ if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) {
+ ret = -EINVAL;
+ goto err_map;
+ }
+
+ mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+
+ bo->is_mapped = true;
+
+ dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
+
+ return 0;
+
+err_map:
+ sg_free_table(sgt);
+err_pages:
+ drm_gem_shmem_put_pages(&bo->base);
+ return ret;
+}
+
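
Heap faults are served in 2MB chunks: the fault address is rounded down to 2MB, NUM_FAULT_PAGES (512 with 4KB pages) pages are populated, and one sg_table per chunk is filled in. A standalone sketch of the index arithmetic, with example addresses:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT      12
#define SZ_2M           (2UL << 20)
#define NUM_FAULT_PAGES (SZ_2M / (1UL << PAGE_SHIFT))   /* 512 */

int main(void)
{
        uint64_t node_start = 0x8000;           /* BO base, in pages */
        uint64_t fault_addr = 0x08765432;       /* faulting GPU VA */
        uint64_t addr = fault_addr & ~(SZ_2M - 1);
        uint64_t page_offset = (addr >> PAGE_SHIFT) - node_start;

        /* One sg_table per 2MB chunk of the heap BO: sgts[3] here. */
        printf("chunk base %#llx, pages[%llu..%llu], sgts[%llu]\n",
               (unsigned long long)addr,
               (unsigned long long)page_offset,
               (unsigned long long)(page_offset + NUM_FAULT_PAGES - 1),
               (unsigned long long)(page_offset / NUM_FAULT_PAGES));
        return 0;
}
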
static const char *access_type_name(struct panfrost_device *pfdev,
u32 fault_status)
{
@@ -287,13 +532,19 @@ static const char *access_type_name(struct panfrost_device *pfdev,
static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
struct panfrost_device *pfdev = data;
- u32 status = mmu_read(pfdev, MMU_INT_STAT);
- int i;
- if (!status)
+ if (!mmu_read(pfdev, MMU_INT_STAT))
return IRQ_NONE;
- dev_err(pfdev->dev, "mmu irq status=%x\n", status);
+ mmu_write(pfdev, MMU_INT_MASK, 0);
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
+{
+ struct panfrost_device *pfdev = data;
+ u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
+ int i, ret;
for (i = 0; status; i++) {
u32 mask = BIT(i) | BIT(i + 16);
@@ -315,6 +566,18 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
access_type = (fault_status >> 8) & 0x3;
source_id = (fault_status >> 16);
+ /* Page fault only */
+ if ((status & mask) == BIT(i)) {
+ WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);
+
+ ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
+ if (!ret) {
+ mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
+ status &= ~mask;
+ continue;
+ }
+ }
+
/* terminal fault, print info about the fault */
dev_err(pfdev->dev,
"Unhandled Page fault in AS%d at VA 0x%016llX\n"
@@ -337,50 +600,26 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
status &= ~mask;
}
+ mmu_write(pfdev, MMU_INT_MASK, ~0);
return IRQ_HANDLED;
};
int panfrost_mmu_init(struct panfrost_device *pfdev)
{
- struct io_pgtable_ops *pgtbl_ops;
int err, irq;
- pfdev->mmu = devm_kzalloc(pfdev->dev, sizeof(*pfdev->mmu), GFP_KERNEL);
- if (!pfdev->mmu)
- return -ENOMEM;
-
- mutex_init(&pfdev->mmu->lock);
-
irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
if (irq <= 0)
return -ENODEV;
- err = devm_request_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
- IRQF_SHARED, "mmu", pfdev);
+ err = devm_request_threaded_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
+ panfrost_mmu_irq_handler_thread,
+ IRQF_SHARED, "mmu", pfdev);
if (err) {
dev_err(pfdev->dev, "failed to request mmu irq");
return err;
}
- mmu_write(pfdev, MMU_INT_CLEAR, ~0);
- mmu_write(pfdev, MMU_INT_MASK, ~0);
-
- pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
- .pgsize_bitmap = SZ_4K | SZ_2M,
- .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
- .oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
- .tlb = &mmu_tlb_ops,
- .iommu_dev = pfdev->dev,
- };
-
- pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &pfdev->mmu->pgtbl_cfg,
- pfdev);
- if (!pgtbl_ops)
- return -ENOMEM;
-
- pfdev->mmu->pgtbl_ops = pgtbl_ops;
-
- panfrost_mmu_enable(pfdev, 0);
return 0;
}
@@ -389,6 +628,4 @@ void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
mmu_write(pfdev, MMU_INT_MASK, 0);
mmu_disable(pfdev, 0);
-
- free_io_pgtable_ops(pfdev->mmu->pgtbl_ops);
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
index f5878d86a5ce..7c5b6775ae23 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
@@ -5,13 +5,20 @@
#define __PANFROST_MMU_H__
struct panfrost_gem_object;
+struct panfrost_file_priv;
+struct panfrost_mmu;
int panfrost_mmu_map(struct panfrost_gem_object *bo);
void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
int panfrost_mmu_init(struct panfrost_device *pfdev);
void panfrost_mmu_fini(struct panfrost_device *pfdev);
+void panfrost_mmu_reset(struct panfrost_device *pfdev);
-void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr);
+u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
+void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
+
+int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv);
+void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv);
#endif
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 9a153125e5d2..024771a4083e 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -128,6 +128,7 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
struct drm_framebuffer *fb = plane->state->fb;
struct drm_connector *connector = priv->connector;
struct drm_bridge *bridge = priv->bridge;
+ bool grayscale = false;
u32 cntl;
u32 ppl, hsw, hfp, hbp;
u32 lpp, vsw, vfp, vbp;
@@ -187,6 +188,20 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
if (connector->display_info.bus_flags &
DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
tim2 |= TIM2_IPC;
+
+ if (connector->display_info.num_bus_formats == 1 &&
+ connector->display_info.bus_formats[0] ==
+ MEDIA_BUS_FMT_Y8_1X8)
+ grayscale = true;
+
+ /*
+ * The AC pin bias frequency is set to the maximum count when using
+ * grayscale, so that the polarity is reversed at least once in a
+ * while, getting rid of any DC build-up that could damage the
+ * display.
+ */
+ if (grayscale)
+ tim2 |= TIM2_ACB_MASK;
}
if (bridge) {
@@ -218,8 +233,18 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
writel(0, priv->regs + CLCD_TIM3);
- /* Hard-code TFT panel */
- cntl = CNTL_LCDEN | CNTL_LCDTFT | CNTL_LCDVCOMP(1);
+ /*
+ * Detect the grayscale bus format. We do not expose a grayscale
+ * mode to userspace; instead we expose an RGB24 buffer and the
+ * hardware activates its grayscaler to convert it to the grayscale
+ * format.
+ */
+ if (grayscale)
+ cntl = CNTL_LCDEN | CNTL_LCDMONO8;
+ else
+ /* Else we assume TFT display */
+ cntl = CNTL_LCDEN | CNTL_LCDTFT | CNTL_LCDVCOMP(1);
+
/* On the ST Micro variant, assume all 24 bits are connected */
if (priv->variant->st_bitmux_control)
cntl |= CNTL_ST_CDWID_24;
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 94439212a5c5..a4f4175bbdbe 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -57,7 +57,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
struct qxl_bo *bo;
list_for_each_entry(bo, &qdev->gem.objects, list) {
- struct reservation_object_list *fobj;
+ struct dma_resv_list *fobj;
int rel;
rcu_read_lock();
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index df55b83e0a55..312216caeea2 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -238,7 +238,7 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
return ret;
}
- ret = reservation_object_reserve_shared(bo->tbo.base.resv, 1);
+ ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
if (ret)
return ret;
@@ -458,9 +458,9 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
- reservation_object_add_shared_fence(bo->base.resv, &release->base);
+ dma_resv_add_shared_fence(bo->base.resv, &release->base);
ttm_bo_add_to_lru(bo);
- reservation_object_unlock(bo->base.resv);
+ dma_resv_unlock(bo->base.resv);
}
spin_unlock(&glob->lru_lock);
ww_acquire_fini(&release->ticket);
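
The qxl and radeon hunks here and below are the mechanical half of the reservation_object -> dma_resv rename from earlier in this series; the semantics are unchanged, only the prefixes move. The common fence-publish pattern under the new names, as a sketch (return values elided):

#include <linux/dma-resv.h>

static void example_publish_fence(struct dma_resv *resv,
                                  struct dma_fence *fence)
{
        dma_resv_lock(resv, NULL);              /* was reservation_object_lock() */
        dma_resv_add_excl_fence(resv, fence);   /* was ..._add_excl_fence() */
        dma_resv_unlock(resv);                  /* was ..._unlock() */
}
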
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 40f4d29edfe2..62eab82a64f9 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3659,7 +3659,7 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv)
+ struct dma_resv *resv)
{
struct radeon_fence *fence;
struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 589217a7e435..35b9dc6ce46a 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -579,7 +579,7 @@ void cik_sdma_fini(struct radeon_device *rdev)
struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv)
+ struct dma_resv *resv)
{
struct radeon_fence *fence;
struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 5505a04ca402..a46ee6c2099d 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -108,7 +108,7 @@ struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv)
+ struct dma_resv *resv)
{
struct radeon_fence *fence;
struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 5c05193da520..7089dfc8c2a9 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -891,7 +891,7 @@ struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv)
+ struct dma_resv *resv)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_fence *fence;
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 9ce6dd83d284..840401413c58 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -84,7 +84,7 @@ struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv)
+ struct dma_resv *resv)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_fence *fence;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 7d175a9e8330..e937cc01910d 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2963,7 +2963,7 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv)
+ struct dma_resv *resv)
{
struct radeon_fence *fence;
struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 35d92ef8a0d4..af6c0da45f28 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -444,7 +444,7 @@ void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv)
+ struct dma_resv *resv)
{
struct radeon_fence *fence;
struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 7fc40345b607..05b88491ccb9 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -619,7 +619,7 @@ void radeon_sync_fence(struct radeon_sync *sync,
struct radeon_fence *fence);
int radeon_sync_resv(struct radeon_device *rdev,
struct radeon_sync *sync,
- struct reservation_object *resv,
+ struct dma_resv *resv,
bool shared);
int radeon_sync_rings(struct radeon_device *rdev,
struct radeon_sync *sync,
@@ -1912,20 +1912,20 @@ struct radeon_asic {
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
u32 blit_ring_index;
struct radeon_fence *(*dma)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
u32 dma_ring_index;
/* method used for bo copy */
struct radeon_fence *(*copy)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
/* ring used for bo copies */
u32 copy_ring_index;
} copy;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index e3f036c20d64..a74fa18cd27b 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -86,7 +86,7 @@ struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
@@ -157,7 +157,7 @@ struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
void r200_set_safe_registers(struct radeon_device *rdev);
/*
@@ -347,11 +347,11 @@ int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -473,7 +473,7 @@ void r700_cp_fini(struct radeon_device *rdev);
struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
u32 rv770_get_xclk(struct radeon_device *rdev);
int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int rv770_get_temp(struct radeon_device *rdev);
@@ -547,7 +547,7 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
int evergreen_get_temp(struct radeon_device *rdev);
int evergreen_get_allowed_info_register(struct radeon_device *rdev,
u32 reg, u32 *val);
@@ -725,7 +725,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
void si_dma_vm_copy_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
@@ -796,11 +796,11 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv);
+ struct dma_resv *resv);
int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 1ea50ce16312..ac9a5ec481c3 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -35,7 +35,7 @@
static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
uint64_t saddr, uint64_t daddr,
int flag, int n,
- struct reservation_object *resv)
+ struct dma_resv *resv)
{
unsigned long start_jiffies;
unsigned long end_jiffies;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 7e5254a34e84..7b5460678382 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -255,7 +255,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
int r;
list_for_each_entry(reloc, &p->validated, tv.head) {
- struct reservation_object *resv;
+ struct dma_resv *resv;
resv = reloc->robj->tbo.base.resv;
r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 7bf73230ac0b..e81b01f8db90 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.base.resv));
+ work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv));
radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
radeon_bo_unreserve(new_rbo);
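Aside, not part of the patch: the flip path above pins the new buffer and then snapshots only the exclusive fence (the last writer), since scanout must wait for rendering to complete but not for readers. As a one-function sketch (hypothetical helper; the reservation is held by the caller, as it is here):

/* Hypothetical: grab a reference to the last writer of a buffer. */
static struct dma_fence *snapshot_last_writer(struct dma_resv *resv)
{
	/* dma_resv_get_excl() requires the reservation lock to be held;
	 * dma_fence_get() tolerates a NULL fence and returns NULL.
	 */
	return dma_fence_get(dma_resv_get_excl(resv));
}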
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 03873f21a734..4cf58dbbe439 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -114,7 +114,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
/* Asking for cpu access wait for object idle */
- r = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+ r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
if (!r)
r = -EBUSY;
@@ -449,7 +449,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
}
robj = gem_to_radeon_bo(gobj);
- r = reservation_object_test_signaled_rcu(robj->tbo.base.resv, true);
+ r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
if (r == 0)
r = -EBUSY;
else
@@ -478,7 +478,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
}
robj = gem_to_radeon_bo(gobj);
- ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+ ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
if (ret == 0)
r = -EBUSY;
else if (ret < 0)
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 0d64ace0e6c1..6902f998ede9 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -163,7 +163,7 @@ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
continue;
}
- r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
+ r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
true, false, MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 9db8ba29ef68..2abe1eab471f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -183,7 +183,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align, bool kernel,
u32 domain, u32 flags, struct sg_table *sg,
- struct reservation_object *resv,
+ struct dma_resv *resv,
struct radeon_bo **bo_ptr)
{
struct radeon_bo *bo;
@@ -610,7 +610,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
int steal;
int i;
- reservation_object_assert_held(bo->tbo.base.resv);
+ dma_resv_assert_held(bo->tbo.base.resv);
if (!bo->tiling_flags)
return 0;
@@ -736,7 +736,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
uint32_t *tiling_flags,
uint32_t *pitch)
{
- reservation_object_assert_held(bo->tbo.base.resv);
+ dma_resv_assert_held(bo->tbo.base.resv);
if (tiling_flags)
*tiling_flags = bo->tiling_flags;
@@ -748,7 +748,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop)
{
if (!force_drop)
- reservation_object_assert_held(bo->tbo.base.resv);
+ dma_resv_assert_held(bo->tbo.base.resv);
if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
return 0;
@@ -870,10 +870,10 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
bool shared)
{
- struct reservation_object *resv = bo->tbo.base.resv;
+ struct dma_resv *resv = bo->tbo.base.resv;
if (shared)
- reservation_object_add_shared_fence(resv, &fence->base);
+ dma_resv_add_shared_fence(resv, &fence->base);
else
- reservation_object_add_excl_fence(resv, &fence->base);
+ dma_resv_add_excl_fence(resv, &fence->base);
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index e5554bf9140e..d23f2ed4126e 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -126,7 +126,7 @@ extern int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u32 flags,
struct sg_table *sg,
- struct reservation_object *resv,
+ struct dma_resv *resv,
struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 52b0d0cd8cbe..b906e8fbd5f3 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -63,15 +63,15 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sg)
{
- struct reservation_object *resv = attach->dmabuf->resv;
+ struct dma_resv *resv = attach->dmabuf->resv;
struct radeon_device *rdev = dev->dev_private;
struct radeon_bo *bo;
int ret;
- reservation_object_lock(resv, NULL);
+ dma_resv_lock(resv, NULL);
ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
- reservation_object_unlock(resv);
+ dma_resv_unlock(resv);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 8c9780b5a884..55cc77a73c7b 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -87,30 +87,30 @@ void radeon_sync_fence(struct radeon_sync *sync,
*/
int radeon_sync_resv(struct radeon_device *rdev,
struct radeon_sync *sync,
- struct reservation_object *resv,
+ struct dma_resv *resv,
bool shared)
{
- struct reservation_object_list *flist;
+ struct dma_resv_list *flist;
struct dma_fence *f;
struct radeon_fence *fence;
unsigned i;
int r = 0;
/* always sync to the exclusive fence */
- f = reservation_object_get_excl(resv);
+ f = dma_resv_get_excl(resv);
fence = f ? to_radeon_fence(f) : NULL;
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
else if (f)
r = dma_fence_wait(f, true);
- flist = reservation_object_get_list(resv);
+ flist = dma_resv_get_list(resv);
if (shared || !flist || r)
return r;
for (i = 0; i < flist->shared_count; ++i) {
f = rcu_dereference_protected(flist->shared[i],
- reservation_object_held(resv));
+ dma_resv_held(resv));
fence = to_radeon_fence(f);
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
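Aside, not part of the patch: the renamed helpers keep the sync idiom radeon_sync_resv() shows above: take the exclusive (write) fence first, then walk the shared (read) list under the held reservation lock. A minimal sketch with the same pre-5.4 dma_resv names, assuming resv is locked by the caller:

/* Hypothetical illustration only -- mirrors radeon_sync_resv() above. */
static long sync_all_fences_example(struct dma_resv *resv)
{
	struct dma_resv_list *list;
	struct dma_fence *excl;
	unsigned int i;
	long r = 0;

	excl = dma_resv_get_excl(resv);		/* may be NULL */
	if (excl)
		r = dma_fence_wait(excl, false);

	list = dma_resv_get_list(resv);		/* shared fences */
	for (i = 0; r >= 0 && list && i < list->shared_count; ++i) {
		struct dma_fence *f =
			rcu_dereference_protected(list->shared[i],
						  dma_resv_held(resv));
		r = dma_fence_wait(f, false);
	}
	return r;
}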
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 311e69c2ed7f..1ad5c3b86b64 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -477,7 +477,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return -EINVAL;
}
- f = reservation_object_get_excl(bo->tbo.base.resv);
+ f = dma_resv_get_excl(bo->tbo.base.resv);
if (f) {
r = radeon_fence_wait((struct radeon_fence *)f, false);
if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index e48a05533126..e0ad547786e8 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -831,7 +831,7 @@ static int radeon_vm_update_ptes(struct radeon_device *rdev,
int r;
radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true);
- r = reservation_object_reserve_shared(pt->tbo.base.resv, 1);
+ r = dma_resv_reserve_shared(pt->tbo.base.resv, 1);
if (r)
return r;
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index 0866b38ef264..4c91614b5e70 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -42,7 +42,7 @@
struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv)
+ struct dma_resv *resv)
{
struct radeon_fence *fence;
struct radeon_sync sync;
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 4773bb7d947e..d2fa302a5be9 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -231,7 +231,7 @@ void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
- struct reservation_object *resv)
+ struct dma_resv *resv)
{
struct radeon_fence *fence;
struct radeon_sync sync;
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 1c62578590f4..52c5f1ab8277 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -71,11 +71,11 @@ struct rcar_lvds {
bool dual_link;
};
-#define bridge_to_rcar_lvds(bridge) \
- container_of(bridge, struct rcar_lvds, bridge)
+#define bridge_to_rcar_lvds(b) \
+ container_of(b, struct rcar_lvds, bridge)
-#define connector_to_rcar_lvds(connector) \
- container_of(connector, struct rcar_lvds, connector)
+#define connector_to_rcar_lvds(c) \
+ container_of(c, struct rcar_lvds, connector)
static void rcar_lvds_write(struct rcar_lvds *lvds, u32 reg, u32 data)
{
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index 8ca5af0c912f..a44dca4b0219 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -97,10 +97,34 @@ crtcs_exit:
return crtcs;
}
+static int sun8i_dw_hdmi_find_connector_pdev(struct device *dev,
+ struct platform_device **pdev_out)
+{
+ struct platform_device *pdev;
+ struct device_node *remote;
+
+ remote = of_graph_get_remote_node(dev->of_node, 1, -1);
+ if (!remote)
+ return -ENODEV;
+
+ if (!of_device_is_compatible(remote, "hdmi-connector")) {
+ of_node_put(remote);
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(remote);
+ of_node_put(remote);
+ if (!pdev)
+ return -ENODEV;
+
+ *pdev_out = pdev;
+ return 0;
+}
+
static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
+ struct platform_device *pdev = to_platform_device(dev), *connector_pdev;
struct dw_hdmi_plat_data *plat_data;
struct drm_device *drm = data;
struct device_node *phy_node;
@@ -150,16 +174,30 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
return PTR_ERR(hdmi->regulator);
}
+ ret = sun8i_dw_hdmi_find_connector_pdev(dev, &connector_pdev);
+ if (!ret) {
+ hdmi->ddc_en = gpiod_get_optional(&connector_pdev->dev,
+ "ddc-en", GPIOD_OUT_HIGH);
+ platform_device_put(connector_pdev);
+
+ if (IS_ERR(hdmi->ddc_en)) {
+ dev_err(dev, "Couldn't get ddc-en gpio\n");
+ return PTR_ERR(hdmi->ddc_en);
+ }
+ }
+
ret = regulator_enable(hdmi->regulator);
if (ret) {
dev_err(dev, "Failed to enable regulator\n");
- return ret;
+ goto err_unref_ddc_en;
}
+ gpiod_set_value(hdmi->ddc_en, 1);
+
ret = reset_control_deassert(hdmi->rst_ctrl);
if (ret) {
dev_err(dev, "Could not deassert ctrl reset control\n");
- goto err_disable_regulator;
+ goto err_disable_ddc_en;
}
ret = clk_prepare_enable(hdmi->clk_tmds);
@@ -212,8 +250,12 @@ err_disable_clk_tmds:
clk_disable_unprepare(hdmi->clk_tmds);
err_assert_ctrl_reset:
reset_control_assert(hdmi->rst_ctrl);
-err_disable_regulator:
+err_disable_ddc_en:
+ gpiod_set_value(hdmi->ddc_en, 0);
regulator_disable(hdmi->regulator);
+err_unref_ddc_en:
+ if (hdmi->ddc_en)
+ gpiod_put(hdmi->ddc_en);
return ret;
}
@@ -227,7 +269,11 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
sun8i_hdmi_phy_remove(hdmi);
clk_disable_unprepare(hdmi->clk_tmds);
reset_control_assert(hdmi->rst_ctrl);
+ gpiod_set_value(hdmi->ddc_en, 0);
regulator_disable(hdmi->regulator);
+
+ if (hdmi->ddc_en)
+ gpiod_put(hdmi->ddc_en);
}
static const struct component_ops sun8i_dw_hdmi_ops = {
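Aside, not part of the patch: the ddc-en descriptor is looked up on the connector's platform device rather than on the HDMI controller itself, so devm-based cleanup never applies to it; that is why both the error path and unbind release it explicitly. The ownership pairing, reduced to a hypothetical helper:

/* Hypothetical sketch of the get/put pairing used above. */
static struct gpio_desc *example_get_ddc_en(struct device *dev)
{
	struct platform_device *conn;
	struct gpio_desc *ddc_en = NULL;

	if (!sun8i_dw_hdmi_find_connector_pdev(dev, &conn)) {
		/* non-devm lookup: the caller owns the descriptor */
		ddc_en = gpiod_get_optional(&conn->dev, "ddc-en",
					    GPIOD_OUT_HIGH);
		platform_device_put(conn);	/* pdev reference done */
	}
	return ddc_en;	/* release later with gpiod_put() */
}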
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index 720c5aa8adc1..d707c9171824 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -9,6 +9,7 @@
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_encoder.h>
#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -190,6 +191,7 @@ struct sun8i_dw_hdmi {
struct regulator *regulator;
const struct sun8i_dw_hdmi_quirks *quirks;
struct reset_control *rst_ctrl;
+ struct gpio_desc *ddc_en;
};
static inline struct sun8i_dw_hdmi *
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 4a75d149e368..fbf57bc3cdab 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -6,23 +6,28 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/iommu.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <soc/tegra/pmc.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank.h>
+
#include "dc.h"
#include "drm.h"
#include "gem.h"
#include "hub.h"
#include "plane.h"
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
-
static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 2d94da225e51..a0f6f9b0d258 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -8,14 +8,15 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
-#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
-#include <linux/reset.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <linux/workqueue.h>
#include <drm/drm_dp_helper.h>
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 870904bfad78..6fb7d74ff553 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -8,9 +8,17 @@
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_vblank.h>
#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 86daa19fcf24..29911eff9ceb 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -7,18 +7,17 @@
#ifndef HOST1X_DRM_H
#define HOST1X_DRM_H 1
-#include <uapi/drm/tegra_drm.h>
#include <linux/host1x.h>
#include <linux/iova.h>
#include <linux/of_gpio.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_probe_helper.h>
+#include <uapi/drm/tegra_drm.h>
#include "gem.h"
#include "hub.h"
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 2fbfefe9cb42..a5d47e301c5f 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -5,22 +5,24 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/host1x.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
-#include <linux/regulator/consumer.h>
+#include <video/mipi_display.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
-#include <video/mipi_display.h>
-
#include "dc.h"
#include "drm.h"
#include "dsi.h"
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 888ed0d74ccd..e34325c83d28 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -9,11 +9,13 @@
#include <linux/console.h>
-#include "drm.h"
-#include "gem.h"
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
+#include "drm.h"
+#include "gem.h"
+
#ifdef CONFIG_DRM_FBDEV_EMULATION
static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
{
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 0a3d925d5284..fb7667c8dd4c 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -12,6 +12,9 @@
#include <linux/dma-buf.h>
#include <linux/iommu.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>
#include "drm.h"
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index f1f758b25886..83ffb1e14ca3 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -11,7 +11,6 @@
#include <linux/host1x.h>
#include <drm/drm.h>
-#include <drm/drmP.h>
#include <drm/drm_gem.h>
#define TEGRA_BO_BOTTOM_UP (1 << 0)
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 8dbfb30344e7..641299cc85b8 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/iommu.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include "drm.h"
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 334c4d7d238b..50269ffbcb6b 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -6,9 +6,11 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/hdmi.h>
#include <linux/math64.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
@@ -16,6 +18,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include "hda.h"
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 92f202ec0577..839b49c40e51 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/host1x.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -13,9 +14,9 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include "drm.h"
diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
index 41541e261c91..767a60d9313c 100644
--- a/drivers/gpu/drm/tegra/hub.h
+++ b/drivers/gpu/drm/tegra/hub.h
@@ -6,7 +6,6 @@
#ifndef TEGRA_HUB_H
#define TEGRA_HUB_H 1
-#include <drm/drmP.h>
#include <drm/drm_plane.h>
#include "plane.h"
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index df80ca07e46e..6bab71d6e81d 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -5,6 +5,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include "dc.h"
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 4ffe3794e6d3..e1669ada0a40 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -8,6 +8,7 @@
#include <linux/debugfs.h>
#include <linux/gpio.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -17,7 +18,9 @@
#include <soc/tegra/pmc.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_file.h>
#include <drm/drm_panel.h>
#include <drm/drm_scdc_helper.h>
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 958548ef69e7..cd0399fd8c63 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index b6f47b8cf240..03d0e2df6774 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -33,7 +33,6 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
#define DRIVER_DATE "2019"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 1
/*
* The DLP has an actual width of 854 pixels, but that is not a multiple
@@ -45,6 +44,9 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
#define GM12U320_BLOCK_COUNT 20
+#define GM12U320_ERR(fmt, ...) \
+ DRM_DEV_ERROR(&gm12u320->udev->dev, fmt, ##__VA_ARGS__)
+
#define MISC_RCV_EPT 1
#define DATA_RCV_EPT 2
#define DATA_SND_EPT 3
@@ -220,7 +222,7 @@ static int gm12u320_misc_request(struct gm12u320_device *gm12u320,
usb_sndbulkpipe(gm12u320->udev, MISC_SND_EPT),
gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT);
if (ret || len != CMD_SIZE) {
- dev_err(&gm12u320->udev->dev, "Misc. req. error %d\n", ret);
+ GM12U320_ERR("Misc. req. error %d\n", ret);
return -EIO;
}
@@ -230,7 +232,7 @@ static int gm12u320_misc_request(struct gm12u320_device *gm12u320,
gm12u320->cmd_buf, MISC_VALUE_SIZE, &len,
DATA_TIMEOUT);
if (ret || len != MISC_VALUE_SIZE) {
- dev_err(&gm12u320->udev->dev, "Misc. value error %d\n", ret);
+ GM12U320_ERR("Misc. value error %d\n", ret);
return -EIO;
}
/* cmd_buf[0] now contains the read value, which we don't use */
@@ -241,7 +243,7 @@ static int gm12u320_misc_request(struct gm12u320_device *gm12u320,
gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
CMD_TIMEOUT);
if (ret || len != READ_STATUS_SIZE) {
- dev_err(&gm12u320->udev->dev, "Misc. status error %d\n", ret);
+ GM12U320_ERR("Misc. status error %d\n", ret);
return -EIO;
}
@@ -278,7 +280,7 @@ static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320)
vaddr = drm_gem_shmem_vmap(fb->obj[0]);
if (IS_ERR(vaddr)) {
- DRM_ERROR("failed to vmap fb: %ld\n", PTR_ERR(vaddr));
+ GM12U320_ERR("failed to vmap fb: %ld\n", PTR_ERR(vaddr));
goto put_fb;
}
@@ -286,7 +288,7 @@ static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320)
ret = dma_buf_begin_cpu_access(
fb->obj[0]->import_attach->dmabuf, DMA_FROM_DEVICE);
if (ret) {
- DRM_ERROR("dma_buf_begin_cpu_access err: %d\n", ret);
+ GM12U320_ERR("dma_buf_begin_cpu_access err: %d\n", ret);
goto vunmap;
}
}
@@ -329,7 +331,7 @@ static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320)
ret = dma_buf_end_cpu_access(fb->obj[0]->import_attach->dmabuf,
DMA_FROM_DEVICE);
if (ret)
- DRM_ERROR("dma_buf_end_cpu_access err: %d\n", ret);
+ GM12U320_ERR("dma_buf_end_cpu_access err: %d\n", ret);
}
vunmap:
drm_gem_shmem_vunmap(fb->obj[0], vaddr);
@@ -340,17 +342,6 @@ unlock:
mutex_unlock(&gm12u320->fb_update.lock);
}
-static int gm12u320_fb_update_ready(struct gm12u320_device *gm12u320)
-{
- int ret;
-
- mutex_lock(&gm12u320->fb_update.lock);
- ret = !gm12u320->fb_update.run || gm12u320->fb_update.fb != NULL;
- mutex_unlock(&gm12u320->fb_update.lock);
-
- return ret;
-}
-
static void gm12u320_fb_update_work(struct work_struct *work)
{
struct gm12u320_device *gm12u320 =
@@ -424,14 +415,15 @@ static void gm12u320_fb_update_work(struct work_struct *work)
* switches back to showing its logo.
*/
wait_event_timeout(gm12u320->fb_update.waitq,
- gm12u320_fb_update_ready(gm12u320),
+ !gm12u320->fb_update.run ||
+ gm12u320->fb_update.fb != NULL,
IDLE_TIMEOUT);
}
return;
err:
/* Do not log errors caused by module unload or device unplug */
- if (ret != -ECONNRESET && ret != -ESHUTDOWN)
- dev_err(&gm12u320->udev->dev, "Frame update error: %d\n", ret);
+ if (ret != -ENODEV && ret != -ECONNRESET && ret != -ESHUTDOWN)
+ GM12U320_ERR("Frame update error: %d\n", ret);
}
static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb,
@@ -746,7 +738,7 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
if (ret)
goto err_put;
- drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
+ drm_fbdev_generic_setup(dev, 0);
return 0;
@@ -765,9 +757,8 @@ static void gm12u320_usb_disconnect(struct usb_interface *interface)
drm_dev_put(dev);
}
-#ifdef CONFIG_PM
-static int gm12u320_suspend(struct usb_interface *interface,
- pm_message_t message)
+static __maybe_unused int gm12u320_suspend(struct usb_interface *interface,
+ pm_message_t message)
{
struct drm_device *dev = usb_get_intfdata(interface);
struct gm12u320_device *gm12u320 = dev->dev_private;
@@ -778,7 +769,7 @@ static int gm12u320_suspend(struct usb_interface *interface,
return 0;
}
-static int gm12u320_resume(struct usb_interface *interface)
+static __maybe_unused int gm12u320_resume(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
struct gm12u320_device *gm12u320 = dev->dev_private;
@@ -789,7 +780,6 @@ static int gm12u320_resume(struct usb_interface *interface)
return 0;
}
-#endif
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1de1, 0xc102) },
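Aside, not part of the patch: replacing the #ifdef CONFIG_PM guard with __maybe_unused keeps the suspend/resume callbacks compiling in every configuration, so they cannot silently bit-rot, while the compiler simply discards them when nothing references them. The pattern in isolation, as a sketch:

/* Hypothetical sketch of the __maybe_unused PM-callback pattern. */
static __maybe_unused int example_suspend(struct usb_interface *interface,
					  pm_message_t message)
{
	/* quiesce the device; built in all configs, dropped if unreferenced */
	return 0;
}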
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9157dcc897a2..20ff56f27aa4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -41,7 +41,7 @@
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
static void ttm_bo_global_kobj_release(struct kobject *kobj);
@@ -161,7 +161,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
atomic_dec(&bo->bdev->glob->bo_count);
dma_fence_put(bo->moving);
if (!ttm_bo_uses_embedded_gem_object(bo))
- reservation_object_fini(&bo->base._resv);
+ dma_resv_fini(&bo->base._resv);
mutex_destroy(&bo->wu_mutex);
bo->destroy(bo);
ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
@@ -173,7 +173,7 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
- reservation_object_assert_held(bo->base.resv);
+ dma_resv_assert_held(bo->base.resv);
if (!list_empty(&bo->lru))
return;
@@ -244,7 +244,7 @@ static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
struct ttm_lru_bulk_move *bulk)
{
- reservation_object_assert_held(bo->base.resv);
+ dma_resv_assert_held(bo->base.resv);
ttm_bo_del_from_lru(bo);
ttm_bo_add_to_lru(bo);
@@ -277,8 +277,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
if (!pos->first)
continue;
- reservation_object_assert_held(pos->first->base.resv);
- reservation_object_assert_held(pos->last->base.resv);
+ dma_resv_assert_held(pos->first->base.resv);
+ dma_resv_assert_held(pos->last->base.resv);
man = &pos->first->bdev->man[TTM_PL_TT];
list_bulk_move_tail(&man->lru[i], &pos->first->lru,
@@ -292,8 +292,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
if (!pos->first)
continue;
- reservation_object_assert_held(pos->first->base.resv);
- reservation_object_assert_held(pos->last->base.resv);
+ dma_resv_assert_held(pos->first->base.resv);
+ dma_resv_assert_held(pos->last->base.resv);
man = &pos->first->bdev->man[TTM_PL_VRAM];
list_bulk_move_tail(&man->lru[i], &pos->first->lru,
@@ -307,8 +307,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
if (!pos->first)
continue;
- reservation_object_assert_held(pos->first->base.resv);
- reservation_object_assert_held(pos->last->base.resv);
+ dma_resv_assert_held(pos->first->base.resv);
+ dma_resv_assert_held(pos->last->base.resv);
lru = &pos->first->bdev->glob->swap_lru[i];
list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
@@ -442,29 +442,29 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
if (bo->base.resv == &bo->base._resv)
return 0;
- BUG_ON(!reservation_object_trylock(&bo->base._resv));
+ BUG_ON(!dma_resv_trylock(&bo->base._resv));
- r = reservation_object_copy_fences(&bo->base._resv, bo->base.resv);
+ r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
if (r)
- reservation_object_unlock(&bo->base._resv);
+ dma_resv_unlock(&bo->base._resv);
return r;
}
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
- struct reservation_object_list *fobj;
+ struct dma_resv_list *fobj;
struct dma_fence *fence;
int i;
- fobj = reservation_object_get_list(&bo->base._resv);
- fence = reservation_object_get_excl(&bo->base._resv);
+ fobj = dma_resv_get_list(&bo->base._resv);
+ fence = dma_resv_get_excl(&bo->base._resv);
if (fence && !fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
for (i = 0; fobj && i < fobj->shared_count; ++i) {
fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(bo->base.resv));
+ dma_resv_held(bo->base.resv));
if (!fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
@@ -482,23 +482,23 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
/* Last resort, if we fail to allocate memory for the
* fences block for the BO to become idle
*/
- reservation_object_wait_timeout_rcu(bo->base.resv, true, false,
+ dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
30 * HZ);
spin_lock(&glob->lru_lock);
goto error;
}
spin_lock(&glob->lru_lock);
- ret = reservation_object_trylock(bo->base.resv) ? 0 : -EBUSY;
+ ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
if (!ret) {
- if (reservation_object_test_signaled_rcu(&bo->base._resv, true)) {
+ if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
if (bo->base.resv != &bo->base._resv)
- reservation_object_unlock(&bo->base._resv);
+ dma_resv_unlock(&bo->base._resv);
ttm_bo_cleanup_memtype_use(bo);
- reservation_object_unlock(bo->base.resv);
+ dma_resv_unlock(bo->base.resv);
return;
}
@@ -514,10 +514,10 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
ttm_bo_add_to_lru(bo);
}
- reservation_object_unlock(bo->base.resv);
+ dma_resv_unlock(bo->base.resv);
}
if (bo->base.resv != &bo->base._resv)
- reservation_object_unlock(&bo->base._resv);
+ dma_resv_unlock(&bo->base._resv);
error:
kref_get(&bo->list_kref);
@@ -546,7 +546,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
bool unlock_resv)
{
struct ttm_bo_global *glob = bo->bdev->glob;
- struct reservation_object *resv;
+ struct dma_resv *resv;
int ret;
if (unlikely(list_empty(&bo->ddestroy)))
@@ -554,7 +554,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
else
resv = &bo->base._resv;
- if (reservation_object_test_signaled_rcu(resv, true))
+ if (dma_resv_test_signaled_rcu(resv, true))
ret = 0;
else
ret = -EBUSY;
@@ -563,10 +563,10 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
long lret;
if (unlock_resv)
- reservation_object_unlock(bo->base.resv);
+ dma_resv_unlock(bo->base.resv);
spin_unlock(&glob->lru_lock);
- lret = reservation_object_wait_timeout_rcu(resv, true,
+ lret = dma_resv_wait_timeout_rcu(resv, true,
interruptible,
30 * HZ);
@@ -576,7 +576,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
return -EBUSY;
spin_lock(&glob->lru_lock);
- if (unlock_resv && !reservation_object_trylock(bo->base.resv)) {
+ if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
/*
* We raced, and lost, someone else holds the reservation now,
* and is probably busy in ttm_bo_cleanup_memtype_use.
@@ -593,7 +593,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
if (ret || unlikely(list_empty(&bo->ddestroy))) {
if (unlock_resv)
- reservation_object_unlock(bo->base.resv);
+ dma_resv_unlock(bo->base.resv);
spin_unlock(&glob->lru_lock);
return ret;
}
@@ -606,7 +606,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
ttm_bo_cleanup_memtype_use(bo);
if (unlock_resv)
- reservation_object_unlock(bo->base.resv);
+ dma_resv_unlock(bo->base.resv);
return 0;
}
@@ -634,12 +634,12 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
if (remove_all || bo->base.resv != &bo->base._resv) {
spin_unlock(&glob->lru_lock);
- reservation_object_lock(bo->base.resv, NULL);
+ dma_resv_lock(bo->base.resv, NULL);
spin_lock(&glob->lru_lock);
ttm_bo_cleanup_refs(bo, false, !remove_all, true);
- } else if (reservation_object_trylock(bo->base.resv)) {
+ } else if (dma_resv_trylock(bo->base.resv)) {
ttm_bo_cleanup_refs(bo, false, !remove_all, true);
} else {
spin_unlock(&glob->lru_lock);
@@ -711,7 +711,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
struct ttm_placement placement;
int ret = 0;
- reservation_object_assert_held(bo->base.resv);
+ dma_resv_assert_held(bo->base.resv);
placement.num_placement = 0;
placement.num_busy_placement = 0;
@@ -782,7 +782,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
bool ret = false;
if (bo->base.resv == ctx->resv) {
- reservation_object_assert_held(bo->base.resv);
+ dma_resv_assert_held(bo->base.resv);
if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
|| !list_empty(&bo->ddestroy))
ret = true;
@@ -790,7 +790,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
if (busy)
*busy = false;
} else {
- ret = reservation_object_trylock(bo->base.resv);
+ ret = dma_resv_trylock(bo->base.resv);
*locked = ret;
if (busy)
*busy = !ret;
@@ -818,10 +818,10 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
return -EBUSY;
if (ctx->interruptible)
- r = reservation_object_lock_interruptible(busy_bo->base.resv,
+ r = dma_resv_lock_interruptible(busy_bo->base.resv,
ticket);
else
- r = reservation_object_lock(busy_bo->base.resv, ticket);
+ r = dma_resv_lock(busy_bo->base.resv, ticket);
/*
* TODO: It would be better to keep the BO locked until allocation is at
@@ -829,7 +829,7 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
* of TTM.
*/
if (!r)
- reservation_object_unlock(busy_bo->base.resv);
+ dma_resv_unlock(busy_bo->base.resv);
return r == -EDEADLK ? -EBUSY : r;
}
@@ -855,7 +855,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
&busy)) {
if (busy && !busy_bo && ticket !=
- reservation_object_locking_ctx(bo->base.resv))
+ dma_resv_locking_ctx(bo->base.resv))
busy_bo = bo;
continue;
}
@@ -863,7 +863,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
if (place && !bdev->driver->eviction_valuable(bo,
place)) {
if (locked)
- reservation_object_unlock(bo->base.resv);
+ dma_resv_unlock(bo->base.resv);
continue;
}
break;
@@ -935,9 +935,9 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
spin_unlock(&man->move_lock);
if (fence) {
- reservation_object_add_shared_fence(bo->base.resv, fence);
+ dma_resv_add_shared_fence(bo->base.resv, fence);
- ret = reservation_object_reserve_shared(bo->base.resv, 1);
+ ret = dma_resv_reserve_shared(bo->base.resv, 1);
if (unlikely(ret)) {
dma_fence_put(fence);
return ret;
@@ -964,7 +964,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
struct ww_acquire_ctx *ticket;
int ret;
- ticket = reservation_object_locking_ctx(bo->base.resv);
+ ticket = dma_resv_locking_ctx(bo->base.resv);
do {
ret = (*man->func->get_node)(man, bo, place, mem);
if (unlikely(ret != 0))
@@ -1094,7 +1094,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
bool type_found = false;
int i, ret;
- ret = reservation_object_reserve_shared(bo->base.resv, 1);
+ ret = dma_resv_reserve_shared(bo->base.resv, 1);
if (unlikely(ret))
return ret;
@@ -1175,7 +1175,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
int ret = 0;
struct ttm_mem_reg mem;
- reservation_object_assert_held(bo->base.resv);
+ dma_resv_assert_held(bo->base.resv);
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
@@ -1245,7 +1245,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
int ret;
uint32_t new_flags;
- reservation_object_assert_held(bo->base.resv);
+ dma_resv_assert_held(bo->base.resv);
/*
* Check whether we need to move buffer.
*/
@@ -1282,7 +1282,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
struct ttm_operation_ctx *ctx,
size_t acc_size,
struct sg_table *sg,
- struct reservation_object *resv,
+ struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *))
{
int ret = 0;
@@ -1336,7 +1336,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
bo->sg = sg;
if (resv) {
bo->base.resv = resv;
- reservation_object_assert_held(bo->base.resv);
+ dma_resv_assert_held(bo->base.resv);
} else {
bo->base.resv = &bo->base._resv;
}
@@ -1345,7 +1345,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
* bo.gem is not initialized, so we have to setup the
* struct elements we want use regardless.
*/
- reservation_object_init(&bo->base._resv);
+ dma_resv_init(&bo->base._resv);
drm_vma_node_reset(&bo->base.vma_node);
}
atomic_inc(&bo->bdev->glob->bo_count);
@@ -1363,7 +1363,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
* since otherwise lockdep will be angered in radeon.
*/
if (!resv) {
- locked = reservation_object_trylock(bo->base.resv);
+ locked = dma_resv_trylock(bo->base.resv);
WARN_ON(!locked);
}
@@ -1397,7 +1397,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bool interruptible,
size_t acc_size,
struct sg_table *sg,
- struct reservation_object *resv,
+ struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *))
{
struct ttm_operation_ctx ctx = { interruptible, false };
@@ -1807,13 +1807,13 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
long timeout = 15 * HZ;
if (no_wait) {
- if (reservation_object_test_signaled_rcu(bo->base.resv, true))
+ if (dma_resv_test_signaled_rcu(bo->base.resv, true))
return 0;
else
return -EBUSY;
}
- timeout = reservation_object_wait_timeout_rcu(bo->base.resv, true,
+ timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
interruptible, timeout);
if (timeout < 0)
return timeout;
@@ -1821,7 +1821,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
if (timeout == 0)
return -EBUSY;
- reservation_object_add_excl_fence(bo->base.resv, NULL);
+ dma_resv_add_excl_fence(bo->base.resv, NULL);
return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
@@ -1937,7 +1937,7 @@ out:
* already swapped buffer.
*/
if (locked)
- reservation_object_unlock(bo->base.resv);
+ dma_resv_unlock(bo->base.resv);
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
@@ -1975,14 +1975,14 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
ret = mutex_lock_interruptible(&bo->wu_mutex);
if (unlikely(ret != 0))
return -ERESTARTSYS;
- if (!reservation_object_is_locked(bo->base.resv))
+ if (!dma_resv_is_locked(bo->base.resv))
goto out_unlock;
- ret = reservation_object_lock_interruptible(bo->base.resv, NULL);
+ ret = dma_resv_lock_interruptible(bo->base.resv, NULL);
if (ret == -EINTR)
ret = -ERESTARTSYS;
if (unlikely(ret != 0))
goto out_unlock;
- reservation_object_unlock(bo->base.resv);
+ dma_resv_unlock(bo->base.resv);
out_unlock:
mutex_unlock(&bo->wu_mutex);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 425a6d627b30..fe81c565e7ef 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -38,7 +38,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
struct ttm_transfer_obj {
struct ttm_buffer_object base;
@@ -518,8 +518,8 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
fbo->base.base.resv = &fbo->base.base._resv;
- reservation_object_init(fbo->base.base.resv);
- ret = reservation_object_trylock(fbo->base.base.resv);
+ dma_resv_init(fbo->base.base.resv);
+ ret = dma_resv_trylock(fbo->base.base.resv);
WARN_ON(!ret);
*new_obj = &fbo->base;
@@ -689,7 +689,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
int ret;
struct ttm_buffer_object *ghost_obj;
- reservation_object_add_excl_fence(bo->base.resv, fence);
+ dma_resv_add_excl_fence(bo->base.resv, fence);
if (evict) {
ret = ttm_bo_wait(bo, false, false);
if (ret)
@@ -716,7 +716,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
if (ret)
return ret;
- reservation_object_add_excl_fence(ghost_obj->base.resv, fence);
+ dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
/**
* If we're not moving to fixed memory, the TTM object
@@ -752,7 +752,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
int ret;
- reservation_object_add_excl_fence(bo->base.resv, fence);
+ dma_resv_add_excl_fence(bo->base.resv, fence);
if (!evict) {
struct ttm_buffer_object *ghost_obj;
@@ -772,7 +772,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
if (ret)
return ret;
- reservation_object_add_excl_fence(ghost_obj->base.resv, fence);
+ dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
/**
* If we're not moving to fixed memory, the TTM object
@@ -841,7 +841,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
if (ret)
return ret;
- ret = reservation_object_copy_fences(ghost->base.resv, bo->base.resv);
+ ret = dma_resv_copy_fences(ghost->base.resv, bo->base.resv);
/* Last resort, wait for the BO to be idle when we are OOM */
if (ret)
ttm_bo_wait(bo, false, false);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 85f5bcbe0c76..76eedb963693 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -71,7 +71,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
ttm_bo_get(bo);
up_read(&vmf->vma->vm_mm->mmap_sem);
(void) dma_fence_wait(bo->moving, true);
- reservation_object_unlock(bo->base.resv);
+ dma_resv_unlock(bo->base.resv);
ttm_bo_put(bo);
goto out_unlock;
}
@@ -131,7 +131,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
* for reserve, and if it fails, retry the fault after waiting
* for the buffer to become unreserved.
*/
- if (unlikely(!reservation_object_trylock(bo->base.resv))) {
+ if (unlikely(!dma_resv_trylock(bo->base.resv))) {
if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
ttm_bo_get(bo);
@@ -296,7 +296,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
out_io_unlock:
ttm_mem_io_unlock(man);
out_unlock:
- reservation_object_unlock(bo->base.resv);
+ dma_resv_unlock(bo->base.resv);
return ret;
}
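Aside, not part of the patch: the fault handler's dma_resv_trylock() above follows the standard mmap-fault retry discipline: never sleep on the reservation while mmap_sem is held; instead drop mmap_sem, wait, and let the kernel retry the fault. Outline only (hypothetical function, details of the wait elided):

/* Hypothetical outline of the trylock-or-retry idiom in the fault path. */
static vm_fault_t fault_lock_example(struct vm_fault *vmf,
				     struct ttm_buffer_object *bo)
{
	if (!dma_resv_trylock(bo->base.resv)) {
		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			/* drop mmap_sem, wait for the BO, ask for a retry */
			return VM_FAULT_RETRY;
		}
		return VM_FAULT_NOPAGE;	/* retry not allowed: fault again */
	}
	/* ... service the fault (vmf_insert_pfn() etc.) with the lock held ... */
	dma_resv_unlock(bo->base.resv);
	return VM_FAULT_NOPAGE;
}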
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 3aefe72fb5cb..131dae8f4170 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -39,7 +39,7 @@ static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
list_for_each_entry_continue_reverse(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- reservation_object_unlock(bo->base.resv);
+ dma_resv_unlock(bo->base.resv);
}
}
@@ -71,7 +71,7 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
if (list_empty(&bo->lru))
ttm_bo_add_to_lru(bo);
- reservation_object_unlock(bo->base.resv);
+ dma_resv_unlock(bo->base.resv);
}
spin_unlock(&glob->lru_lock);
@@ -114,7 +114,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- reservation_object_unlock(bo->base.resv);
+ dma_resv_unlock(bo->base.resv);
ret = -EBUSY;
@@ -130,7 +130,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
if (!entry->num_shared)
continue;
- ret = reservation_object_reserve_shared(bo->base.resv,
+ ret = dma_resv_reserve_shared(bo->base.resv,
entry->num_shared);
if (!ret)
continue;
@@ -144,16 +144,16 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
if (ret == -EDEADLK) {
if (intr) {
- ret = reservation_object_lock_slow_interruptible(bo->base.resv,
+ ret = dma_resv_lock_slow_interruptible(bo->base.resv,
ticket);
} else {
- reservation_object_lock_slow(bo->base.resv, ticket);
+ dma_resv_lock_slow(bo->base.resv, ticket);
ret = 0;
}
}
if (!ret && entry->num_shared)
- ret = reservation_object_reserve_shared(bo->base.resv,
+ ret = dma_resv_reserve_shared(bo->base.resv,
entry->num_shared);
if (unlikely(ret != 0)) {
@@ -201,14 +201,14 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
list_for_each_entry(entry, list, head) {
bo = entry->bo;
if (entry->num_shared)
- reservation_object_add_shared_fence(bo->base.resv, fence);
+ dma_resv_add_shared_fence(bo->base.resv, fence);
else
- reservation_object_add_excl_fence(bo->base.resv, fence);
+ dma_resv_add_excl_fence(bo->base.resv, fence);
if (list_empty(&bo->lru))
ttm_bo_add_to_lru(bo);
else
ttm_bo_move_to_lru_tail(bo, NULL);
- reservation_object_unlock(bo->base.resv);
+ dma_resv_unlock(bo->base.resv);
}
spin_unlock(&glob->lru_lock);
if (ticket)
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 00b4a3337840..e0e9b4f69db6 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -48,7 +48,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
struct ttm_bo_device *bdev = bo->bdev;
uint32_t page_flags = 0;
- reservation_object_assert_held(bo->base.resv);
+ dma_resv_assert_held(bo->base.resv);
if (bdev->need_dma32)
page_flags |= TTM_PAGE_FLAG_DMA32;
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 79744137d89f..5d80507b539b 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -409,7 +409,7 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
if (args->pad != 0)
return -EINVAL;
- ret = drm_gem_reservation_object_wait(file_priv, args->handle,
+ ret = drm_gem_dma_resv_wait(file_priv, args->handle,
true, timeout_jiffies);
/* Decrement the user's timeout, in case we got interrupted
@@ -495,7 +495,7 @@ v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
for (i = 0; i < job->bo_count; i++) {
/* XXX: Use shared fences for read-only objects. */
- reservation_object_add_excl_fence(job->bo[i]->resv,
+ dma_resv_add_excl_fence(job->bo[i]->resv,
job->done_fence);
}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index 6189ea89bb71..862db495d111 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -32,7 +32,7 @@ static const struct pci_device_id pciidlist[] = {
};
MODULE_DEVICE_TABLE(pci, pciidlist);
-static struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
.fb_probe = vboxfb_create,
};
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index b72b760e3018..7a06cb6e31c5 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -543,7 +543,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
bo = to_vc4_bo(&exec->bo[i]->base);
bo->seqno = seqno;
- reservation_object_add_shared_fence(bo->base.base.resv, exec->fence);
+ dma_resv_add_shared_fence(bo->base.base.resv, exec->fence);
}
list_for_each_entry(bo, &exec->unref_list, unref_head) {
@@ -554,7 +554,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
bo->write_seqno = seqno;
- reservation_object_add_excl_fence(bo->base.base.resv, exec->fence);
+ dma_resv_add_excl_fence(bo->base.base.resv, exec->fence);
}
}
@@ -642,7 +642,7 @@ retry:
for (i = 0; i < exec->bo_count; i++) {
bo = &exec->bo[i]->base;
- ret = reservation_object_reserve_shared(bo->resv, 1);
+ ret = dma_resv_reserve_shared(bo->resv, 1);
if (ret) {
vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
return ret;
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index d8630467549c..9268f6fc3f66 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -21,7 +21,7 @@
*/
#include <linux/dma-buf.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <drm/drm_file.h>
@@ -128,7 +128,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
{
struct drm_vgem_fence_attach *arg = data;
struct vgem_file *vfile = file->driver_priv;
- struct reservation_object *resv;
+ struct dma_resv *resv;
struct drm_gem_object *obj;
struct dma_fence *fence;
int ret;
@@ -151,7 +151,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
/* Check for a conflicting fence */
resv = obj->resv;
- if (!reservation_object_test_signaled_rcu(resv,
+ if (!dma_resv_test_signaled_rcu(resv,
arg->flags & VGEM_FENCE_WRITE)) {
ret = -EBUSY;
goto err_fence;
@@ -159,12 +159,12 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
/* Expose the fence via the dma-buf */
ret = 0;
- reservation_object_lock(resv, NULL);
+ dma_resv_lock(resv, NULL);
if (arg->flags & VGEM_FENCE_WRITE)
- reservation_object_add_excl_fence(resv, fence);
- else if ((ret = reservation_object_reserve_shared(resv, 1)) == 0)
- reservation_object_add_shared_fence(resv, fence);
- reservation_object_unlock(resv);
+ dma_resv_add_excl_fence(resv, fence);
+ else if ((ret = dma_resv_reserve_shared(resv, 1)) == 0)
+ dma_resv_add_shared_fence(resv, fence);
+ dma_resv_unlock(resv);
/* Record the fence in our idr for later signaling */
if (ret == 0) {
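Aside, not part of the patch: this ioctl states the dma_resv write/read rule compactly. A writer installs the exclusive fence; a reader must first reserve a shared slot and only then add a shared fence; both happen under the reservation lock. Condensed sketch with the same pre-5.4 names:

/* Hypothetical condensation of the fencing order used above. */
static int attach_fence_example(struct dma_resv *resv,
				struct dma_fence *fence, bool write)
{
	int ret = 0;

	dma_resv_lock(resv, NULL);
	if (write)
		dma_resv_add_excl_fence(resv, fence);
	else if ((ret = dma_resv_reserve_shared(resv, 1)) == 0)
		dma_resv_add_shared_fence(resv, fence);
	dma_resv_unlock(resv);

	return ret;
}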
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 3c430dd65f67..0a88ef11b9d3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -396,7 +396,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
(vgdev, qobj->hw_res_handle,
vfpriv->ctx_id, offset, args->level,
&box, fence);
- reservation_object_add_excl_fence(qobj->tbo.base.resv,
+ dma_resv_add_excl_fence(qobj->tbo.base.resv,
&fence->f);
dma_fence_put(&fence->f);
@@ -450,7 +450,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
(vgdev, qobj,
vfpriv ? vfpriv->ctx_id : 0, offset,
args->level, &box, fence);
- reservation_object_add_excl_fence(qobj->tbo.base.resv,
+ dma_resv_add_excl_fence(qobj->tbo.base.resv,
&fence->f);
dma_fence_put(&fence->f);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 3dc08f991a8d..a492ac3f4a7e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -212,7 +212,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
0, 0, vgfb->fence);
ret = virtio_gpu_object_reserve(bo, false);
if (!ret) {
- reservation_object_add_excl_fence(bo->tbo.base.resv,
+ dma_resv_add_excl_fence(bo->tbo.base.resv,
&vgfb->fence->f);
dma_fence_put(&vgfb->fence->f);
vgfb->fence = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/ttm_lock.c b/drivers/gpu/drm/vmwgfx/ttm_lock.c
index 16b2083cb9d4..5971c72e6d10 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_lock.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_lock.c
@@ -29,7 +29,6 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <drm/ttm/ttm_module.h>
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>
@@ -49,8 +48,6 @@ void ttm_lock_init(struct ttm_lock *lock)
init_waitqueue_head(&lock->queue);
lock->rw = 0;
lock->flags = 0;
- lock->kill_takers = false;
- lock->signal = SIGKILL;
}
void ttm_read_unlock(struct ttm_lock *lock)
@@ -66,11 +63,6 @@ static bool __ttm_read_lock(struct ttm_lock *lock)
bool locked = false;
spin_lock(&lock->lock);
- if (unlikely(lock->kill_takers)) {
- send_sig(lock->signal, current, 0);
- spin_unlock(&lock->lock);
- return false;
- }
if (lock->rw >= 0 && lock->flags == 0) {
++lock->rw;
locked = true;
@@ -98,11 +90,6 @@ static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
*locked = false;
spin_lock(&lock->lock);
- if (unlikely(lock->kill_takers)) {
- send_sig(lock->signal, current, 0);
- spin_unlock(&lock->lock);
- return false;
- }
if (lock->rw >= 0 && lock->flags == 0) {
++lock->rw;
block = false;
@@ -147,11 +134,6 @@ static bool __ttm_write_lock(struct ttm_lock *lock)
bool locked = false;
spin_lock(&lock->lock);
- if (unlikely(lock->kill_takers)) {
- send_sig(lock->signal, current, 0);
- spin_unlock(&lock->lock);
- return false;
- }
if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
lock->rw = -1;
lock->flags &= ~TTM_WRITE_LOCK_PENDING;
@@ -182,88 +164,6 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
return ret;
}
-static int __ttm_vt_unlock(struct ttm_lock *lock)
-{
- int ret = 0;
-
- spin_lock(&lock->lock);
- if (unlikely(!(lock->flags & TTM_VT_LOCK)))
- ret = -EINVAL;
- lock->flags &= ~TTM_VT_LOCK;
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
-
- return ret;
-}
-
-static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
-{
- struct ttm_base_object *base = *p_base;
- struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
- int ret;
-
- *p_base = NULL;
- ret = __ttm_vt_unlock(lock);
- BUG_ON(ret != 0);
-}
-
-static bool __ttm_vt_lock(struct ttm_lock *lock)
-{
- bool locked = false;
-
- spin_lock(&lock->lock);
- if (lock->rw == 0) {
- lock->flags &= ~TTM_VT_LOCK_PENDING;
- lock->flags |= TTM_VT_LOCK;
- locked = true;
- } else {
- lock->flags |= TTM_VT_LOCK_PENDING;
- }
- spin_unlock(&lock->lock);
- return locked;
-}
-
-int ttm_vt_lock(struct ttm_lock *lock,
- bool interruptible,
- struct ttm_object_file *tfile)
-{
- int ret = 0;
-
- if (interruptible) {
- ret = wait_event_interruptible(lock->queue,
- __ttm_vt_lock(lock));
- if (unlikely(ret != 0)) {
- spin_lock(&lock->lock);
- lock->flags &= ~TTM_VT_LOCK_PENDING;
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
- return ret;
- }
- } else
- wait_event(lock->queue, __ttm_vt_lock(lock));
-
- /*
- * Add a base-object, the destructor of which will
- * make sure the lock is released if the client dies
- * while holding it.
- */
-
- ret = ttm_base_object_init(tfile, &lock->base, false,
- ttm_lock_type, &ttm_vt_lock_remove, NULL);
- if (ret)
- (void)__ttm_vt_unlock(lock);
- else
- lock->vt_holder = tfile;
-
- return ret;
-}
-
-int ttm_vt_unlock(struct ttm_lock *lock)
-{
- return ttm_ref_object_base_unref(lock->vt_holder,
- lock->base.handle, TTM_REF_USAGE);
-}
-
void ttm_suspend_unlock(struct ttm_lock *lock)
{
spin_lock(&lock->lock);
diff --git a/drivers/gpu/drm/vmwgfx/ttm_lock.h b/drivers/gpu/drm/vmwgfx/ttm_lock.h
index 0c3af9836863..af8b28ca546f 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_lock.h
+++ b/drivers/gpu/drm/vmwgfx/ttm_lock.h
@@ -49,8 +49,8 @@
#ifndef _TTM_LOCK_H_
#define _TTM_LOCK_H_
-#include <linux/wait.h>
#include <linux/atomic.h>
+#include <linux/wait.h>
#include "ttm_object.h"
@@ -63,8 +63,6 @@
* @lock: Spinlock protecting some lock members.
* @rw: Read-write lock counter. Protected by @lock.
* @flags: Lock state. Protected by @lock.
- * @kill_takers: Boolean whether to kill takers of the lock.
- * @signal: Signal to send when kill_takers is true.
*/
struct ttm_lock {
@@ -73,9 +71,6 @@ struct ttm_lock {
spinlock_t lock;
int32_t rw;
uint32_t flags;
- bool kill_takers;
- int signal;
- struct ttm_object_file *vt_holder;
};
@@ -220,29 +215,4 @@ extern void ttm_write_unlock(struct ttm_lock *lock);
*/
extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
-/**
- * ttm_lock_set_kill
- *
- * @lock: Pointer to a struct ttm_lock
- * @val: Boolean whether to kill processes taking the lock.
- * @signal: Signal to send to the process taking the lock.
- *
- * The kill-when-taking-lock functionality is used to kill processes that keep
- * on using the TTM functionality when its resources has been taken down, for
- * example when the X server exits. A typical sequence would look like this:
- * - X server takes lock in write mode.
- * - ttm_lock_set_kill() is called with @val set to true.
- * - As part of X server exit, TTM resources are taken down.
- * - X server releases the lock on file release.
- * - Another dri client wants to render, takes the lock and is killed.
- *
- */
-static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
- int signal)
-{
- lock->kill_takers = val;
- if (val)
- lock->signal = signal;
-}
-
#endif
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h
index 50d26c7ff42d..ede26df87c93 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.h
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.h
@@ -37,11 +37,12 @@
#ifndef _TTM_OBJECT_H_
#define _TTM_OBJECT_H_
-#include <linux/list.h>
-#include <drm/drm_hashtab.h>
+#include <linux/dma-buf.h>
#include <linux/kref.h>
+#include <linux/list.h>
#include <linux/rcupdate.h>
-#include <linux/dma-buf.h>
+
+#include <drm/drm_hashtab.h>
#include <drm/ttm/ttm_memory.h>
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
index f6ab79d23923..cd9805c045cb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -27,9 +27,10 @@
#ifndef _VMWGFX_BINDING_H_
#define _VMWGFX_BINDING_H_
-#include "device_include/svga3d_reg.h"
#include <linux/list.h>
+#include "device_include/svga3d_reg.h"
+
#define VMW_MAX_VIEW_BINDINGS 128
struct vmw_private;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index 6c01ad2785dd..bb46ca0c458f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -459,9 +459,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
/* Buffer objects need to be either pinned or reserved: */
if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT))
- reservation_object_assert_held(dst->base.resv);
+ dma_resv_assert_held(dst->base.resv);
if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
- reservation_object_assert_held(src->base.resv);
+ dma_resv_assert_held(src->base.resv);
if (dst->ttm->state == tt_unpopulated) {
ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 369034c0de31..aad8d8140259 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -28,7 +28,6 @@
#include <drm/ttm/ttm_placement.h>
-#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "ttm_object.h"
@@ -342,7 +341,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
uint32_t old_mem_type = bo->mem.mem_type;
int ret;
- reservation_object_assert_held(bo->base.resv);
+ dma_resv_assert_held(bo->base.resv);
if (pin) {
if (vbo->pin_count++ > 0)
@@ -510,6 +509,8 @@ int vmw_bo_init(struct vmw_private *dev_priv,
acc_size = vmw_bo_acc_size(dev_priv, size, user);
memset(vmw_bo, 0, sizeof(*vmw_bo));
+ BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
+ vmw_bo->base.priority = 3;
INIT_LIST_HEAD(&vmw_bo->res_list);
@@ -689,7 +690,7 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
long lret;
- lret = reservation_object_wait_timeout_rcu
+ lret = dma_resv_wait_timeout_rcu
(bo->base.resv, true, true,
nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
if (!lret)
@@ -1007,10 +1008,10 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
if (fence == NULL) {
vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- reservation_object_add_excl_fence(bo->base.resv, &fence->base);
+ dma_resv_add_excl_fence(bo->base.resv, &fence->base);
dma_fence_put(&fence->base);
} else
- reservation_object_add_excl_fence(bo->base.resv, &fence->base);
+ dma_resv_add_excl_fence(bo->base.resv, &fence->base);
}
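
The reservation_object_* to dma_resv_* conversion above is purely mechanical;
the locking and wait semantics are unchanged. A minimal sketch of the calling
pattern, assuming a reserved struct ttm_buffer_object and a fence to publish
(illustrative only, not part of the patch):

/* Sketch: publish an exclusive fence, then wait for idle elsewhere. */
#include <linux/dma-resv.h>
#include <drm/ttm/ttm_bo_api.h>

static long example_fence_and_wait(struct ttm_buffer_object *bo,
				   struct dma_fence *fence)
{
	/* The caller must hold the buffer's reservation to add fences. */
	dma_resv_assert_held(bo->base.resv);
	dma_resv_add_excl_fence(bo->base.resv, fence);

	/* Readers may wait for all fences without holding the lock. */
	return dma_resv_wait_timeout_rcu(bo->base.resv, true /* wait_all */,
					 true /* intr */,
					 MAX_SCHEDULE_TIMEOUT);
}
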
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 56979e412ca8..065015d2a8f6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -25,6 +25,9 @@
*
**************************************************************************/
+#include <linux/dmapool.h>
+#include <linux/pci.h>
+
#include <drm/ttm/ttm_bo_api.h>
#include "vmwgfx_drv.h"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 63f111068a44..a56c9d802382 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -88,6 +88,8 @@ static const struct vmw_res_func vmw_gb_context_func = {
.res_type = vmw_res_context,
.needs_backup = true,
.may_evict = true,
+ .prio = 3,
+ .dirty_prio = 3,
.type_name = "guest backed contexts",
.backup_placement = &vmw_mob_placement,
.create = vmw_gb_context_create,
@@ -100,6 +102,8 @@ static const struct vmw_res_func vmw_dx_context_func = {
.res_type = vmw_res_dx_context,
.needs_backup = true,
.may_evict = true,
+ .prio = 3,
+ .dirty_prio = 3,
.type_name = "dx contexts",
.backup_placement = &vmw_mob_placement,
.create = vmw_dx_context_create,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 7984f172ec4a..3ca5cf375b01 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -116,6 +116,8 @@ static const struct vmw_res_func vmw_cotable_func = {
.res_type = vmw_res_cotable,
.needs_backup = true,
.may_evict = true,
+ .prio = 3,
+ .dirty_prio = 3,
.type_name = "context guest backed object tables",
.backup_placement = &vmw_mob_placement,
.create = vmw_cotable_create,
@@ -169,7 +171,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
} *cmd;
WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
- reservation_object_assert_held(bo->base.resv);
+ dma_resv_assert_held(bo->base.resv);
cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (!cmd)
@@ -307,11 +309,11 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
struct ttm_buffer_object *bo = val_buf->bo;
struct vmw_fence_obj *fence;
- if (list_empty(&res->mob_head))
+ if (!vmw_resource_mob_attached(res))
return 0;
WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
- reservation_object_assert_held(bo->base.resv);
+ dma_resv_assert_held(bo->base.resv);
mutex_lock(&dev_priv->binding_mutex);
if (!vcotbl->scrubbed)
@@ -453,6 +455,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
goto out_wait;
}
+ vmw_resource_mob_detach(res);
res->backup = buf;
res->backup_size = new_size;
vcotbl->size_read_back = cur_size_read_back;
@@ -467,12 +470,12 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
res->backup = old_buf;
res->backup_size = old_size;
vcotbl->size_read_back = old_size_read_back;
+ vmw_resource_mob_attach(res);
goto out_wait;
}
+ vmw_resource_mob_attach(res);
/* Let go of the old mob. */
- list_del(&res->mob_head);
- list_add_tail(&res->mob_head, &buf->res_list);
vmw_bo_unreference(&old_buf);
res->id = vcotbl->type;
@@ -496,7 +499,7 @@ out_wait:
* is called before bind() in the validation sequence is instead used for two
* things.
* 1) Unscrub the cotable if it is scrubbed and still attached to a backup
- * buffer, that is, if @res->mob_head is non-empty.
+ * buffer.
* 2) Resize the cotable if needed.
*/
static int vmw_cotable_create(struct vmw_resource *res)
@@ -512,7 +515,7 @@ static int vmw_cotable_create(struct vmw_resource *res)
new_size *= 2;
if (likely(new_size <= res->backup_size)) {
- if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
+ if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
ret = vmw_cotable_unscrub(res);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index cd0d49d8a8da..b38bcb032c99 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -24,17 +24,22 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
-#include <linux/module.h>
+
#include <linux/console.h>
#include <linux/dma-mapping.h>
+#include <linux/module.h>
-#include <drm/drmP.h>
-#include "vmwgfx_drv.h"
-#include "vmwgfx_binding.h"
-#include "ttm_object.h"
-#include <drm/ttm/ttm_placement.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "ttm_object.h"
+#include "vmwgfx_binding.h"
+#include "vmwgfx_drv.h"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
@@ -254,7 +259,6 @@ static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
-static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
void *ptr);
@@ -764,10 +768,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
dev_priv->mmio_start, dev_priv->mmio_size / 1024);
- vmw_master_init(&dev_priv->fbdev_master);
- ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
- dev_priv->active_master = &dev_priv->fbdev_master;
-
dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
dev_priv->mmio_size, MEMREMAP_WB);
@@ -1009,18 +1009,7 @@ static void vmw_driver_unload(struct drm_device *dev)
static void vmw_postclose(struct drm_device *dev,
struct drm_file *file_priv)
{
- struct vmw_fpriv *vmw_fp;
-
- vmw_fp = vmw_fpriv(file_priv);
-
- if (vmw_fp->locked_master) {
- struct vmw_master *vmaster =
- vmw_master(vmw_fp->locked_master);
-
- ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
- ttm_vt_unlock(&vmaster->lock);
- drm_master_put(&vmw_fp->locked_master);
- }
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
ttm_object_file_release(&vmw_fp->tfile);
kfree(vmw_fp);
@@ -1049,55 +1038,6 @@ out_no_tfile:
return ret;
}
-static struct vmw_master *vmw_master_check(struct drm_device *dev,
- struct drm_file *file_priv,
- unsigned int flags)
-{
- int ret;
- struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
- struct vmw_master *vmaster;
-
- if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
- return NULL;
-
- ret = mutex_lock_interruptible(&dev->master_mutex);
- if (unlikely(ret != 0))
- return ERR_PTR(-ERESTARTSYS);
-
- if (drm_is_current_master(file_priv)) {
- mutex_unlock(&dev->master_mutex);
- return NULL;
- }
-
- /*
- * Check if we were previously master, but now dropped. In that
- * case, allow at least render node functionality.
- */
- if (vmw_fp->locked_master) {
- mutex_unlock(&dev->master_mutex);
-
- if (flags & DRM_RENDER_ALLOW)
- return NULL;
-
- DRM_ERROR("Dropped master trying to access ioctl that "
- "requires authentication.\n");
- return ERR_PTR(-EACCES);
- }
- mutex_unlock(&dev->master_mutex);
-
- /*
- * Take the TTM lock. Possibly sleep waiting for the authenticating
- * master to become master again, or for a SIGTERM if the
- * authenticating master exits.
- */
- vmaster = vmw_master(file_priv->master);
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0))
- vmaster = ERR_PTR(ret);
-
- return vmaster;
-}
-
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg,
long (*ioctl_func)(struct file *, unsigned int,
@@ -1106,9 +1046,7 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev;
unsigned int nr = DRM_IOCTL_NR(cmd);
- struct vmw_master *vmaster;
unsigned int flags;
- long ret;
/*
* Do extra checking on driver private ioctls.
@@ -1134,21 +1072,7 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
} else if (!drm_ioctl_flags(nr, &flags))
return -EINVAL;
- vmaster = vmw_master_check(dev, file_priv, flags);
- if (IS_ERR(vmaster)) {
- ret = PTR_ERR(vmaster);
-
- if (ret != -ERESTARTSYS)
- DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
- nr, ret);
- return ret;
- }
-
- ret = ioctl_func(filp, cmd, arg);
- if (vmaster)
- ttm_read_unlock(&vmaster->lock);
-
- return ret;
+ return ioctl_func(filp, cmd, arg);
out_io_encoding:
DRM_ERROR("Invalid command format, ioctl %d\n",
@@ -1171,65 +1095,10 @@ static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
}
#endif
-static void vmw_master_init(struct vmw_master *vmaster)
-{
- ttm_lock_init(&vmaster->lock);
-}
-
-static int vmw_master_create(struct drm_device *dev,
- struct drm_master *master)
-{
- struct vmw_master *vmaster;
-
- vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
- if (unlikely(!vmaster))
- return -ENOMEM;
-
- vmw_master_init(vmaster);
- ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
- master->driver_priv = vmaster;
-
- return 0;
-}
-
-static void vmw_master_destroy(struct drm_device *dev,
- struct drm_master *master)
-{
- struct vmw_master *vmaster = vmw_master(master);
-
- master->driver_priv = NULL;
- kfree(vmaster);
-}
-
static int vmw_master_set(struct drm_device *dev,
struct drm_file *file_priv,
bool from_open)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
- struct vmw_master *active = dev_priv->active_master;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
- int ret = 0;
-
- if (active) {
- BUG_ON(active != &dev_priv->fbdev_master);
- ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
- if (unlikely(ret != 0))
- return ret;
-
- ttm_lock_set_kill(&active->lock, true, SIGTERM);
- dev_priv->active_master = NULL;
- }
-
- ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
- if (!from_open) {
- ttm_vt_unlock(&vmaster->lock);
- BUG_ON(vmw_fp->locked_master != file_priv->master);
- drm_master_put(&vmw_fp->locked_master);
- }
-
- dev_priv->active_master = vmaster;
-
/*
* Inform a new master that the layout may have changed while
* it was gone.
@@ -1244,31 +1113,10 @@ static void vmw_master_drop(struct drm_device *dev,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
- struct vmw_master *vmaster = vmw_master(file_priv->master);
- int ret;
-
- /**
- * Make sure the master doesn't disappear while we have
- * it locked.
- */
- vmw_fp->locked_master = drm_master_get(file_priv->master);
- ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
vmw_kms_legacy_hotspot_clear(dev_priv);
- if (unlikely((ret != 0))) {
- DRM_ERROR("Unable to lock TTM at VT switch.\n");
- drm_master_put(&vmw_fp->locked_master);
- }
-
- ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
-
if (!dev_priv->enable_fb)
vmw_svga_disable(dev_priv);
-
- dev_priv->active_master = &dev_priv->fbdev_master;
- ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
- ttm_vt_unlock(&dev_priv->fbdev_master.lock);
}
/**
@@ -1546,8 +1394,6 @@ static struct drm_driver driver = {
.disable_vblank = vmw_disable_vblank,
.ioctls = vmw_ioctls,
.num_ioctls = ARRAY_SIZE(vmw_ioctls),
- .master_create = vmw_master_create,
- .master_destroy = vmw_master_destroy,
.master_set = vmw_master_set,
.master_drop = vmw_master_drop,
.open = vmw_driver_open,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index dbb04dbcf478..5eb73ded8e07 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -28,20 +28,32 @@
#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_
-#include "vmwgfx_validation.h"
-#include "vmwgfx_reg.h"
-#include <drm/drmP.h>
-#include <drm/vmwgfx_drm.h>
-#include <drm/drm_hashtab.h>
-#include <drm/drm_auth.h>
#include <linux/suspend.h>
+#include <linux/sync_file.h>
+
+#include <drm/drm_auth.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_hashtab.h>
+#include <drm/drm_rect.h>
+
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
-#include "vmwgfx_fence.h"
-#include "ttm_object.h"
+
#include "ttm_lock.h"
-#include <linux/sync_file.h>
+#include "ttm_object.h"
+
+#include "vmwgfx_fence.h"
+#include "vmwgfx_reg.h"
+#include "vmwgfx_validation.h"
+
+/*
+ * FIXME: vmwgfx_drm.h needs to be last due to dependencies.
+ * uapi headers should not depend on header files outside uapi/.
+ */
+#include <drm/vmwgfx_drm.h>
+
#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DATE "20180704"
@@ -81,11 +93,19 @@
#define VMW_RES_SHADER ttm_driver_type4
struct vmw_fpriv {
- struct drm_master *locked_master;
struct ttm_object_file *tfile;
bool gb_aware; /* user-space is guest-backed aware */
};
+/**
+ * struct vmw_buffer_object - TTM buffer object with vmwgfx additions
+ * @base: The TTM buffer object
+ * @res_list: List of resources using this buffer object as a backing MOB
+ * @pin_count: pin depth
+ * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
+ * @map: Kmap object for semi-persistent mappings
+ * @res_prios: Eviction priority counts for attached resources
+ */
struct vmw_buffer_object {
struct ttm_buffer_object base;
struct list_head res_list;
@@ -94,6 +114,7 @@ struct vmw_buffer_object {
struct vmw_resource *dx_query_ctx;
/* Protected by reservation */
struct ttm_bo_kmap_obj map;
+ u32 res_prios[TTM_MAX_BO_PRIORITY];
};
/**
@@ -145,6 +166,7 @@ struct vmw_resource {
struct kref kref;
struct vmw_private *dev_priv;
int id;
+ u32 used_prio;
unsigned long backup_size;
bool res_dirty;
bool backup_dirty;
@@ -376,10 +398,6 @@ struct vmw_sw_context{
struct vmw_legacy_display;
struct vmw_overlay;
-struct vmw_master {
- struct ttm_lock lock;
-};
-
struct vmw_vga_topology_state {
uint32_t width;
uint32_t height;
@@ -537,11 +555,8 @@ struct vmw_private {
spinlock_t svga_lock;
/**
- * Master management.
+ * PM management.
*/
-
- struct vmw_master *active_master;
- struct vmw_master fbdev_master;
struct notifier_block pm_nb;
bool refuse_hibernation;
bool suspend_locked;
@@ -607,11 +622,6 @@ static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
return (struct vmw_fpriv *)file_priv->driver_priv;
}
-static inline struct vmw_master *vmw_master(struct drm_master *master)
-{
- return (struct vmw_master *) master->driver_priv;
-}
-
/*
* The locking here is fine-grained, so that it is performed once
* for every read- and write operation. This is of course costly, but we
@@ -704,6 +714,19 @@ extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
+void vmw_resource_mob_attach(struct vmw_resource *res);
+void vmw_resource_mob_detach(struct vmw_resource *res);
+
+/**
+ * vmw_resource_mob_attached - Whether a resource currently has a mob attached
+ * @res: The resource
+ *
+ * Return: true if the resource has a mob attached, false otherwise.
+ */
+static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
+{
+ return !list_empty(&res->mob_head);
+}
/**
* vmw_user_resource_noref_release - release a user resource pointer looked up
@@ -782,6 +805,54 @@ static inline void vmw_user_bo_noref_release(void)
ttm_base_object_noref_release();
}
+/**
+ * vmw_bo_prio_adjust - Adjust the buffer object eviction priority
+ * according to attached resources
+ * @vbo: The struct vmw_buffer_object
+ */
+static inline void vmw_bo_prio_adjust(struct vmw_buffer_object *vbo)
+{
+ int i = ARRAY_SIZE(vbo->res_prios);
+
+ while (i--) {
+ if (vbo->res_prios[i]) {
+ vbo->base.priority = i;
+ return;
+ }
+ }
+
+ vbo->base.priority = 3;
+}
+
+/**
+ * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
+ * eviction priority
+ * @vbo: The struct vmw_buffer_object
+ * @prio: The resource priority
+ *
+ * After being notified, the code assigns the highest resource eviction priority
+ * to the backing buffer object (mob).
+ */
+static inline void vmw_bo_prio_add(struct vmw_buffer_object *vbo, int prio)
+{
+ if (vbo->res_prios[prio]++ == 0)
+ vmw_bo_prio_adjust(vbo);
+}
+
+/**
+ * vmw_bo_prio_del - Notify a buffer object of a resource with a certain
+ * priority being removed
+ * @vbo: The struct vmw_buffer_object
+ * @prio: The resource priority
+ *
+ * After being notified, the code assigns the highest resource eviction priority
+ * to the backing buffer object (mob).
+ */
+static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio)
+{
+ if (--vbo->res_prios[prio] == 0)
+ vmw_bo_prio_adjust(vbo);
+}
/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -1011,7 +1082,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bpp, unsigned depth);
-void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height);
@@ -1334,6 +1404,14 @@ int vmw_host_log(const char *log);
DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
/**
+ * VMW_DEBUG_KMS - Debug output for kernel mode-setting
+ *
+ * This macro is for debugging vmwgfx mode-setting code.
+ */
+#define VMW_DEBUG_KMS(fmt, ...) \
+ DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
+
+/**
* Inline helper functions
*/
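
The res_prios bookkeeping added above keeps one counter per eviction priority
and mirrors the highest populated priority into base.priority, defaulting to 3
to match vmw_bo_init. A standalone userspace model of the same invariant (a
hypothetical test, assuming four priority levels):

#include <assert.h>
#include <stdio.h>

#define TTM_MAX_BO_PRIORITY 4U

struct bo {
	unsigned int priority;
	unsigned int res_prios[TTM_MAX_BO_PRIORITY];
};

static void prio_adjust(struct bo *bo)
{
	int i = TTM_MAX_BO_PRIORITY;

	/* Pick the highest priority that still has attached resources. */
	while (i--) {
		if (bo->res_prios[i]) {
			bo->priority = i;
			return;
		}
	}
	bo->priority = 3;	/* nothing attached: default priority */
}

static void prio_add(struct bo *bo, int prio)
{
	if (bo->res_prios[prio]++ == 0)
		prio_adjust(bo);
}

static void prio_del(struct bo *bo, int prio)
{
	if (--bo->res_prios[prio] == 0)
		prio_adjust(bo);
}

int main(void)
{
	struct bo bo = { .priority = 3 };

	prio_add(&bo, 1);	/* one resource at priority 1 */
	assert(bo.priority == 1);
	prio_add(&bo, 2);	/* a priority-2 resource now dominates */
	assert(bo.priority == 2);
	prio_del(&bo, 2);
	assert(bo.priority == 1);
	prio_del(&bo, 1);
	assert(bo.priority == 3);	/* back to the default */
	printf("ok\n");
	return 0;
}
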
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 972e8fda6d35..ea29953e0b08 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -26,14 +26,14 @@
*
**************************************************************************/
-#include <linux/export.h>
+#include <linux/pci.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/ttm/ttm_placement.h>
-#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_kms.h"
-#include <drm/ttm/ttm_placement.h>
-
#define VMW_DIRTY_DELAY (HZ / 30)
struct vmw_fb_par {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 301260e23e52..178a6cd1a06f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -25,7 +25,8 @@
*
**************************************************************************/
-#include <drm/drmP.h>
+#include <linux/sched/signal.h>
+
#include "vmwgfx_drv.h"
#define VMW_FENCE_WRAP (1 << 31)
@@ -184,6 +185,9 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
spin_lock(f->lock);
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
+ goto out;
+
if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
goto out;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index c9382933c2b9..50e9fdd7acf1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -32,8 +32,11 @@
#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
-struct vmw_private;
+struct drm_device;
+struct drm_file;
+struct drm_pending_event;
+struct vmw_private;
struct vmw_fence_manager;
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index ff3586cb6851..e5252ef3812f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -25,10 +25,12 @@
*
**************************************************************************/
-#include "vmwgfx_drv.h"
-#include <drm/drmP.h>
+#include <linux/sched/signal.h>
+
#include <drm/ttm/ttm_placement.h>
+#include "vmwgfx_drv.h"
+
struct vmw_temp_set_context {
SVGA3dCmdHeader header;
SVGA3dCmdDXTempSetContext body;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index ae7acc6f3dda..83c0d5a3e4fd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -25,10 +25,10 @@
*
**************************************************************************/
-#include "vmwgfx_drv.h"
-#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_driver.h>
+#include "vmwgfx_drv.h"
+
#define VMW_PPN_SIZE (sizeof(unsigned long))
/* A future safe maximum remap size. */
#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index c3ad4478266b..75f3efee21a4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -25,7 +25,8 @@
*
**************************************************************************/
-#include <drm/drmP.h>
+#include <linux/sched/signal.h>
+
#include "vmwgfx_drv.h"
#define VMW_FENCE_WRAP (1 << 24)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 34284f0f5084..f47d5710cc95 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -25,12 +25,16 @@
*
**************************************************************************/
-#include "vmwgfx_kms.h"
-#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_rect.h>
#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_sysfs.h>
+#include <drm/drm_vblank.h>
+
+#include "vmwgfx_kms.h"
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
@@ -1462,7 +1466,7 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
if (dev_priv->active_display_unit == vmw_du_screen_target &&
(drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
- DRM_ERROR("Screen size not supported.\n");
+ VMW_DEBUG_KMS("Screen size not supported.\n");
return -EINVAL;
}
@@ -1486,7 +1490,7 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
* limit on primary bounding box
*/
if (pixel_mem > dev_priv->prim_bb_mem) {
- DRM_ERROR("Combined output size too large.\n");
+ VMW_DEBUG_KMS("Combined output size too large.\n");
return -EINVAL;
}
@@ -1496,7 +1500,7 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
if (bb_mem > dev_priv->prim_bb_mem) {
- DRM_ERROR("Topology is beyond supported limits.\n");
+ VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
return -EINVAL;
}
}
@@ -1645,6 +1649,7 @@ static int vmw_kms_check_topology(struct drm_device *dev,
struct vmw_connector_state *vmw_conn_state;
if (!du->pref_active && new_crtc_state->enable) {
+ VMW_DEBUG_KMS("Enabling a disabled display unit\n");
ret = -EINVAL;
goto clean;
}
@@ -1701,8 +1706,10 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
return ret;
ret = vmw_kms_check_implicit(dev, state);
- if (ret)
+ if (ret) {
+ VMW_DEBUG_KMS("Invalid implicit state\n");
return ret;
+ }
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state))
@@ -2339,6 +2346,9 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
if (!arg->num_outputs) {
struct drm_rect def_rect = {0, 0, 800, 600};
+ VMW_DEBUG_KMS("Default layout x1 = %d y1 = %d x2 = %d y2 = %d\n",
+ def_rect.x1, def_rect.y1,
+ def_rect.x2, def_rect.y2);
vmw_du_update_layout(dev_priv, 1, &def_rect);
return 0;
}
@@ -2359,6 +2369,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
drm_rects = (struct drm_rect *)rects;
+ VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
for (i = 0; i < arg->num_outputs; i++) {
struct drm_vmw_rect curr_rect;
@@ -2375,6 +2386,10 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
drm_rects[i].x2 = curr_rect.x + curr_rect.w;
drm_rects[i].y2 = curr_rect.y + curr_rect.h;
+ VMW_DEBUG_KMS(" x1 = %d y1 = %d x2 = %d y2 = %d\n",
+ drm_rects[i].x1, drm_rects[i].y1,
+ drm_rects[i].x2, drm_rects[i].y2);
+
/*
* Currently this check is limiting the topology within
* mode_config->max (which actually is max texture size
@@ -2385,7 +2400,9 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
drm_rects[i].x2 > mode_config->max_width ||
drm_rects[i].y2 > mode_config->max_height) {
- DRM_ERROR("Invalid GUI layout.\n");
+ VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
+ drm_rects[i].x1, drm_rects[i].y1,
+ drm_rects[i].x2, drm_rects[i].y2);
ret = -EINVAL;
goto out_free;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 535b03599e55..3ee03227607c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -28,9 +28,9 @@
#ifndef VMWGFX_KMS_H_
#define VMWGFX_KMS_H_
-#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_probe_helper.h>
+
#include "vmwgfx_drv.h"
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 25e6343bcf21..5702219ec38f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -25,11 +25,13 @@
*
**************************************************************************/
-#include "vmwgfx_kms.h"
-#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank.h>
+#include "vmwgfx_kms.h"
#define vmw_crtc_to_ldu(x) \
container_of(x, struct vmw_legacy_display_unit, base.crtc)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 406edc8cef35..0a6bbac00896 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -25,6 +25,8 @@
*
**************************************************************************/
+#include <linux/highmem.h>
+
#include "vmwgfx_drv.h"
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index e4e09d47c5c0..81a86c3b77bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -24,17 +24,16 @@
*
*/
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/frame.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
#include <asm/hypervisor.h>
-#include <drm/drmP.h>
+
#include "vmwgfx_drv.h"
#include "vmwgfx_msg.h"
-
#define MESSAGE_STATUS_SUCCESS 0x0001
#define MESSAGE_STATUS_DORECV 0x0002
#define MESSAGE_STATUS_CPT 0x0010
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index d5ef8cf802de..fdb52f6d29fb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -25,15 +25,13 @@
*
**************************************************************************/
-
-#include <drm/drmP.h>
-#include "vmwgfx_drv.h"
-
#include <drm/ttm/ttm_placement.h>
#include "device_include/svga_overlay.h"
#include "device_include/svga_escape.h"
+#include "vmwgfx_drv.h"
+
#define VMW_MAX_NUM_STREAMS 1
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 701643b7b0c4..5581a7826b4c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -25,15 +25,44 @@
*
**************************************************************************/
-#include "vmwgfx_drv.h"
-#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/drmP.h>
+
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
+#include "vmwgfx_drv.h"
#define VMW_RES_EVICT_ERR_COUNT 10
+/**
+ * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
+ * @res: The resource
+ */
+void vmw_resource_mob_attach(struct vmw_resource *res)
+{
+ struct vmw_buffer_object *backup = res->backup;
+
+ dma_resv_assert_held(res->backup->base.base.resv);
+ res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
+ res->func->prio;
+ list_add_tail(&res->mob_head, &backup->res_list);
+ vmw_bo_prio_add(backup, res->used_prio);
+}
+
+/**
+ * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
+ * @res: The resource
+ */
+void vmw_resource_mob_detach(struct vmw_resource *res)
+{
+ struct vmw_buffer_object *backup = res->backup;
+
+ dma_resv_assert_held(backup->base.base.resv);
+ if (vmw_resource_mob_attached(res)) {
+ list_del_init(&res->mob_head);
+ vmw_bo_prio_del(backup, res->used_prio);
+ }
+}
+
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
kref_get(&res->kref);
@@ -80,7 +109,7 @@ static void vmw_resource_release(struct kref *kref)
struct ttm_buffer_object *bo = &res->backup->base;
ttm_bo_reserve(bo, false, false, NULL);
- if (!list_empty(&res->mob_head) &&
+ if (vmw_resource_mob_attached(res) &&
res->func->unbind != NULL) {
struct ttm_validate_buffer val_buf;
@@ -89,7 +118,7 @@ static void vmw_resource_release(struct kref *kref)
res->func->unbind(res, false, &val_buf);
}
res->backup_dirty = false;
- list_del_init(&res->mob_head);
+ vmw_resource_mob_detach(res);
ttm_bo_unreserve(bo);
vmw_bo_unreference(&res->backup);
}
@@ -179,6 +208,7 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
res->backup_offset = 0;
res->backup_dirty = false;
res->res_dirty = false;
+ res->used_prio = 3;
if (delay_id)
return 0;
else
@@ -355,14 +385,14 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
}
if (func->bind &&
- ((func->needs_backup && list_empty(&res->mob_head) &&
+ ((func->needs_backup && !vmw_resource_mob_attached(res) &&
val_buf->bo != NULL) ||
(!func->needs_backup && val_buf->bo != NULL))) {
ret = func->bind(res, val_buf);
if (unlikely(ret != 0))
goto out_bind_failed;
if (func->needs_backup)
- list_add_tail(&res->mob_head, &res->backup->res_list);
+ vmw_resource_mob_attach(res);
}
return 0;
@@ -402,15 +432,13 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (switch_backup && new_backup != res->backup) {
if (res->backup) {
- reservation_object_assert_held(res->backup->base.base.resv);
- list_del_init(&res->mob_head);
+ vmw_resource_mob_detach(res);
vmw_bo_unreference(&res->backup);
}
if (new_backup) {
res->backup = vmw_bo_reference(new_backup);
- reservation_object_assert_held(new_backup->base.base.resv);
- list_add_tail(&res->mob_head, &new_backup->res_list);
+ vmw_resource_mob_attach(res);
} else {
res->backup = NULL;
}
@@ -469,7 +497,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
if (unlikely(ret != 0))
goto out_no_reserve;
- if (res->func->needs_backup && list_empty(&res->mob_head))
+ if (res->func->needs_backup && !vmw_resource_mob_attached(res))
return 0;
backup_dirty = res->backup_dirty;
@@ -574,11 +602,11 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
return ret;
if (unlikely(func->unbind != NULL &&
- (!func->needs_backup || !list_empty(&res->mob_head)))) {
+ (!func->needs_backup || vmw_resource_mob_attached(res)))) {
ret = func->unbind(res, res->res_dirty, &val_buf);
if (unlikely(ret != 0))
goto out_no_unbind;
- list_del_init(&res->mob_head);
+ vmw_resource_mob_detach(res);
}
ret = func->destroy(res);
res->backup_dirty = true;
@@ -660,7 +688,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
if (unlikely(ret != 0))
goto out_no_validate;
else if (!res->func->needs_backup && res->backup) {
- list_del_init(&res->mob_head);
+ WARN_ON_ONCE(vmw_resource_mob_attached(res));
vmw_bo_unreference(&res->backup);
}
@@ -691,7 +719,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
.num_shared = 0
};
- reservation_object_assert_held(vbo->base.base.resv);
+ dma_resv_assert_held(vbo->base.base.resv);
list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
if (!res->func->unbind)
continue;
@@ -699,7 +727,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
(void) res->func->unbind(res, res->res_dirty, &val_buf);
res->backup_dirty = true;
res->res_dirty = false;
- list_del_init(&res->mob_head);
+ vmw_resource_mob_detach(res);
}
(void) ttm_bo_wait(&vbo->base, false, false);
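
vmw_resource_mob_attach()/detach() replace the open-coded mob_head list
manipulation and additionally keep the backing buffer's eviction-priority
counters in sync. The intended caller pattern when switching backup buffers,
mirroring the vmw_resource_unreserve() hunk above (a sketch, not new driver
code):

static void example_switch_backup(struct vmw_resource *res,
				  struct vmw_buffer_object *new_backup)
{
	/* Both helpers assert that the backup bo's reservation is held. */
	if (res->backup) {
		vmw_resource_mob_detach(res);	/* safe if already detached */
		vmw_bo_unreference(&res->backup);
	}

	if (new_backup) {
		res->backup = vmw_bo_reference(new_backup);
		vmw_resource_mob_attach(res);	/* bumps bo eviction priority */
	}
}
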
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
index 7e19eba0b0b8..984e588c62ca 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -78,6 +78,8 @@ struct vmw_res_func {
const char *type_name;
struct ttm_placement *backup_placement;
bool may_evict;
+ u32 prio;
+ u32 dirty_prio;
int (*create) (struct vmw_resource *res);
int (*destroy) (struct vmw_resource *res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 9a2a3836d89a..e5a283263211 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -25,12 +25,14 @@
*
**************************************************************************/
-#include "vmwgfx_kms.h"
-#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank.h>
+#include "vmwgfx_kms.h"
#define vmw_crtc_to_sou(x) \
container_of(x, struct vmw_screen_object_unit, base.crtc)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index d310d21f0d54..e139fdfd1635 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -95,6 +95,8 @@ static const struct vmw_res_func vmw_gb_shader_func = {
.res_type = vmw_res_shader,
.needs_backup = true,
.may_evict = true,
+ .prio = 3,
+ .dirty_prio = 3,
.type_name = "guest backed shaders",
.backup_placement = &vmw_mob_placement,
.create = vmw_gb_shader_create,
@@ -106,7 +108,9 @@ static const struct vmw_res_func vmw_gb_shader_func = {
static const struct vmw_res_func vmw_dx_shader_func = {
.res_type = vmw_res_shader,
.needs_backup = true,
- .may_evict = false,
+ .may_evict = true,
+ .prio = 3,
+ .dirty_prio = 3,
.type_name = "dx shaders",
.backup_placement = &vmw_mob_placement,
.create = vmw_dx_shader_create,
@@ -423,7 +427,7 @@ static int vmw_dx_shader_create(struct vmw_resource *res)
WARN_ON_ONCE(!shader->committed);
- if (!list_empty(&res->mob_head)) {
+ if (vmw_resource_mob_attached(res)) {
mutex_lock(&dev_priv->binding_mutex);
ret = vmw_dx_shader_unscrub(res);
mutex_unlock(&dev_priv->binding_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index f803bb5e782b..41a96fb49835 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -25,12 +25,15 @@
*
******************************************************************************/
-#include "vmwgfx_kms.h"
-#include "device_include/svga3d_surfacedefs.h"
-#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "vmwgfx_kms.h"
+#include "device_include/svga3d_surfacedefs.h"
#define vmw_crtc_to_stdu(x) \
container_of(x, struct vmw_screen_target_display_unit, base.crtc)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 3a6da3b66484..29d8794f0421 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -112,6 +112,8 @@ static const struct vmw_res_func vmw_legacy_surface_func = {
.res_type = vmw_res_surface,
.needs_backup = false,
.may_evict = true,
+ .prio = 1,
+ .dirty_prio = 1,
.type_name = "legacy surfaces",
.backup_placement = &vmw_srf_placement,
.create = &vmw_legacy_srf_create,
@@ -124,6 +126,8 @@ static const struct vmw_res_func vmw_gb_surface_func = {
.res_type = vmw_res_surface,
.needs_backup = true,
.may_evict = true,
+ .prio = 1,
+ .dirty_prio = 2,
.type_name = "guest backed surfaces",
.backup_placement = &vmw_mob_placement,
.create = vmw_gb_surface_create,
@@ -915,12 +919,6 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
if (unlikely(drm_is_render_client(file_priv)))
require_exist = true;
- if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) {
- DRM_ERROR("Locked master refused legacy "
- "surface reference.\n");
- return -EACCES;
- }
-
handle = u_handle;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 8bafa6eac5a8..5a7b8bb420de 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -25,7 +25,6 @@
*
**************************************************************************/
-#include <drm/drmP.h>
#include "vmwgfx_drv.h"
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index 1d2322ad6fd5..0e063743dd86 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -28,9 +28,10 @@
#ifndef _VMWGFX_VALIDATION_H_
#define _VMWGFX_VALIDATION_H_
-#include <drm/drm_hashtab.h>
#include <linux/list.h>
#include <linux/ww_mutex.h>
+
+#include <drm/drm_hashtab.h>
#include <drm/ttm/ttm_execbuf_util.h>
#define VMW_RES_DIRTY_NONE 0
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index de990036199d..21ad1c359b61 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -46,7 +46,7 @@ static void fb_destroy(struct drm_framebuffer *fb)
drm_gem_fb_destroy(fb);
}
-static struct drm_framebuffer_funcs fb_funcs = {
+static const struct drm_framebuffer_funcs fb_funcs = {
.destroy = fb_destroy,
};
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 6386e2fe2ff7..ee2a025e54cf 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -113,13 +113,17 @@ enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
return IPUV3_COLORSPACE_YUV;
- case V4L2_PIX_FMT_XRGB32:
- case V4L2_PIX_FMT_XBGR32:
- case V4L2_PIX_FMT_RGB32:
- case V4L2_PIX_FMT_BGR32:
- case V4L2_PIX_FMT_RGB24:
- case V4L2_PIX_FMT_BGR24:
case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_RGB24:
+ case V4L2_PIX_FMT_ABGR32:
+ case V4L2_PIX_FMT_XBGR32:
+ case V4L2_PIX_FMT_BGRA32:
+ case V4L2_PIX_FMT_BGRX32:
+ case V4L2_PIX_FMT_RGBA32:
+ case V4L2_PIX_FMT_RGBX32:
+ case V4L2_PIX_FMT_ARGB32:
+ case V4L2_PIX_FMT_XRGB32:
return IPUV3_COLORSPACE_RGB;
default:
return IPUV3_COLORSPACE_UNKNOWN;
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index be1226ce28cd..a1c85d1521f5 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -182,9 +182,27 @@ static int v4l2_pix_fmt_to_drm_fourcc(u32 pixelformat)
case V4L2_PIX_FMT_RGB32:
/* R G B A <=> [32:0] A:B:G:R */
return DRM_FORMAT_XBGR8888;
+ case V4L2_PIX_FMT_ABGR32:
+ /* B G R A <=> [32:0] A:R:G:B */
+ return DRM_FORMAT_ARGB8888;
case V4L2_PIX_FMT_XBGR32:
/* B G R X <=> [32:0] X:R:G:B */
return DRM_FORMAT_XRGB8888;
+ case V4L2_PIX_FMT_BGRA32:
+ /* A B G R <=> [32:0] R:G:B:A */
+ return DRM_FORMAT_RGBA8888;
+ case V4L2_PIX_FMT_BGRX32:
+ /* X B G R <=> [32:0] R:G:B:X */
+ return DRM_FORMAT_RGBX8888;
+ case V4L2_PIX_FMT_RGBA32:
+ /* R G B A <=> [32:0] A:B:G:R */
+ return DRM_FORMAT_ABGR8888;
+ case V4L2_PIX_FMT_RGBX32:
+ /* R G B X <=> [32:0] X:B:G:R */
+ return DRM_FORMAT_XBGR8888;
+ case V4L2_PIX_FMT_ARGB32:
+ /* A R G B <=> [32:0] B:G:R:A */
+ return DRM_FORMAT_BGRA8888;
case V4L2_PIX_FMT_XRGB32:
/* X R G B <=> [32:0] B:G:R:X */
return DRM_FORMAT_BGRX8888;
@@ -823,8 +841,14 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
break;
case V4L2_PIX_FMT_RGB32:
case V4L2_PIX_FMT_BGR32:
- case V4L2_PIX_FMT_XRGB32:
+ case V4L2_PIX_FMT_ABGR32:
case V4L2_PIX_FMT_XBGR32:
+ case V4L2_PIX_FMT_BGRA32:
+ case V4L2_PIX_FMT_BGRX32:
+ case V4L2_PIX_FMT_RGBA32:
+ case V4L2_PIX_FMT_RGBX32:
+ case V4L2_PIX_FMT_ARGB32:
+ case V4L2_PIX_FMT_XRGB32:
offset = image->rect.left * 4 +
image->rect.top * pix->bytesperline;
break;
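
The "<=>" comments in the table above encode the key difference between the
two fourcc namespaces: V4L2 32-bit RGB formats name bytes in memory order,
while DRM formats describe a packed little-endian word. A runnable userspace
check of one mapping (V4L2_PIX_FMT_ABGR32, i.e. bytes B G R A, to
DRM_FORMAT_ARGB8888):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* V4L2_PIX_FMT_ABGR32 stores the bytes B, G, R, A in memory order. */
	uint8_t mem[4] = { 0x01 /* B */, 0x02 /* G */, 0x03 /* R */, 0x04 /* A */ };
	uint32_t word;

	memcpy(&word, mem, sizeof(word));

	/* On a little-endian host this prints 0x04030201, i.e. A:R:G:B from
	 * bit 31 down to bit 0 -- exactly DRM_FORMAT_ARGB8888. */
	printf("0x%08x\n", (unsigned int)word);
	return 0;
}
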
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
index 9d25db6924b3..eeca50d9a1ee 100644
--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -252,6 +252,12 @@ static const struct ipu_image_pixfmt image_convert_formats[] = {
.fourcc = V4L2_PIX_FMT_XBGR32,
.bpp = 32,
}, {
+ .fourcc = V4L2_PIX_FMT_BGRX32,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGBX32,
+ .bpp = 32,
+ }, {
.fourcc = V4L2_PIX_FMT_YUYV,
.bpp = 16,
.uv_width_dec = 2,
@@ -376,8 +382,11 @@ static inline int num_stripes(int dim)
/*
* Calculate downsizing coefficients, which are the same for all tiles,
- * and bilinear resizing coefficients, which are used to find the best
- * seam positions.
+ * and initial bilinear resizing coefficients, which are used to find the
+ * best seam positions.
+ * Also determine the number of tiles necessary to guarantee that no tile
+ * is larger than 1024 pixels in either dimension, both at the output and
+ * between the IC downsizing and main processing sections.
*/
static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
struct ipu_image *in,
@@ -391,6 +400,8 @@ static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
u32 resized_height = out->rect.height;
u32 resize_coeff_h;
u32 resize_coeff_v;
+ u32 cols;
+ u32 rows;
if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
resized_width = out->rect.height;
@@ -401,14 +412,12 @@ static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
if (WARN_ON(resized_width == 0 || resized_height == 0))
return -EINVAL;
- while (downsized_width > 1024 ||
- downsized_width >= resized_width * 2) {
+ while (downsized_width >= resized_width * 2) {
downsized_width >>= 1;
downsize_coeff_h++;
}
- while (downsized_height > 1024 ||
- downsized_height >= resized_height * 2) {
+ while (downsized_height >= resized_height * 2) {
downsized_height >>= 1;
downsize_coeff_v++;
}
@@ -422,10 +431,18 @@ static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
resize_coeff_h = 8192 * (downsized_width - 1) / (resized_width - 1);
resize_coeff_v = 8192 * (downsized_height - 1) / (resized_height - 1);
+ /*
+ * Both the output of the IC downsizing section before being passed to
+ * the IC main processing section and the final output of the IC main
+ * processing section must be <= 1024 pixels in both dimensions.
+ */
+ cols = num_stripes(max_t(u32, downsized_width, resized_width));
+ rows = num_stripes(max_t(u32, downsized_height, resized_height));
+
dev_dbg(ctx->chan->priv->ipu->dev,
"%s: hscale: >>%u, *8192/%u vscale: >>%u, *8192/%u, %ux%u tiles\n",
__func__, downsize_coeff_h, resize_coeff_h, downsize_coeff_v,
- resize_coeff_v, ctx->in.num_cols, ctx->in.num_rows);
+ resize_coeff_v, cols, rows);
if (downsize_coeff_h > 2 || downsize_coeff_v > 2 ||
resize_coeff_h > 0x3fff || resize_coeff_v > 0x3fff)
@@ -435,6 +452,8 @@ static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
ctx->downsize_coeff_v = downsize_coeff_v;
ctx->image_resize_coeff_h = resize_coeff_h;
ctx->image_resize_coeff_v = resize_coeff_v;
+ ctx->in.num_cols = cols;
+ ctx->in.num_rows = rows;
return 0;
}
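
The coefficients computed above use the IC's 19.13 fixed-point convention,
where 8192 represents 1.0: the downsizer halves until the result is within 2x
of the target, and the bilinear section covers the remaining distance. A
worked userspace example for a 1920-to-600 pixel downscale (illustrative
numbers only):

#include <stdio.h>

int main(void)
{
	unsigned int in_w = 1920, out_w = 600;
	unsigned int downsized = in_w, downsize_coeff = 0, resize_coeff;

	/* Halve in the downsizer until within 2x of the target width. */
	while (downsized >= out_w * 2) {
		downsized >>= 1;
		downsize_coeff++;
	}

	/* Bilinear span over the remaining distance; 8192 == 1.0. */
	resize_coeff = 8192 * (downsized - 1) / (out_w - 1);

	/* Prints "downsize >>1, resize *8192/13115": 1920 -> 960 -> 600,
	 * and 13115 <= 0x3fff, so the parameters are representable. */
	printf("downsize >>%u, resize *8192/%u\n", downsize_coeff, resize_coeff);
	return 0;
}
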
@@ -442,12 +461,10 @@ static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
#define round_closest(x, y) round_down((x) + (y)/2, (y))
/*
- * Find the best aligned seam position in the interval [out_start, out_end].
+ * Find the best aligned seam position for the given column / row index.
* Rotation and image offsets are out of scope.
*
- * @out_start: start of interval, must be within 1024 pixels / lines
- * of out_end
- * @out_end: end of interval, smaller than or equal to out_edge
+ * @index: column / row index, used to calculate valid interval
* @in_edge: input right / bottom edge
* @out_edge: output right / bottom edge
* @in_align: input alignment, either horizontal 8-byte line start address
@@ -463,8 +480,7 @@ static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
* @_out_seam: aligned output seam position return value
*/
static void find_best_seam(struct ipu_image_convert_ctx *ctx,
- unsigned int out_start,
- unsigned int out_end,
+ unsigned int index,
unsigned int in_edge,
unsigned int out_edge,
unsigned int in_align,
@@ -482,6 +498,24 @@ static void find_best_seam(struct ipu_image_convert_ctx *ctx,
unsigned int out_seam = 0;
unsigned int in_seam = 0;
unsigned int min_diff = UINT_MAX;
+ unsigned int out_start;
+ unsigned int out_end;
+ unsigned int in_start;
+ unsigned int in_end;
+
+ /* Start within 1024 pixels of the right / bottom edge */
+ out_start = max_t(int, index * out_align, out_edge - 1024);
+ /* End before having to add more columns to the left / rows above */
+ out_end = min_t(unsigned int, out_edge, index * 1024 + 1);
+
+ /*
+ * Limit input seam position to make sure that the downsized input tile
+ * to the right or bottom does not exceed 1024 pixels.
+ */
+ in_start = max_t(int, index * in_align,
+ in_edge - (1024 << downsize_coeff));
+ in_end = min_t(unsigned int, in_edge,
+ index * (1024 << downsize_coeff) + 1);
/*
* Output tiles must start at a multiple of 8 bytes horizontally and
@@ -492,6 +526,7 @@ static void find_best_seam(struct ipu_image_convert_ctx *ctx,
for (out_pos = out_start; out_pos < out_end; out_pos += out_align) {
unsigned int in_pos;
unsigned int in_pos_aligned;
+ unsigned int in_pos_rounded;
unsigned int abs_diff;
/*
@@ -512,9 +547,16 @@ static void find_best_seam(struct ipu_image_convert_ctx *ctx,
* start the input tile at, 19.13 fixed point.
*/
in_pos_aligned = round_closest(in_pos, 8192U * in_align);
+ /* Convert 19.13 fixed point to integer */
+ in_pos_rounded = in_pos_aligned / 8192U;
+
+ if (in_pos_rounded < in_start)
+ continue;
+ if (in_pos_rounded >= in_end)
+ break;
if ((in_burst > 1) &&
- (in_edge - in_pos_aligned / 8192U) % in_burst)
+ (in_edge - in_pos_rounded) % in_burst)
continue;
if (in_pos < in_pos_aligned)
@@ -523,19 +565,18 @@ static void find_best_seam(struct ipu_image_convert_ctx *ctx,
abs_diff = in_pos - in_pos_aligned;
if (abs_diff < min_diff) {
- in_seam = in_pos_aligned;
+ in_seam = in_pos_rounded;
out_seam = out_pos;
min_diff = abs_diff;
}
}
*_out_seam = out_seam;
- /* Convert 19.13 fixed point to integer seam position */
- *_in_seam = DIV_ROUND_CLOSEST(in_seam, 8192U);
+ *_in_seam = in_seam;
- dev_dbg(dev, "%s: out_seam %u(%u) in [%u, %u], in_seam %u(%u) diff %u.%03u\n",
+ dev_dbg(dev, "%s: out_seam %u(%u) in [%u, %u], in_seam %u(%u) in [%u, %u] diff %u.%03u\n",
__func__, out_seam, out_align, out_start, out_end,
- *_in_seam, in_align, min_diff / 8192,
+ in_seam, in_align, in_start, in_end, min_diff / 8192,
DIV_ROUND_CLOSEST(min_diff % 8192 * 1000, 8192));
}
@@ -712,8 +753,6 @@ static void find_seams(struct ipu_image_convert_ctx *ctx,
!(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
bool allow_out_overshoot = (col < in->num_cols - 1) &&
!(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
- unsigned int out_start;
- unsigned int out_end;
unsigned int in_left;
unsigned int out_left;
@@ -722,12 +761,7 @@ static void find_seams(struct ipu_image_convert_ctx *ctx,
* horizontally.
*/
- /* Start within 1024 pixels of the right edge */
- out_start = max_t(int, 0, out_right - 1024);
- /* End before having to add more columns to the left */
- out_end = min_t(unsigned int, out_right, col * 1024);
-
- find_best_seam(ctx, out_start, out_end,
+ find_best_seam(ctx, col,
in_right, out_right,
in_left_align, out_left_align,
allow_in_overshoot ? 1 : 8 /* burst length */,
@@ -762,17 +796,10 @@ static void find_seams(struct ipu_image_convert_ctx *ctx,
for (row = in->num_rows - 1; row > 0; row--) {
bool allow_overshoot = row < in->num_rows - 1;
- unsigned int out_start;
- unsigned int out_end;
unsigned int in_top;
unsigned int out_top;
- /* Start within 1024 lines of the bottom edge */
- out_start = max_t(int, 0, out_bottom - 1024);
- /* End before having to add more rows above */
- out_end = min_t(unsigned int, out_bottom, row * 1024);
-
- find_best_seam(ctx, out_start, out_end,
+ find_best_seam(ctx, row,
in_bottom, out_bottom,
in_top_align, out_top_align,
1, allow_overshoot ? 1 : out_height_align,
@@ -809,13 +836,21 @@ static void find_seams(struct ipu_image_convert_ctx *ctx,
in_bottom, flipped_out_top, out_bottom);
}
-static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
- struct ipu_image_convert_image *image)
+static int calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
+ struct ipu_image_convert_image *image)
{
struct ipu_image_convert_chan *chan = ctx->chan;
struct ipu_image_convert_priv *priv = chan->priv;
+ unsigned int max_width = 1024;
+ unsigned int max_height = 1024;
unsigned int i;
+ if (image->type == IMAGE_CONVERT_IN) {
+ /* Up to 4096x4096 input tile size */
+ max_width <<= ctx->downsize_coeff_h;
+ max_height <<= ctx->downsize_coeff_v;
+ }
+
for (i = 0; i < ctx->num_tiles; i++) {
struct ipu_image_tile *tile;
const unsigned int row = i / image->num_cols;
@@ -845,7 +880,17 @@ static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
image->type == IMAGE_CONVERT_IN ? "Input" : "Output",
row, col,
tile->width, tile->height, tile->left, tile->top);
+
+ if (!tile->width || tile->width > max_width ||
+ !tile->height || tile->height > max_height) {
+ dev_err(priv->ipu->dev, "invalid %s tile size: %ux%u\n",
+ image->type == IMAGE_CONVERT_IN ? "input" :
+ "output", tile->width, tile->height);
+ return -EINVAL;
+ }
}
+
+ return 0;
}
/*
@@ -1076,6 +1121,7 @@ static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx)
!(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
u32 resized_width;
u32 resize_coeff_h;
+ u32 in_width;
tile_idx = col;
in_tile = &ctx->in.tile[tile_idx];
@@ -1093,33 +1139,35 @@ static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx)
dev_dbg(priv->ipu->dev, "%s: column %u hscale: *8192/%u\n",
__func__, col, resize_coeff_h);
+ /*
+ * With the horizontal scaling factor known, round up resized
+ * width (output width or height) to burst size.
+ */
+ resized_width = round_up(resized_width, 8);
+
+ /*
+ * Calculate input width from the last accessed input pixel
+ * given resized width and scaling coefficients. Round up to
+ * burst size.
+ */
+ last_output = resized_width - 1;
+ if (closest && ((last_output * resize_coeff_h) % 8192))
+ last_output++;
+ in_width = round_up(
+ (DIV_ROUND_UP(last_output * resize_coeff_h, 8192) + 1)
+ << ctx->downsize_coeff_h, 8);
for (row = 0; row < ctx->in.num_rows; row++) {
tile_idx = row * ctx->in.num_cols + col;
in_tile = &ctx->in.tile[tile_idx];
out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
- /*
- * With the horizontal scaling factor known, round up
- * resized width (output width or height) to burst size.
- */
if (ipu_rot_mode_is_irt(ctx->rot_mode))
- out_tile->height = round_up(resized_width, 8);
+ out_tile->height = resized_width;
else
- out_tile->width = round_up(resized_width, 8);
-
- /*
- * Calculate input width from the last accessed input
- * pixel given resized width and scaling coefficients.
- * Round up to burst size.
- */
- last_output = round_up(resized_width, 8) - 1;
- if (closest)
- last_output++;
- in_tile->width = round_up(
- (DIV_ROUND_UP(last_output * resize_coeff_h,
- 8192) + 1)
- << ctx->downsize_coeff_h, 8);
+ out_tile->width = resized_width;
+
+ in_tile->width = in_width;
}
ctx->resize_coeffs_h[col] = resize_coeff_h;
@@ -1130,6 +1178,7 @@ static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx)
!(ctx->rot_mode & IPU_ROT_BIT_VFLIP);
u32 resized_height;
u32 resize_coeff_v;
+ u32 in_height;
tile_idx = row * ctx->in.num_cols;
in_tile = &ctx->in.tile[tile_idx];
@@ -1147,33 +1196,35 @@ static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx)
dev_dbg(priv->ipu->dev, "%s: row %u vscale: *8192/%u\n",
__func__, row, resize_coeff_v);
+ /*
+ * With the vertical scaling factor known, round up resized
+ * height (output width or height) to IDMAC limitations.
+ */
+ resized_height = round_up(resized_height, 2);
+
+ /*
+ * Calculate input height from the last accessed input pixel
+ * given resized height and scaling coefficients. Align to
+ * IDMAC restrictions.
+ */
+ last_output = resized_height - 1;
+ if (closest && ((last_output * resize_coeff_v) % 8192))
+ last_output++;
+ in_height = round_up(
+ (DIV_ROUND_UP(last_output * resize_coeff_v, 8192) + 1)
+ << ctx->downsize_coeff_v, 2);
+
for (col = 0; col < ctx->in.num_cols; col++) {
tile_idx = row * ctx->in.num_cols + col;
in_tile = &ctx->in.tile[tile_idx];
out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
- /*
- * With the vertical scaling factor known, round up
- * resized height (output width or height) to IDMAC
- * limitations.
- */
if (ipu_rot_mode_is_irt(ctx->rot_mode))
- out_tile->width = round_up(resized_height, 2);
+ out_tile->width = resized_height;
else
- out_tile->height = round_up(resized_height, 2);
-
- /*
- * Calculate input width from the last accessed input
- * pixel given resized height and scaling coefficients.
- * Align to IDMAC restrictions.
- */
- last_output = round_up(resized_height, 2) - 1;
- if (closest)
- last_output++;
- in_tile->height = round_up(
- (DIV_ROUND_UP(last_output * resize_coeff_v,
- 8192) + 1)
- << ctx->downsize_coeff_v, 2);
+ out_tile->height = resized_height;
+
+ in_tile->height = in_height;
}
ctx->resize_coeffs_v[row] = resize_coeff_v;
@@ -2024,22 +2075,26 @@ ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
ctx->chan = chan;
init_completion(&ctx->aborted);
+ ctx->rot_mode = rot_mode;
+
+ /* Sets ctx->in.num_rows/cols as well */
+ ret = calc_image_resize_coefficients(ctx, in, out);
+ if (ret)
+ goto out_free;
+
s_image = &ctx->in;
d_image = &ctx->out;
/* set tiling and rotation */
- d_image->num_rows = num_stripes(out->pix.height);
- d_image->num_cols = num_stripes(out->pix.width);
if (ipu_rot_mode_is_irt(rot_mode)) {
- s_image->num_rows = d_image->num_cols;
- s_image->num_cols = d_image->num_rows;
+ d_image->num_rows = s_image->num_cols;
+ d_image->num_cols = s_image->num_rows;
} else {
- s_image->num_rows = d_image->num_rows;
- s_image->num_cols = d_image->num_cols;
+ d_image->num_rows = s_image->num_rows;
+ d_image->num_cols = s_image->num_cols;
}
ctx->num_tiles = d_image->num_cols * d_image->num_rows;
- ctx->rot_mode = rot_mode;
ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
if (ret)
@@ -2048,15 +2103,14 @@ ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
if (ret)
goto out_free;
- ret = calc_image_resize_coefficients(ctx, in, out);
- if (ret)
- goto out_free;
-
calc_out_tile_map(ctx);
find_seams(ctx, s_image, d_image);
- calc_tile_dimensions(ctx, s_image);
+ ret = calc_tile_dimensions(ctx, s_image);
+ if (ret)
+ goto out_free;
+
ret = calc_tile_offsets(ctx, s_image);
if (ret)
goto out_free;
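
Editor's note: the input-width computation above packs rounding, a sampling
bump, and burst alignment into one expression. Below is a standalone sketch
of that arithmetic, with round_up() and DIV_ROUND_UP() reimplemented for
userspace (the kernel's round_up() requires a power-of-two alignment); the
coefficient values are made up for illustration, and the driver's 'closest'
bilinear-sampling condition is assumed to hold.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define round_up(x, y)		((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int resized_width = 101;	/* hypothetical output tile width */
	unsigned int resize_coeff_h = 4096;	/* *8192/4096, i.e. a 2x upscale */
	unsigned int downsize_coeff_h = 0;	/* no pre-decimation */
	unsigned int last_output, in_width;

	/* Round the output width up to the 8-pixel burst size. */
	resized_width = round_up(resized_width, 8);

	/* Last output pixel; bump it when the scaler samples past it. */
	last_output = resized_width - 1;
	if ((last_output * resize_coeff_h) % 8192)
		last_output++;

	/* Input width covering the last accessed input pixel, burst-aligned. */
	in_width = round_up((DIV_ROUND_UP(last_output * resize_coeff_h, 8192) + 1)
			    << downsize_coeff_h, 8);

	printf("out %u -> in %u\n", resized_width, in_width);	/* out 104 -> in 56 */
	return 0;
}
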
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index 8504e19437ff..fc1e45d44719 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -487,11 +487,6 @@ static int aty128_encode_var(struct fb_var_screeninfo *var,
const struct aty128fb_par *par);
static int aty128_decode_var(struct fb_var_screeninfo *var,
struct aty128fb_par *par);
-#if 0
-static void aty128_get_pllinfo(struct aty128fb_par *par, void __iomem *bios);
-static void __iomem *aty128_map_ROM(struct pci_dev *pdev,
- const struct aty128fb_par *par);
-#endif
static void aty128_timings(struct aty128fb_par *par);
static void aty128_init_engine(struct aty128fb_par *par);
static void aty128_reset_engine(const struct aty128fb_par *par);
@@ -1665,19 +1660,6 @@ static void aty128_st_pal(u_int regno, u_int red, u_int green, u_int blue,
struct aty128fb_par *par)
{
if (par->chip_gen == rage_M3) {
-#if 0
- /* Note: For now, on M3, we set palette on both heads, which may
- * be useless. Can someone with a M3 check this ?
- *
- * This code would still be useful if using the second CRTC to
- * do mirroring
- */
-
- aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) |
- DAC_PALETTE_ACCESS_CNTL);
- aty_st_8(PALETTE_INDEX, regno);
- aty_st_le32(PALETTE_DATA, (red<<16)|(green<<8)|blue);
-#endif
aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) &
~DAC_PALETTE_ACCESS_CNTL);
}
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 72bcfbe42e49..6dda5d885a03 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -1188,19 +1188,6 @@ static int aty_crtc_to_var(const struct crtc *crtc,
(c_sync ? FB_SYNC_COMP_HIGH_ACT : 0);
switch (pix_width) {
-#if 0
- case CRTC_PIX_WIDTH_4BPP:
- bpp = 4;
- var->red.offset = 0;
- var->red.length = 8;
- var->green.offset = 0;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.offset = 0;
- var->transp.length = 0;
- break;
-#endif
case CRTC_PIX_WIDTH_8BPP:
bpp = 8;
var->red.offset = 0;
@@ -1466,11 +1453,6 @@ static int atyfb_set_par(struct fb_info *info)
var->bits_per_pixel,
par->crtc.vxres * var->bits_per_pixel / 8);
#endif /* CONFIG_BOOTX_TEXT */
-#if 0
- /* switch to accelerator mode */
- if (!(par->crtc.gen_cntl & CRTC_EXT_DISP_EN))
- aty_st_le32(CRTC_GEN_CNTL, par->crtc.gen_cntl | CRTC_EXT_DISP_EN, par);
-#endif
#ifdef DEBUG
{
/* dump non shadow CRTC, pll, LCD registers */
@@ -2396,17 +2378,6 @@ static int aty_init(struct fb_info *info)
par->pll_ops = &aty_pll_ibm514;
break;
#endif
-#if 0 /* dead code */
- case CLK_STG1703:
- par->pll_ops = &aty_pll_stg1703;
- break;
- case CLK_CH8398:
- par->pll_ops = &aty_pll_ch8398;
- break;
- case CLK_ATT20C408:
- par->pll_ops = &aty_pll_att20c408;
- break;
-#endif
default:
PRINTKI("aty_init: CLK type not implemented yet!");
par->pll_ops = &aty_pll_unsupported;
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 6f891d82eebe..4ca07866f2f6 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -2217,8 +2217,7 @@ static ssize_t radeon_show_edid1(struct file *filp, struct kobject *kobj,
char *buf, loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct pci_dev *pdev = to_pci_dev(dev);
- struct fb_info *info = pci_get_drvdata(pdev);
+ struct fb_info *info = dev_get_drvdata(dev);
struct radeonfb_info *rinfo = info->par;
return radeon_show_one_edid(buf, off, count, rinfo->mon1_EDID);
@@ -2230,8 +2229,7 @@ static ssize_t radeon_show_edid2(struct file *filp, struct kobject *kobj,
char *buf, loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct pci_dev *pdev = to_pci_dev(dev);
- struct fb_info *info = pci_get_drvdata(pdev);
+ struct fb_info *info = dev_get_drvdata(dev);
struct radeonfb_info *rinfo = info->par;
return radeon_show_one_edid(buf, off, count, rinfo->mon2_EDID);
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 04a22663b4fb..51d97ec4f58f 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -122,28 +122,13 @@ static void efifb_copy_bmp(u8 *src, u32 *dst, int width, struct screen_info *si)
*/
static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
{
- static const int default_resolutions[][2] = {
- { 800, 600 },
- { 1024, 768 },
- { 1280, 1024 },
- };
- u32 i, right_margin;
-
- for (i = 0; i < ARRAY_SIZE(default_resolutions); i++) {
- if (default_resolutions[i][0] == si->lfb_width &&
- default_resolutions[i][1] == si->lfb_height)
- break;
- }
- /* If not a default resolution used for textmode, this should be fine */
- if (i >= ARRAY_SIZE(default_resolutions))
- return true;
-
- /* If the right margin is 5 times smaller then the left one, reject */
- right_margin = si->lfb_width - (bgrt_tab.image_offset_x + bmp_width);
- if (right_margin < (bgrt_tab.image_offset_x / 5))
- return false;
+ /*
+	 * All x86 firmware implementations horizontally center the image (the yoffset
+ * calculations differ between boards, but xoffset is predictable).
+ */
+ u32 expected_xoffset = (si->lfb_width - bmp_width) / 2;
- return true;
+ return bgrt_tab.image_offset_x == expected_xoffset;
}
#else
static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
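
Editor's note: the rewritten check reduces to a single centering equality. A
minimal userspace sketch of the same test follows; the values are
hypothetical stand-ins for the screen_info and bgrt_tab fields.

#include <stdbool.h>
#include <stdio.h>

static bool bgrt_is_centered(unsigned int lfb_width, unsigned int bmp_width,
			     unsigned int image_offset_x)
{
	/* x86 firmware centers the boot logo, so the offset is predictable. */
	unsigned int expected_xoffset = (lfb_width - bmp_width) / 2;

	return image_offset_x == expected_xoffset;
}

int main(void)
{
	/* 800-pixel-wide panel, 480-pixel-wide logo: centered at x == 160. */
	printf("%d\n", bgrt_is_centered(800, 480, 160));	/* prints 1 */
	printf("%d\n", bgrt_is_centered(800, 480, 100));	/* prints 0 */
	return 0;
}
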
diff --git a/drivers/video/fbdev/mmp/core.c b/drivers/video/fbdev/mmp/core.c
index 0ffc1b7b7052..154127256a2c 100644
--- a/drivers/video/fbdev/mmp/core.c
+++ b/drivers/video/fbdev/mmp/core.c
@@ -153,13 +153,11 @@ EXPORT_SYMBOL_GPL(mmp_get_path);
struct mmp_path *mmp_register_path(struct mmp_path_info *info)
{
int i;
- size_t size;
struct mmp_path *path = NULL;
struct mmp_panel *panel;
- size = sizeof(struct mmp_path)
- + sizeof(struct mmp_overlay) * info->overlay_num;
- path = kzalloc(size, GFP_KERNEL);
+ path = kzalloc(struct_size(path, overlays, info->overlay_num),
+ GFP_KERNEL);
if (!path)
return NULL;
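
Editor's note: struct_size() computes the size of a structure with a trailing
flexible array in one overflow-aware expression. The sketch below shows the
open-coded equivalent with stand-in types; the real kernel helper computes
the same value but saturates on arithmetic overflow instead of wrapping.

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-ins for the mmp types; only the memory layout matters. */
struct mmp_overlay { int id; };
struct mmp_path {
	int overlay_num;
	struct mmp_overlay overlays[];	/* flexible array member */
};

int main(void)
{
	int overlay_num = 3;
	/* Equivalent of struct_size(path, overlays, overlay_num). */
	size_t size = sizeof(struct mmp_path) +
		      sizeof(struct mmp_overlay) * overlay_num;
	struct mmp_path *path = calloc(1, size);

	if (!path)
		return 1;
	path->overlay_num = overlay_num;
	printf("allocated %zu bytes\n", size);
	free(path);
	return 0;
}
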
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/Kconfig b/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
index 8c1c5a4cfe18..744416dc530e 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
+++ b/drivers/video/fbdev/omap2/omapfb/displays/Kconfig
@@ -49,6 +49,7 @@ config FB_OMAP2_PANEL_DSI_CM
config FB_OMAP2_PANEL_SONY_ACX565AKM
tristate "ACX565AKM Panel"
depends on SPI && BACKLIGHT_CLASS_DEVICE
+ depends on DRM_PANEL_SONY_ACX565AKM = n
help
This is the LCD panel used on Nokia N900
@@ -61,18 +62,21 @@ config FB_OMAP2_PANEL_LGPHILIPS_LB035Q02
config FB_OMAP2_PANEL_SHARP_LS037V7DW01
tristate "Sharp LS037V7DW01 LCD Panel"
depends on BACKLIGHT_CLASS_DEVICE
+ depends on DRM_PANEL_SHARP_LS037V7DW01 = n
help
LCD Panel used in TI's SDP3430 and EVM boards
config FB_OMAP2_PANEL_TPO_TD028TTEC1
tristate "TPO TD028TTEC1 LCD Panel"
depends on SPI
+ depends on DRM_PANEL_TPO_TD028TTEC1 = n
help
LCD panel used in Openmoko.
config FB_OMAP2_PANEL_TPO_TD043MTEA1
tristate "TPO TD043MTEA1 LCD Panel"
depends on SPI
+ depends on DRM_PANEL_TPO_TD043MTEA1 = n
help
LCD Panel used in OMAP3 Pandora
@@ -80,6 +84,7 @@ config FB_OMAP2_PANEL_NEC_NL8048HL11
tristate "NEC NL8048HL11 Panel"
depends on SPI
depends on BACKLIGHT_CLASS_DEVICE
+ depends on DRM_PANEL_NEC_NL8048HL11 = n
help
This NEC NL8048HL11 panel is TFT LCD used in the
Zoom2/3/3630 sdp boards.
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 7ff4b6b84282..0a3b2b7c7891 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -458,13 +458,11 @@ static int pvr2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
set_color_bitfields(var);
if (var->vmode & FB_VMODE_YWRAP) {
- if (var->xoffset || var->yoffset < 0 ||
- var->yoffset >= var->yres_virtual) {
+ if (var->xoffset || var->yoffset >= var->yres_virtual) {
var->xoffset = var->yoffset = 0;
} else {
if (var->xoffset > var->xres_virtual - var->xres ||
- var->yoffset > var->yres_virtual - var->yres ||
- var->xoffset < 0 || var->yoffset < 0)
+ var->yoffset > var->yres_virtual - var->yres)
var->xoffset = var->yoffset = 0;
}
} else {
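
Editor's note: the dropped tests were dead code, since xoffset and yoffset
are __u32 fields of struct fb_var_screeninfo and a '< 0' comparison on them
can never be true. A two-line illustration:

#include <stdio.h>

int main(void)
{
	unsigned int yoffset = 0;	/* fb_var_screeninfo.yoffset is a __u32 */

	if (yoffset < 0)	/* always false; gcc flags it with -Wtype-limits */
		printf("unreachable\n");
	else
		printf("'< 0' is dead code for unsigned fields\n");
	return 0;
}
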
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index ac0bcac9a865..c249763dbf0b 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -1594,6 +1594,7 @@ sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl)
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
info->fix.ypanstep = 2;
+ /* Fall through */
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
info->fix.xpanstep = 2;
@@ -2084,6 +2085,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
info->fix.ypanstep = 2;
+ /* Fall through */
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
info->fix.xpanstep = 2;
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index 7b1b0d8d27a7..207d0add684b 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -1694,10 +1694,8 @@ static void smtcfb_pci_remove(struct pci_dev *pdev)
static int __maybe_unused smtcfb_pci_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct smtcfb_info *sfb;
+ struct smtcfb_info *sfb = dev_get_drvdata(device);
- sfb = pci_get_drvdata(pdev);
/* set the hw in sleep mode use external clock and self memory refresh
* so that we can turn off internal PLLs later on
@@ -1717,10 +1715,8 @@ static int __maybe_unused smtcfb_pci_suspend(struct device *device)
static int __maybe_unused smtcfb_pci_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct smtcfb_info *sfb;
+ struct smtcfb_info *sfb = dev_get_drvdata(device);
- sfb = pci_get_drvdata(pdev);
/* reinit hardware */
sm7xx_init_hw();
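
Editor's note: this conversion (also applied in the radeon_base.c hunks
above) relies on pci_get_drvdata() being a thin wrapper around
dev_get_drvdata() on &pdev->dev, so dev_pm_ops callbacks, which already
receive the struct device, can skip the to_pci_dev() round trip. A sketch
mirroring the wrapper in include/linux/pci.h:

#include <linux/pci.h>

/* Same body as the real pci_get_drvdata(); repeated here for illustration. */
static inline void *example_pci_get_drvdata(struct pci_dev *pdev)
{
	return dev_get_drvdata(&pdev->dev);
}
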
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index c328e8265cb1..fe373b63ddd6 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1183,7 +1183,7 @@ static int dlfb_ops_blank(int blank_mode, struct fb_info *info)
return 0;
}
-static struct fb_ops dlfb_ops = {
+static const struct fb_ops dlfb_ops = {
.owner = THIS_MODULE,
.fb_read = fb_sys_read,
.fb_write = dlfb_ops_write,
diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c
index e2b2062673da..ffa2ca2d3f5e 100644
--- a/drivers/video/fbdev/via/via-core.c
+++ b/drivers/video/fbdev/via/via-core.c
@@ -221,49 +221,6 @@ void viafb_release_dma(void)
}
EXPORT_SYMBOL_GPL(viafb_release_dma);
-
-#if 0
-/*
- * Copy a single buffer from FB memory, synchronously. This code works
- * but is not currently used.
- */
-void viafb_dma_copy_out(unsigned int offset, dma_addr_t paddr, int len)
-{
- unsigned long flags;
- int csr;
-
- mutex_lock(&viafb_dma_lock);
- init_completion(&viafb_dma_completion);
- /*
- * Program the controller.
- */
- spin_lock_irqsave(&global_dev.reg_lock, flags);
- viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
- /* Enable ints; must happen after CSR0 write! */
- viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE);
- viafb_mmio_write(VDMA_MARL0, (int) (paddr & 0xfffffff0));
- viafb_mmio_write(VDMA_MARH0, (int) ((paddr >> 28) & 0xfff));
- /* Data sheet suggests DAR0 should be <<4, but it lies */
- viafb_mmio_write(VDMA_DAR0, offset);
- viafb_mmio_write(VDMA_DQWCR0, len >> 4);
- viafb_mmio_write(VDMA_TMR0, 0);
- viafb_mmio_write(VDMA_DPRL0, 0);
- viafb_mmio_write(VDMA_DPRH0, 0);
- viafb_mmio_write(VDMA_PMR0, 0);
- csr = viafb_mmio_read(VDMA_CSR0);
- viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
- spin_unlock_irqrestore(&global_dev.reg_lock, flags);
- /*
- * Now we just wait until the interrupt handler says
- * we're done.
- */
- wait_for_completion_interruptible(&viafb_dma_completion);
- viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
- mutex_unlock(&viafb_dma_lock);
-}
-EXPORT_SYMBOL_GPL(viafb_dma_copy_out);
-#endif
-
/*
* Do a scatter/gather DMA copy from FB memory. You must have done
* a successful call to viafb_request_dma() first.
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index c402364aec0d..cf528c289857 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -155,6 +155,8 @@ void dw_hdmi_resume(struct dw_hdmi *hdmi);
void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
+void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt);
+void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca);
void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
void dw_hdmi_audio_disable(struct dw_hdmi *hdmi);
void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi);
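
Editor's note: a hedged sketch of how an audio glue driver might use the two
new setters alongside the existing dw-hdmi audio calls. The helper itself,
the channel-to-CA mapping, and the fixed sample rate are illustrative
assumptions; only the dw_hdmi_* calls come from the header above.

#include <drm/bridge/dw_hdmi.h>

static void example_hdmi_audio_config(struct dw_hdmi *hdmi,
				      unsigned int channels)
{
	/* Channel count as carried in the audio infoframe CC field. */
	dw_hdmi_set_channel_count(hdmi, channels);

	/* CEA-861 channel allocation: 0x00 = FL/FR, 0x0b = 5.1 (examples). */
	dw_hdmi_set_channel_allocation(hdmi, channels == 2 ? 0x00 : 0x0b);

	dw_hdmi_set_sample_rate(hdmi, 48000);	/* assumed fixed rate */
	dw_hdmi_audio_enable(hdmi);
}
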
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 94aae87b1138..037b1f7a87a5 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -87,7 +87,7 @@ struct module;
struct device_node;
struct videomode;
-struct reservation_object;
+struct dma_resv;
struct dma_buf_attachment;
struct pci_dev;
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 0b9997e27689..681cb590f952 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -543,8 +543,8 @@ struct drm_connector_state {
*
* This is also used in the atomic helpers to map encoders to their
* current and previous connectors, see
- * &drm_atomic_get_old_connector_for_encoder() and
- * &drm_atomic_get_new_connector_for_encoder().
+ * drm_atomic_get_old_connector_for_encoder() and
+ * drm_atomic_get_new_connector_for_encoder().
*
* NOTE: Atomic drivers must fill this out (either themselves or through
* helpers), for otherwise the GETCONNECTOR and GETENCODER IOCTLs will
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index ae693c0666cd..6aaba14f5972 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -35,7 +35,7 @@
*/
#include <linux/kref.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <drm/drm_vma_manager.h>
@@ -276,7 +276,7 @@ struct drm_gem_object {
*
* Normally (@resv == &@_resv) except for imported GEM objects.
*/
- struct reservation_object *resv;
+ struct dma_resv *resv;
/**
* @_resv:
@@ -285,7 +285,7 @@ struct drm_gem_object {
*
* This is unused for imported GEM objects.
*/
- struct reservation_object _resv;
+ struct dma_resv _resv;
/**
* @funcs:
@@ -390,7 +390,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
int count, struct drm_gem_object ***objs_out);
struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
-long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
+long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
bool wait_all, unsigned long timeout);
int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
struct ww_acquire_ctx *acquire_ctx);
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 038b6d313447..ce1600fdfc3e 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -44,6 +44,9 @@ struct drm_gem_shmem_object {
*/
unsigned int pages_use_count;
+ int madv;
+ struct list_head madv_list;
+
/**
* @pages_mark_dirty_on_put:
*
@@ -121,6 +124,18 @@ void drm_gem_shmem_unpin(struct drm_gem_object *obj);
void *drm_gem_shmem_vmap(struct drm_gem_object *obj);
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr);
+int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv);
+
+static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
+{
+ return (shmem->madv > 0) &&
+ !shmem->vmap_use_count && shmem->sgt &&
+ !shmem->base.dma_buf && !shmem->base.import_attach;
+}
+
+void drm_gem_shmem_purge_locked(struct drm_gem_object *obj);
+void drm_gem_shmem_purge(struct drm_gem_object *obj);
+
struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
struct drm_device *dev, size_t size,
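
Editor's note: a hedged sketch of how a driver MADVISE path might wire up
the new shmem helpers. The function, its arguments, the madv encoding
(1 == DONTNEED here), and the eager purge are all hypothetical; a real
driver would normally leave purging to a memory-pressure shrinker. Only the
drm_gem_shmem_* calls come from this header.

#include <drm/drm_gem_shmem_helper.h>

static int example_madvise(struct drm_gem_object *obj, int madv, u32 *retained)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	/* Record the new advice; non-zero means the pages still exist. */
	*retained = drm_gem_shmem_madvise(obj, madv);

	/*
	 * An unmapped, unexported DONTNEED object is a purge candidate.
	 * Purged eagerly here only to demonstrate the call.
	 */
	if (*retained && madv == 1 && drm_gem_shmem_is_purgeable(shmem))
		drm_gem_shmem_purge(obj);

	return 0;
}
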
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 26377836141c..624bd15ecfab 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -36,14 +36,6 @@ struct display_timing;
/**
* struct drm_panel_funcs - perform operations on a given panel
- * @disable: disable panel (turn off back light, etc.)
- * @unprepare: turn off panel
- * @prepare: turn on panel and perform set up
- * @enable: enable panel (turn on back light, etc.)
- * @get_modes: add modes to the connector that the panel is attached to and
- * return the number of modes added
- * @get_timings: copy display timings into the provided array and return
- * the number of display timings available
*
* The .prepare() function is typically called before the display controller
* starts to transmit video data. Panel drivers can use this to turn the panel
@@ -69,132 +61,107 @@ struct display_timing;
* the panel. This is the job of the .unprepare() function.
*/
struct drm_panel_funcs {
- int (*disable)(struct drm_panel *panel);
- int (*unprepare)(struct drm_panel *panel);
+ /**
+ * @prepare:
+ *
+ * Turn on panel and perform set up.
+ */
int (*prepare)(struct drm_panel *panel);
+
+ /**
+ * @enable:
+ *
+ * Enable panel (turn on back light, etc.).
+ */
int (*enable)(struct drm_panel *panel);
+
+ /**
+ * @disable:
+ *
+ * Disable panel (turn off back light, etc.).
+ */
+ int (*disable)(struct drm_panel *panel);
+
+ /**
+ * @unprepare:
+ *
+ * Turn off panel.
+ */
+ int (*unprepare)(struct drm_panel *panel);
+
+ /**
+ * @get_modes:
+ *
+ * Add modes to the connector that the panel is attached to and
+ * return the number of modes added.
+ */
int (*get_modes)(struct drm_panel *panel);
+
+ /**
+ * @get_timings:
+ *
+ * Copy display timings into the provided array and return
+ * the number of display timings available.
+ */
int (*get_timings)(struct drm_panel *panel, unsigned int num_timings,
struct display_timing *timings);
};
/**
* struct drm_panel - DRM panel object
- * @drm: DRM device owning the panel
- * @connector: DRM connector that the panel is attached to
- * @dev: parent device of the panel
- * @link: link from panel device (supplier) to DRM device (consumer)
- * @funcs: operations that can be performed on the panel
- * @list: panel entry in registry
*/
struct drm_panel {
+ /**
+ * @drm:
+ *
+ * DRM device owning the panel.
+ */
struct drm_device *drm;
+
+ /**
+ * @connector:
+ *
+ * DRM connector that the panel is attached to.
+ */
struct drm_connector *connector;
+
+ /**
+ * @dev:
+ *
+ * Parent device of the panel.
+ */
struct device *dev;
+ /**
+ * @funcs:
+ *
+ * Operations that can be performed on the panel.
+ */
const struct drm_panel_funcs *funcs;
+ /**
+ * @list:
+ *
+ * Panel entry in registry.
+ */
struct list_head list;
};
-/**
- * drm_disable_unprepare - power off a panel
- * @panel: DRM panel
- *
- * Calling this function will completely power off a panel (assert the panel's
- * reset, turn off power supplies, ...). After this function has completed, it
- * is usually no longer possible to communicate with the panel until another
- * call to drm_panel_prepare().
- *
- * Return: 0 on success or a negative error code on failure.
- */
-static inline int drm_panel_unprepare(struct drm_panel *panel)
-{
- if (panel && panel->funcs && panel->funcs->unprepare)
- return panel->funcs->unprepare(panel);
-
- return panel ? -ENOSYS : -EINVAL;
-}
-
-/**
- * drm_panel_disable - disable a panel
- * @panel: DRM panel
- *
- * This will typically turn off the panel's backlight or disable the display
- * drivers. For smart panels it should still be possible to communicate with
- * the integrated circuitry via any command bus after this call.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-static inline int drm_panel_disable(struct drm_panel *panel)
-{
- if (panel && panel->funcs && panel->funcs->disable)
- return panel->funcs->disable(panel);
-
- return panel ? -ENOSYS : -EINVAL;
-}
-
-/**
- * drm_panel_prepare - power on a panel
- * @panel: DRM panel
- *
- * Calling this function will enable power and deassert any reset signals to
- * the panel. After this has completed it is possible to communicate with any
- * integrated circuitry via a command bus.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-static inline int drm_panel_prepare(struct drm_panel *panel)
-{
- if (panel && panel->funcs && panel->funcs->prepare)
- return panel->funcs->prepare(panel);
-
- return panel ? -ENOSYS : -EINVAL;
-}
-
-/**
- * drm_panel_enable - enable a panel
- * @panel: DRM panel
- *
- * Calling this function will cause the panel display drivers to be turned on
- * and the backlight to be enabled. Content will be visible on screen after
- * this call completes.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-static inline int drm_panel_enable(struct drm_panel *panel)
-{
- if (panel && panel->funcs && panel->funcs->enable)
- return panel->funcs->enable(panel);
-
- return panel ? -ENOSYS : -EINVAL;
-}
-
-/**
- * drm_panel_get_modes - probe the available display modes of a panel
- * @panel: DRM panel
- *
- * The modes probed from the panel are automatically added to the connector
- * that the panel is attached to.
- *
- * Return: The number of modes available from the panel on success or a
- * negative error code on failure.
- */
-static inline int drm_panel_get_modes(struct drm_panel *panel)
-{
- if (panel && panel->funcs && panel->funcs->get_modes)
- return panel->funcs->get_modes(panel);
-
- return panel ? -ENOSYS : -EINVAL;
-}
-
void drm_panel_init(struct drm_panel *panel);
int drm_panel_add(struct drm_panel *panel);
void drm_panel_remove(struct drm_panel *panel);
int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector);
-int drm_panel_detach(struct drm_panel *panel);
+void drm_panel_detach(struct drm_panel *panel);
+
+int drm_panel_prepare(struct drm_panel *panel);
+int drm_panel_unprepare(struct drm_panel *panel);
+
+int drm_panel_enable(struct drm_panel *panel);
+int drm_panel_disable(struct drm_panel *panel);
+
+int drm_panel_get_modes(struct drm_panel *panel);
#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL)
struct drm_panel *of_drm_find_panel(const struct device_node *np);
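
Editor's note: with the helpers now out of line, the typical panel power
sequence from an encoder or bridge driver looks as below. This is a sketch;
the panel pointer and the error-handling style are illustrative.

#include <drm/drm_panel.h>

static int example_panel_on(struct drm_panel *panel)
{
	int ret;

	/* Power rails and reset first; the panel bus is usable afterwards. */
	ret = drm_panel_prepare(panel);
	if (ret)
		return ret;

	/* Then light it up (backlight etc.); video should already flow. */
	ret = drm_panel_enable(panel);
	if (ret)
		drm_panel_unprepare(panel);

	return ret;
}

static void example_panel_off(struct drm_panel *panel)
{
	drm_panel_disable(panel);
	drm_panel_unprepare(panel);
}
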
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index a70c982ddff9..b1f66b117c74 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -466,7 +466,10 @@
INTEL_VGA_DEVICE(0x9BC5, info), \
INTEL_VGA_DEVICE(0x9BC8, info), \
INTEL_VGA_DEVICE(0x9BC4, info), \
- INTEL_VGA_DEVICE(0x9BC2, info)
+ INTEL_VGA_DEVICE(0x9BC2, info), \
+ INTEL_VGA_DEVICE(0x9BC6, info), \
+ INTEL_VGA_DEVICE(0x9BE6, info), \
+ INTEL_VGA_DEVICE(0x9BF6, info)
#define INTEL_KBL_IDS(info) \
INTEL_KBL_GT1_IDS(info), \
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 65ef5376de59..43c4929a2171 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -40,7 +40,7 @@
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/bitmap.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
struct ttm_bo_global;
@@ -273,7 +273,7 @@ struct ttm_bo_kmap_obj {
struct ttm_operation_ctx {
bool interruptible;
bool no_wait_gpu;
- struct reservation_object *resv;
+ struct dma_resv *resv;
uint64_t bytes_moved;
uint32_t flags;
};
@@ -493,7 +493,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
* @page_alignment: Data alignment in pages.
* @ctx: TTM operation context for memory allocation.
* @acc_size: Accounted size for this object.
- * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
+ * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
*
* This function initializes a pre-allocated struct ttm_buffer_object.
@@ -526,7 +526,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
struct ttm_operation_ctx *ctx,
size_t acc_size,
struct sg_table *sg,
- struct reservation_object *resv,
+ struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *));
/**
@@ -545,7 +545,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
* point to the shmem object backing a GEM object if TTM is used to back a
* GEM user interface.
* @acc_size: Accounted size for this object.
- * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
+ * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
*
* This function initializes a pre-allocated struct ttm_buffer_object.
@@ -570,7 +570,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
unsigned long size, enum ttm_bo_type type,
struct ttm_placement *placement,
		uint32_t page_alignment, bool interruptible, size_t acc_size,
- struct sg_table *sg, struct reservation_object *resv,
+ struct sg_table *sg, struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *));
/**
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 076d7ca0f8b6..6f536caea368 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -35,7 +35,7 @@
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include "ttm_bo_api.h"
#include "ttm_memory.h"
@@ -664,14 +664,14 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
if (WARN_ON(ticket))
return -EBUSY;
- success = reservation_object_trylock(bo->base.resv);
+ success = dma_resv_trylock(bo->base.resv);
return success ? 0 : -EBUSY;
}
if (interruptible)
- ret = reservation_object_lock_interruptible(bo->base.resv, ticket);
+ ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
else
- ret = reservation_object_lock(bo->base.resv, ticket);
+ ret = dma_resv_lock(bo->base.resv, ticket);
if (ret == -EINTR)
return -ERESTARTSYS;
return ret;
@@ -755,10 +755,10 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
WARN_ON(!kref_read(&bo->kref));
if (interruptible)
- ret = reservation_object_lock_slow_interruptible(bo->base.resv,
+ ret = dma_resv_lock_slow_interruptible(bo->base.resv,
ticket);
else
- reservation_object_lock_slow(bo->base.resv, ticket);
+ dma_resv_lock_slow(bo->base.resv, ticket);
if (likely(ret == 0))
ttm_bo_del_sub_from_lru(bo);
@@ -783,7 +783,7 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
else
ttm_bo_move_to_lru_tail(bo, NULL);
spin_unlock(&bo->bdev->glob->lru_lock);
- reservation_object_unlock(bo->base.resv);
+ dma_resv_unlock(bo->base.resv);
}
/*
diff --git a/include/linux/amba/clcd-regs.h b/include/linux/amba/clcd-regs.h
index 516a6fda83c5..421b0fa90d6a 100644
--- a/include/linux/amba/clcd-regs.h
+++ b/include/linux/amba/clcd-regs.h
@@ -42,6 +42,7 @@
#define TIM2_PCD_LO_MASK GENMASK(4, 0)
#define TIM2_PCD_LO_BITS 5
#define TIM2_CLKSEL (1 << 5)
+#define TIM2_ACB_MASK GENMASK(10, 6)
#define TIM2_IVS (1 << 11)
#define TIM2_IHS (1 << 12)
#define TIM2_IPC (1 << 13)
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index bae060fae862..ec212cb27fdc 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -306,7 +306,7 @@ struct dma_buf {
struct module *owner;
struct list_head list_node;
void *priv;
- struct reservation_object *resv;
+ struct dma_resv *resv;
/* poll support */
wait_queue_head_t poll;
@@ -365,7 +365,7 @@ struct dma_buf_export_info {
const struct dma_buf_ops *ops;
size_t size;
int flags;
- struct reservation_object *resv;
+ struct dma_resv *resv;
void *priv;
};
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 05d29dbc7e62..3347c54f3a87 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -63,15 +63,35 @@ struct dma_fence_cb;
* been completed, or never called at all.
*/
struct dma_fence {
- struct kref refcount;
- const struct dma_fence_ops *ops;
- struct rcu_head rcu;
- struct list_head cb_list;
spinlock_t *lock;
+ const struct dma_fence_ops *ops;
+ /*
+ * We clear the callback list on kref_put so that by the time we
+ * release the fence it is unused. No one should be adding to the
+ * cb_list that they don't themselves hold a reference for.
+ *
+ * The lifetime of the timestamp is similarly tied to both the
+ * rcu freelist and the cb_list. The timestamp is only set upon
+ * signaling while simultaneously notifying the cb_list. Ergo, we
+ * only use either the cb_list of timestamp. Upon destruction,
+	 * only use either the cb_list or the timestamp. Upon destruction,
+ * that the cb_list is *only* valid until the signal bit is set,
+ * and to read either you *must* hold a reference to the fence,
+ * and not just the rcu_read_lock.
+ *
+ * Listed in chronological order.
+ */
+ union {
+ struct list_head cb_list;
+ /* @cb_list replaced by @timestamp on dma_fence_signal() */
+ ktime_t timestamp;
+ /* @timestamp replaced by @rcu on dma_fence_release() */
+ struct rcu_head rcu;
+ };
u64 context;
u64 seqno;
unsigned long flags;
- ktime_t timestamp;
+ struct kref refcount;
int error;
};
@@ -273,7 +293,7 @@ static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
}
/**
- * dma_fence_get_rcu - get a fence from a reservation_object_list with
+ * dma_fence_get_rcu - get a fence from a dma_resv_list with
* rcu read lock
* @fence: fence to increase refcount of
*
@@ -297,7 +317,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
* so long as the caller is using RCU on the pointer to the fence.
*
* An alternative mechanism is to employ a seqlock to protect a bunch of
- * fences, such as used by struct reservation_object. When using a seqlock,
+ * fences, such as used by struct dma_resv. When using a seqlock,
* the seqlock must be taken before and checked after a reference to the
* fence is acquired (as shown here).
*
diff --git a/include/linux/reservation.h b/include/linux/dma-resv.h
index 56b782fec49b..ee50d10f052b 100644
--- a/include/linux/reservation.h
+++ b/include/linux/dma-resv.h
@@ -50,54 +50,52 @@ extern struct lock_class_key reservation_seqcount_class;
extern const char reservation_seqcount_string[];
/**
- * struct reservation_object_list - a list of shared fences
+ * struct dma_resv_list - a list of shared fences
* @rcu: for internal use
* @shared_count: table of shared fences
* @shared_max: for growing shared fence table
* @shared: shared fence table
*/
-struct reservation_object_list {
+struct dma_resv_list {
struct rcu_head rcu;
u32 shared_count, shared_max;
struct dma_fence __rcu *shared[];
};
/**
- * struct reservation_object - a reservation object manages fences for a buffer
+ * struct dma_resv - a reservation object manages fences for a buffer
* @lock: update side lock
* @seq: sequence count for managing RCU read-side synchronization
* @fence_excl: the exclusive fence, if there is one currently
* @fence: list of current shared fences
*/
-struct reservation_object {
+struct dma_resv {
struct ww_mutex lock;
seqcount_t seq;
struct dma_fence __rcu *fence_excl;
- struct reservation_object_list __rcu *fence;
+ struct dma_resv_list __rcu *fence;
};
-#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base)
-#define reservation_object_assert_held(obj) \
- lockdep_assert_held(&(obj)->lock.base)
+#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
+#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
/**
- * reservation_object_get_list - get the reservation object's
+ * dma_resv_get_list - get the reservation object's
* shared fence list, with update-side lock held
* @obj: the reservation object
*
* Returns the shared fence list. Does NOT take references to
* the fence. The obj->lock must be held.
*/
-static inline struct reservation_object_list *
-reservation_object_get_list(struct reservation_object *obj)
+static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
{
return rcu_dereference_protected(obj->fence,
- reservation_object_held(obj));
+ dma_resv_held(obj));
}
/**
- * reservation_object_lock - lock the reservation object
+ * dma_resv_lock - lock the reservation object
* @obj: the reservation object
* @ctx: the locking context
*
@@ -111,15 +109,14 @@ reservation_object_get_list(struct reservation_object *obj)
* is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
* object may be locked by itself by passing NULL as @ctx.
*/
-static inline int
-reservation_object_lock(struct reservation_object *obj,
- struct ww_acquire_ctx *ctx)
+static inline int dma_resv_lock(struct dma_resv *obj,
+ struct ww_acquire_ctx *ctx)
{
return ww_mutex_lock(&obj->lock, ctx);
}
/**
- * reservation_object_lock_interruptible - lock the reservation object
+ * dma_resv_lock_interruptible - lock the reservation object
* @obj: the reservation object
* @ctx: the locking context
*
@@ -133,48 +130,45 @@ reservation_object_lock(struct reservation_object *obj,
* is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
* object may be locked by itself by passing NULL as @ctx.
*/
-static inline int
-reservation_object_lock_interruptible(struct reservation_object *obj,
- struct ww_acquire_ctx *ctx)
+static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
+ struct ww_acquire_ctx *ctx)
{
return ww_mutex_lock_interruptible(&obj->lock, ctx);
}
/**
- * reservation_object_lock_slow - slowpath lock the reservation object
+ * dma_resv_lock_slow - slowpath lock the reservation object
* @obj: the reservation object
* @ctx: the locking context
*
* Acquires the reservation object after a die case. This function
- * will sleep until the lock becomes available. See reservation_object_lock() as
+ * will sleep until the lock becomes available. See dma_resv_lock() as
* well.
*/
-static inline void
-reservation_object_lock_slow(struct reservation_object *obj,
- struct ww_acquire_ctx *ctx)
+static inline void dma_resv_lock_slow(struct dma_resv *obj,
+ struct ww_acquire_ctx *ctx)
{
ww_mutex_lock_slow(&obj->lock, ctx);
}
/**
- * reservation_object_lock_slow_interruptible - slowpath lock the reservation
+ * dma_resv_lock_slow_interruptible - slowpath lock the reservation
* object, interruptible
* @obj: the reservation object
* @ctx: the locking context
*
 * Acquires the reservation object interruptibly after a die case. This function
* will sleep until the lock becomes available. See
- * reservation_object_lock_interruptible() as well.
+ * dma_resv_lock_interruptible() as well.
*/
-static inline int
-reservation_object_lock_slow_interruptible(struct reservation_object *obj,
- struct ww_acquire_ctx *ctx)
+static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
+ struct ww_acquire_ctx *ctx)
{
return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
}
/**
- * reservation_object_trylock - trylock the reservation object
+ * dma_resv_trylock - trylock the reservation object
* @obj: the reservation object
*
* Tries to lock the reservation object for exclusive access and modification.
@@ -187,51 +181,46 @@ reservation_object_lock_slow_interruptible(struct reservation_object *obj,
*
* Returns true if the lock was acquired, false otherwise.
*/
-static inline bool __must_check
-reservation_object_trylock(struct reservation_object *obj)
+static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
{
return ww_mutex_trylock(&obj->lock);
}
/**
- * reservation_object_is_locked - is the reservation object locked
+ * dma_resv_is_locked - is the reservation object locked
* @obj: the reservation object
*
* Returns true if the mutex is locked, false if unlocked.
*/
-static inline bool
-reservation_object_is_locked(struct reservation_object *obj)
+static inline bool dma_resv_is_locked(struct dma_resv *obj)
{
return ww_mutex_is_locked(&obj->lock);
}
/**
- * reservation_object_locking_ctx - returns the context used to lock the object
+ * dma_resv_locking_ctx - returns the context used to lock the object
* @obj: the reservation object
*
* Returns the context used to lock a reservation object or NULL if no context
* was used or the object is not locked at all.
*/
-static inline struct ww_acquire_ctx *
-reservation_object_locking_ctx(struct reservation_object *obj)
+static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
{
return READ_ONCE(obj->lock.ctx);
}
/**
- * reservation_object_unlock - unlock the reservation object
+ * dma_resv_unlock - unlock the reservation object
* @obj: the reservation object
*
* Unlocks the reservation object following exclusive access.
*/
-static inline void
-reservation_object_unlock(struct reservation_object *obj)
+static inline void dma_resv_unlock(struct dma_resv *obj)
{
#ifdef CONFIG_DEBUG_MUTEXES
/* Test shared fence slot reservation */
if (rcu_access_pointer(obj->fence)) {
- struct reservation_object_list *fence =
- reservation_object_get_list(obj);
+ struct dma_resv_list *fence = dma_resv_get_list(obj);
fence->shared_max = fence->shared_count;
}
@@ -240,7 +229,7 @@ reservation_object_unlock(struct reservation_object *obj)
}
/**
- * reservation_object_get_excl - get the reservation object's
+ * dma_resv_get_excl - get the reservation object's
* exclusive fence, with update-side lock held
* @obj: the reservation object
*
@@ -252,14 +241,14 @@ reservation_object_unlock(struct reservation_object *obj)
* The exclusive fence or NULL
*/
static inline struct dma_fence *
-reservation_object_get_excl(struct reservation_object *obj)
+dma_resv_get_excl(struct dma_resv *obj)
{
return rcu_dereference_protected(obj->fence_excl,
- reservation_object_held(obj));
+ dma_resv_held(obj));
}
/**
- * reservation_object_get_excl_rcu - get the reservation object's
+ * dma_resv_get_excl_rcu - get the reservation object's
* exclusive fence, without lock held.
* @obj: the reservation object
*
@@ -270,7 +259,7 @@ reservation_object_get_excl(struct reservation_object *obj)
* The exclusive fence or NULL if none
*/
static inline struct dma_fence *
-reservation_object_get_excl_rcu(struct reservation_object *obj)
+dma_resv_get_excl_rcu(struct dma_resv *obj)
{
struct dma_fence *fence;
@@ -284,29 +273,23 @@ reservation_object_get_excl_rcu(struct reservation_object *obj)
return fence;
}
-void reservation_object_init(struct reservation_object *obj);
-void reservation_object_fini(struct reservation_object *obj);
-int reservation_object_reserve_shared(struct reservation_object *obj,
- unsigned int num_fences);
-void reservation_object_add_shared_fence(struct reservation_object *obj,
- struct dma_fence *fence);
+void dma_resv_init(struct dma_resv *obj);
+void dma_resv_fini(struct dma_resv *obj);
+int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
+void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
-void reservation_object_add_excl_fence(struct reservation_object *obj,
- struct dma_fence *fence);
+void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-int reservation_object_get_fences_rcu(struct reservation_object *obj,
- struct dma_fence **pfence_excl,
- unsigned *pshared_count,
- struct dma_fence ***pshared);
+int dma_resv_get_fences_rcu(struct dma_resv *obj,
+ struct dma_fence **pfence_excl,
+ unsigned *pshared_count,
+ struct dma_fence ***pshared);
-int reservation_object_copy_fences(struct reservation_object *dst,
- struct reservation_object *src);
+int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
- bool wait_all, bool intr,
- unsigned long timeout);
+long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
+ unsigned long timeout);
-bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
- bool test_all);
+bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
#endif /* _LINUX_RESERVATION_H */
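
Editor's note: a hedged sketch of the renamed API in use, attaching an
exclusive fence to a buffer's reservation object. The fence and the
reservation object are assumed to come from the caller; only the dma_resv_*
calls are from the header above. Passing NULL as the context locks the
single object without deadlock detection, as the kerneldoc permits.

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

static int example_attach_write_fence(struct dma_resv *resv,
				      struct dma_fence *fence)
{
	int ret;

	ret = dma_resv_lock(resv, NULL);	/* single object, no ww ctx */
	if (ret)
		return ret;

	/* An exclusive fence supersedes all currently shared fences. */
	dma_resv_add_excl_fence(resv, fence);
	dma_resv_unlock(resv);

	return 0;
}
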
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
index 0d5c49dc478c..09d0df8b71c5 100644
--- a/include/uapi/drm/etnaviv_drm.h
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -73,6 +73,7 @@ struct drm_etnaviv_timespec {
#define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT 0x18
#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19
#define ETNAVIV_PARAM_GPU_NUM_VARYINGS 0x1a
+#define ETNAVIV_PARAM_SOFTPIN_START_ADDR 0x1b
#define ETNA_MAX_PIPES 4
@@ -148,6 +149,11 @@ struct drm_etnaviv_gem_submit_reloc {
* then patching the cmdstream for this entry is skipped. This can
* avoid kernel needing to map/access the cmdstream bo in the common
* case.
+ * If the submit is a softpin submit (ETNA_SUBMIT_SOFTPIN), the 'presumed'
+ * field is interpreted as the fixed location to map the bo into the gpu
+ * virtual address space. If the kernel is unable to map the buffer at
+ * this location, the submit will fail. This means userspace is responsible
+ * for all gpu virtual address management.
*/
#define ETNA_SUBMIT_BO_READ 0x0001
#define ETNA_SUBMIT_BO_WRITE 0x0002
@@ -177,9 +183,11 @@ struct drm_etnaviv_gem_submit_pmr {
#define ETNA_SUBMIT_NO_IMPLICIT 0x0001
#define ETNA_SUBMIT_FENCE_FD_IN 0x0002
#define ETNA_SUBMIT_FENCE_FD_OUT 0x0004
+#define ETNA_SUBMIT_SOFTPIN 0x0008
#define ETNA_SUBMIT_FLAGS (ETNA_SUBMIT_NO_IMPLICIT | \
ETNA_SUBMIT_FENCE_FD_IN | \
- ETNA_SUBMIT_FENCE_FD_OUT)
+				   ETNA_SUBMIT_FENCE_FD_OUT | \
+ ETNA_SUBMIT_SOFTPIN)
#define ETNA_PIPE_3D 0x00
#define ETNA_PIPE_2D 0x01
#define ETNA_PIPE_VG 0x02
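
Editor's note: a hedged userspace sketch of preparing one BO entry for a
softpin submit. Under ETNA_SUBMIT_SOFTPIN the per-BO 'presumed' field
carries the GPU virtual address chosen by userspace, and the submit's flags
must include ETNA_SUBMIT_SOFTPIN; the handle and address values here are
hypothetical.

#include <string.h>
#include <drm/etnaviv_drm.h>

static void example_softpin_bo(struct drm_etnaviv_gem_submit_bo *bo,
			       unsigned int handle, unsigned long long gpuva)
{
	memset(bo, 0, sizeof(*bo));
	bo->flags = ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE;
	bo->handle = handle;
	bo->presumed = gpuva;	/* fixed GPU VA; submit fails if unmappable */
}
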
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index cb577fb96b38..ec19db1eead8 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -20,6 +20,7 @@ extern "C" {
#define DRM_PANFROST_GET_BO_OFFSET 0x05
#define DRM_PANFROST_PERFCNT_ENABLE 0x06
#define DRM_PANFROST_PERFCNT_DUMP 0x07
+#define DRM_PANFROST_MADVISE 0x08
#define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
#define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
@@ -27,6 +28,7 @@ extern "C" {
#define DRM_IOCTL_PANFROST_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MMAP_BO, struct drm_panfrost_mmap_bo)
#define DRM_IOCTL_PANFROST_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_PARAM, struct drm_panfrost_get_param)
#define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset)
+#define DRM_IOCTL_PANFROST_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MADVISE, struct drm_panfrost_madvise)
/*
* Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module
@@ -82,6 +84,9 @@ struct drm_panfrost_wait_bo {
__s64 timeout_ns; /* absolute */
};
+#define PANFROST_BO_NOEXEC 1
+#define PANFROST_BO_HEAP 2
+
/**
* struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs.
*
@@ -198,6 +203,26 @@ struct drm_panfrost_perfcnt_dump {
__u64 buf_ptr;
};
+/* madvise provides a way to tell the kernel that a buffer's contents
+ * can be discarded under memory pressure, which is useful for a userspace
+ * bo cache where we want to optimistically hold on to the buffer allocation
+ * and potential mmap, but allow the pages to be discarded under memory
+ * pressure.
+ *
+ * Typical usage would involve madvise(DONTNEED) when a buffer enters the BO
+ * cache, and madvise(WILLNEED) when trying to recycle a buffer from the BO
+ * cache. In the WILLNEED case, 'retained' indicates to userspace whether the
+ * backing pages still exist.
+ */
+#define PANFROST_MADV_WILLNEED 0 /* backing pages are needed, status returned in 'retained' */
+#define PANFROST_MADV_DONTNEED 1 /* backing pages not needed */
+
+struct drm_panfrost_madvise {
+ __u32 handle; /* in, GEM handle */
+ __u32 madv; /* in, PANFROST_MADV_x */
+ __u32 retained; /* out, whether backing store still exists */
+};
+
#if defined(__cplusplus)
}
#endif
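
Editor's note: a hedged userspace sketch of recycling a buffer from a BO
cache with the MADVISE ioctl declared above. The fd and handle are assumed
to come from earlier DRM calls, and the error handling is abbreviated.

#include <string.h>
#include <sys/ioctl.h>
#include <drm/panfrost_drm.h>

static int bo_cache_reuse(int fd, unsigned int handle)
{
	struct drm_panfrost_madvise req;

	memset(&req, 0, sizeof(req));
	req.handle = handle;
	req.madv = PANFROST_MADV_WILLNEED;

	if (ioctl(fd, DRM_IOCTL_PANFROST_MADVISE, &req))
		return -1;

	/* retained == 0 means the kernel purged the pages: reallocate. */
	return req.retained ? 0 : 1;
}
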