Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/Kconfig8
-rw-r--r--drivers/gpu/drm/Makefile4
-rw-r--r--drivers/gpu/drm/amd/acp/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c169
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c183
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c144
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c249
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c111
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c98
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c93
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c111
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c191
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c250
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c282
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c148
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c504
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c67
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c146
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h338
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15_common.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_1.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c265
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c45
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c32
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c152
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c150
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c27
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c388
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h37
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c91
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c242
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c30
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c50
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c46
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile6
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c162
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c197
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c60
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c309
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c58
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c200
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h48
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c2204
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c92
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c58
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c172
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c129
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c126
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c318
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c138
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c214
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_cp_psp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h72
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h13
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h75
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h51
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c69
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h9
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c67
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h28
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h8
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h63
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c8
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c69
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h60
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c6
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c15
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c16
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c6
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c36
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c4
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h17
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c183
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h32
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h10
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h3
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c20
-rw-r--r--drivers/gpu/drm/amd/display/modules/vmid/vmid.c16
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h69
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h29
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h69
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h29
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h31
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c168
-rw-r--r--drivers/gpu/drm/amd/powerplay/arcturus_ppt.c56
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c7
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h9
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h6
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_debug.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h10
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h46
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.c95
-rw-r--r--drivers/gpu/drm/amd/powerplay/renoir_ppt.c54
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_internal.h11
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v11_0.c136
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v12_0.c91
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c11
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.c70
-rw-r--r--drivers/gpu/drm/arc/arcpgu_crtc.c16
-rw-r--r--drivers/gpu/drm/arc/arcpgu_hdmi.c2
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c8
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h6
-rw-r--r--drivers/gpu/drm/ast/ast_main.c24
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c27
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c2
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c6
-rw-r--r--drivers/gpu/drm/bochs/bochs_hw.c24
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c34
-rw-r--r--drivers/gpu/drm/bridge/Kconfig51
-rw-r--r--drivers/gpu/drm/bridge/Makefile6
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Kconfig13
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Makefile3
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h40
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c28
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx6345.c23
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c13
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c54
-rw-r--r--drivers/gpu/drm/bridge/cdns-dsi.c6
-rw-r--r--drivers/gpu/drm/bridge/display-connector.c295
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c300
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c21
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c8
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c8
-rw-r--r--drivers/gpu/drm/bridge/panel.c23
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c8
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c349
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c8
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c3
-rw-r--r--drivers/gpu/drm/bridge/simple-bridge.c342
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c329
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c11
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c11
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c17
-rw-r--r--drivers/gpu/drm/bridge/tc358768.c1046
-rw-r--r--drivers/gpu/drm/bridge/thc63lvd1024.c5
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c267
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c238
-rw-r--r--drivers/gpu/drm/bridge/ti-tpd12s015.c211
-rw-r--r--drivers/gpu/drm/cirrus/cirrus.c51
-rw-r--r--drivers/gpu/drm/drm_atomic.c117
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c83
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c102
-rw-r--r--drivers/gpu/drm/drm_auth.c8
-rw-r--r--drivers/gpu/drm/drm_bridge.c751
-rw-r--r--drivers/gpu/drm/drm_bridge_connector.c379
-rw-r--r--drivers/gpu/drm/drm_bufs.c40
-rw-r--r--drivers/gpu/drm/drm_client.c2
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c15
-rw-r--r--drivers/gpu/drm/drm_connector.c96
-rw-r--r--drivers/gpu/drm/drm_context.c28
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c4
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h2
-rw-r--r--drivers/gpu/drm/drm_debugfs.c3
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c2
-rw-r--r--drivers/gpu/drm/drm_dma.c21
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c141
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c343
-rw-r--r--drivers/gpu/drm/drm_drv.c20
-rw-r--r--drivers/gpu/drm/drm_edid.c213
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c22
-rw-r--r--drivers/gpu/drm/drm_file.c90
-rw-r--r--drivers/gpu/drm/drm_format_helper.c2
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c122
-rw-r--r--drivers/gpu/drm/drm_gem.c2
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c16
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c61
-rw-r--r--drivers/gpu/drm/drm_hdcp.c158
-rw-r--r--drivers/gpu/drm/drm_internal.h5
-rw-r--r--drivers/gpu/drm/drm_ioctl.c1
-rw-r--r--drivers/gpu/drm/drm_irq.c4
-rw-r--r--drivers/gpu/drm/drm_lock.c11
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c39
-rw-r--r--drivers/gpu/drm/drm_mm.c10
-rw-r--r--drivers/gpu/drm/drm_modes.c7
-rw-r--r--drivers/gpu/drm/drm_pci.c82
-rw-r--r--drivers/gpu/drm/drm_scatter.c3
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c46
-rw-r--r--drivers/gpu/drm/drm_syncobj.c87
-rw-r--r--drivers/gpu/drm/drm_sysfs.c4
-rw-r--r--drivers/gpu/drm/drm_vblank.c177
-rw-r--r--drivers/gpu/drm/drm_vm.c26
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c11
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h11
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c5
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h5
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c17
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c24
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c3
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c6
-rw-r--r--drivers/gpu/drm/gma500/gtt.c2
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c6
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h6
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c12
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.h7
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c79
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c11
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h13
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c11
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c1
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c2
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h1
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c20
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c10
-rw-r--r--drivers/gpu/drm/i915/Kconfig.profile25
-rw-r--r--drivers/gpu/drm/i915/Makefile8
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c106
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c444
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_csr.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c545
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c411
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c221
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h65
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c152
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c63
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c848
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c28
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c62
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c29
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c34
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c135
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c121
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c44
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c21
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c80
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c2
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c58
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_client_blt.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c364
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c737
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_blt.c18
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c15
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.c3
-rw-r--r--drivers/gpu/drm/i915/gt/gen7_renderclear.c402
-rw-r--r--drivers/gpu/drm/i915/gt/gen7_renderclear.h15
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c27
-rw-r--r--drivers/gpu/drm/i915/gt/hsw_clear_kernel.c61
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_param.c63
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_param.h14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c39
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c19
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.c14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c29
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c214
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c19
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c26
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c149
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c58
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c154
-rw-r--r--drivers/gpu/drm/i915/gt/ivb_clear_kernel.c61
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c4
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c30
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c4
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c802
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rc6.c23
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_ring_submission.c296
-rw-r--r--drivers/gpu/drm/i915/gt/sysfs_engines.c445
-rw-r--r--drivers/gpu/drm/i915/gt/sysfs_engines.h13
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c26
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h15
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c13
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h19
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c7
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.h8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c59
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.h62
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c11
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h18
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c2
-rw-r--r--drivers/gpu/drm/i915/i915_active.c124
-rw-r--r--drivers/gpu/drm/i915/i915_active.h8
-rw-r--r--drivers/gpu/drm/i915/i915_buddy.c3
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c241
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c570
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h155
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c17
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c2
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c7
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.h17
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c51
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h6
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c6
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c108
-rw-r--r--drivers/gpu/drm/i915/i915_perf_types.h3
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c59
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h13
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h32
-rw-r--r--drivers/gpu/drm/i915/i915_request.c284
-rw-r--r--drivers/gpu/drm/i915/i915_request.h14
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c20
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c2
-rw-r--r--drivers/gpu/drm/i915/i915_switcheroo.c2
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c3
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c1
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h27
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c31
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.h25
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c37
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h11
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c1
-rw-r--r--drivers/gpu/drm/i915/intel_dram.c500
-rw-r--r--drivers/gpu/drm/i915/intel_dram.h14
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c8
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c265
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c78
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_buddy.c25
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c1
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c2
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c176
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm.c2
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c16
-rw-r--r--drivers/gpu/drm/lima/lima_drv.h1
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c134
-rw-r--r--drivers/gpu/drm/lima/lima_gem.h4
-rw-r--r--drivers/gpu/drm/lima/lima_gp.c63
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.c5
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.h1
-rw-r--r--drivers/gpu/drm/lima/lima_regs.h1
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c35
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h6
-rw-r--r--drivers/gpu/drm/lima/lima_vm.c46
-rw-r--r--drivers/gpu/drm/lima/lima_vm.h1
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c9
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c20
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c30
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c10
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c180
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c93
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.h7
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c10
-rw-r--r--drivers/gpu/drm/meson/meson_venc.h4
-rw-r--r--drivers/gpu/drm/meson/meson_venc_cvbs.c6
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h9
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c86
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c37
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c65
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c85
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c58
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c86
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c95
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c11
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c10
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c4
-rw-r--r--drivers/gpu/drm/msm/edp/edp.c2
-rw-r--r--drivers/gpu/drm/msm/edp/edp_bridge.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c20
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h6
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c26
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c28
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Kconfig22
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Makefile4
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c97
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c183
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-opa362.c137
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c217
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c6
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Makefile2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/base.c55
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c9
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c349
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss-of.c28
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c46
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi.h4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c313
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c59
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.h4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c295
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c48
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.h5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c9
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h46
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c53
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c178
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c269
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c247
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.h3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c88
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c83
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c6
-rw-r--r--drivers/gpu/drm/panel/Kconfig44
-rw-r--r--drivers/gpu/drm/panel/Makefile5
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c854
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c352
-rw-r--r--drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c526
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c14
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c2
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c1098
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c6
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c293
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c332
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx424akp.c4
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c17
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c123
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h26
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c122
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c124
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.h47
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c18
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c14
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.h1
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c118
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.h6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.c45
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c73
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c43
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c26
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c6
-rw-r--r--drivers/gpu/drm/radeon/Makefile2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c61
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c35
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c8
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c22
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c9
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c5
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler_trace.h27
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c56
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c86
-rw-r--r--drivers/gpu/drm/selftests/drm_cmdline_selftests.h1
-rw-r--r--drivers/gpu/drm/selftests/test-drm_cmdline_parser.c15
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c11
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.h2
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c4
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c4
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c6
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c4
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c2
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c2
-rw-r--r--drivers/gpu/drm/stm/drv.c2
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c4
-rw-r--r--drivers/gpu/drm/stm/ltdc.c103
-rw-r--r--drivers/gpu/drm/stm/ltdc.h6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c104
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h14
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c129
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c104
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h11
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c66
-rw-r--r--drivers/gpu/drm/tegra/dc.c20
-rw-r--r--drivers/gpu/drm/tegra/fb.c8
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c34
-rw-r--r--drivers/gpu/drm/tidss/Kconfig14
-rw-r--r--drivers/gpu/drm/tidss/Makefile12
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c432
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.h48
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c2753
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.h137
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc_regs.h243
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c285
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.h39
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.c88
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.h17
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.c146
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.h77
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c299
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.h15
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c217
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.h25
-rw-r--r--drivers/gpu/drm/tidss/tidss_scale_coefs.c202
-rw-r--r--drivers/gpu/drm/tidss/tidss_scale_coefs.h22
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.c2
-rw-r--r--drivers/gpu/drm/tiny/Kconfig22
-rw-r--r--drivers/gpu/drm/tiny/Makefile1
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c9
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c9
-rw-r--r--drivers/gpu/drm/tiny/ili9486.c286
-rw-r--r--drivers/gpu/drm/tiny/repaper.c21
-rw-r--r--drivers/gpu/drm/tiny/st7586.c9
-rw-r--r--drivers/gpu/drm/tiny/st7735r.c76
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c271
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c2
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c1
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c11
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h41
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c13
-rw-r--r--drivers/gpu/drm/vboxvideo/vboxvideo.h2
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h49
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c2
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c9
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c12
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h36
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c90
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c41
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c114
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c7
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c369
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c9
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c1
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h4
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h34
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c87
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c31
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c6
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c19
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c4
796 files changed, 35150 insertions, 13483 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index d0aa6cff2e02..43594978958e 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -54,9 +54,6 @@ config DRM_DEBUG_MM
If in doubt, say "N".
-config DRM_EXPORT_FOR_TESTS
- bool
-
config DRM_DEBUG_SELFTEST
tristate "kselftests for DRM"
depends on DRM
@@ -389,6 +386,8 @@ source "drivers/gpu/drm/aspeed/Kconfig"
source "drivers/gpu/drm/mcde/Kconfig"
+source "drivers/gpu/drm/tidss/Kconfig"
+
# Keep legacy drivers last
menuconfig DRM_LEGACY
@@ -468,6 +467,9 @@ config DRM_SAVAGE
endif # DRM_LEGACY
+config DRM_EXPORT_FOR_TESTS
+ bool
+
# Separate option because drm_panel_orientation_quirks.c is shared with fbdev
config DRM_PANEL_ORIENTATION_QUIRKS
tristate
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 6493088a0fdd..7f72ef5e7811 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -39,7 +39,8 @@ obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
drm_ttm_helper-y := drm_gem_ttm_helper.o
obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
-drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper.o \
+drm_kms_helper-y := drm_bridge_connector.o drm_crtc_helper.o drm_dp_helper.o \
+ drm_dsc.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
@@ -122,3 +123,4 @@ obj-$(CONFIG_DRM_LIMA) += lima/
obj-$(CONFIG_DRM_PANFROST) += panfrost/
obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
obj-$(CONFIG_DRM_MCDE) += mcde/
+obj-$(CONFIG_DRM_TIDSS) += tidss/
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index 13340f353ea8..216d932a7831 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: MIT
menu "ACP (Audio CoProcessor) Configuration"
+ depends on DRM_AMDGPU
config DRM_AMD_ACP
bool "Enable AMD Audio CoProcessor IP support"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index da3bcff61b97..2992a49ad4a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -579,6 +579,7 @@ struct amdgpu_asic_funcs {
/* invalidate hdp read cache */
void (*invalidate_hdp)(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
+ void (*reset_hdp_ras_error_count)(struct amdgpu_device *adev);
/* check if the asic needs a full reset or if soft reset will work */
bool (*need_full_reset)(struct amdgpu_device *adev);
/* initialize doorbell layout for specific asic */
@@ -969,6 +970,7 @@ struct amdgpu_device {
int pstate;
/* enable runtime pm on the device */
bool runpm;
+ bool in_runpm;
bool pm_sysfs_en;
bool ucode_sysfs_en;
@@ -992,6 +994,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
uint32_t acc_flags);
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+ uint32_t acc_flags);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
@@ -1174,9 +1178,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
-u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
-int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
-void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
+int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
+void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 8609287620ea..abfbe89e805e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/dma-buf.h>
#include "amdgpu_xgmi.h"
+#include <uapi/linux/kfd_ioctl.h>
static const unsigned int compute_vmid_bitmap = 0xFF00;
@@ -126,7 +127,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
/* this is going to have a few of the MSBs set that we need to
* clear
*/
- bitmap_complement(gpu_resources.queue_bitmap,
+ bitmap_complement(gpu_resources.cp_queue_bitmap,
adev->gfx.mec.queue_bitmap,
KGD_MAX_QUEUES);
@@ -137,7 +138,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
* adev->gfx.mec.num_pipe_per_mec
* adev->gfx.mec.num_queue_per_pipe;
for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
- clear_bit(i, gpu_resources.queue_bitmap);
+ clear_bit(i, gpu_resources.cp_queue_bitmap);
amdgpu_doorbell_get_kfd_info(adev,
&gpu_resources.doorbell_physical_address,
@@ -178,18 +179,18 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
{
if (adev->kfd.dev)
- kgd2kfd_suspend(adev->kfd.dev);
+ kgd2kfd_suspend(adev->kfd.dev, run_pm);
}
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
{
int r = 0;
if (adev->kfd.dev)
- r = kgd2kfd_resume(adev->kfd.dev);
+ r = kgd2kfd_resume(adev->kfd.dev, run_pm);
return r;
}
@@ -224,7 +225,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr, bool mqd_gfx9)
+ void **cpu_ptr, bool cp_mqd_gfx9)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_bo *bo = NULL;
@@ -240,8 +241,8 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
- if (mqd_gfx9)
- bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;
+ if (cp_mqd_gfx9)
+ bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;
r = amdgpu_bo_create(adev, &bp, &bo);
if (r) {
@@ -402,7 +403,7 @@ void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
if (amdgpu_sriov_vf(adev))
mem_info->mem_clk_max = adev->clock.default_mclk / 100;
- else if (adev->powerplay.pp_funcs) {
+ else if (adev->pm.dpm_enabled) {
if (amdgpu_emu_mode == 1)
mem_info->mem_clk_max = 0;
else
@@ -427,7 +428,7 @@ uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
/* the sclk is in quanta of 10kHz */
if (amdgpu_sriov_vf(adev))
return adev->clock.default_sclk / 100;
- else if (adev->powerplay.pp_funcs)
+ else if (adev->pm.dpm_enabled)
return amdgpu_dpm_get_sclk(adev, false) / 100;
else
return 100;
@@ -501,10 +502,11 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
metadata_size, &metadata_flags);
if (flags) {
*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
- ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM
+ : KFD_IOC_ALLOC_MEM_FLAGS_GTT;
if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
- *flags |= ALLOC_MEM_FLAGS_PUBLIC;
+ *flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
}
out_put:
@@ -525,6 +527,14 @@ uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
return adev->gmc.xgmi.hive_id;
}
+
+uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ return adev->unique_id;
+}
+
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
{
struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
@@ -647,13 +657,9 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
- uint32_t flush_type = 0;
+ const uint32_t flush_type = 0;
bool all_hub = false;
- if (adev->gmc.xgmi.num_physical_nodes &&
- adev->asic_type == CHIP_VEGA20)
- flush_type = 2;
-
if (adev->family == AMDGPU_FAMILY_AI)
all_hub = true;
@@ -677,6 +683,11 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+{
+ return 0;
+}
+
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
@@ -713,11 +724,11 @@ void kgd2kfd_exit(void)
{
}
-void kgd2kfd_suspend(struct kfd_dev *kfd)
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}
-int kgd2kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 47b0f2957d1f..13feb313e9b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -96,6 +96,7 @@ struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm);
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
struct amdkfd_process_info {
/* List head of all VMs that belong to a KFD process */
@@ -122,8 +123,8 @@ struct amdkfd_process_info {
int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev);
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev);
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
@@ -171,6 +172,7 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
uint32_t *flags);
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
+uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
@@ -240,6 +242,9 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
+int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
+ struct tile_config *config);
+
/* KGD2KFD callbacks */
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
@@ -249,8 +254,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
-void kgd2kfd_suspend(struct kfd_dev *kfd);
-int kgd2kfd_resume(struct kfd_dev *kfd);
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
+int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 4bcc175a149d..6529caca88fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -79,7 +79,7 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
dev_warn(adev->dev,
"Invalid sdma engine id (%d), using engine id 0\n",
engine_id);
- /* fall through */
+ fallthrough;
case 0:
sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
@@ -319,7 +319,6 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
- .get_tile_config = kgd_gfx_v9_get_tile_config,
.set_vm_context_page_table_base = kgd_set_vm_context_page_table_base,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index a7b17c8deb00..4ec6d0c03201 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -42,38 +42,6 @@ enum hqd_dequeue_request_type {
SAVE_WAVES
};
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
- struct tile_config *config)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- config->gb_addr_config = adev->gfx.config.gb_addr_config;
-#if 0
-/* TODO - confirm REG_GET_FIELD x2, should be OK as is... but
- * MC_ARB_RAMCFG register doesn't exist on Vega10 - initial amdgpu
- * changes commented out related code, doing the same here for now but
- * need to sync with Ken et al
- */
- config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFBANK);
- config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFRANKS);
-#endif
-
- config->tile_config_ptr = adev->gfx.config.tile_mode_array;
- config->num_tile_configs =
- ARRAY_SIZE(adev->gfx.config.tile_mode_array);
- config->macro_tile_config_ptr =
- adev->gfx.config.macrotile_mode_array;
- config->num_macro_tile_configs =
- ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
- return 0;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -805,7 +773,7 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.address_watch_get_offset = kgd_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info =
get_atc_vmid_pasid_mapping_info,
- .get_tile_config = amdgpu_amdkfd_get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
+ .get_unique_id = amdgpu_amdkfd_get_unique_id,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 8f052e98a3c6..0b7e78748540 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -84,31 +84,6 @@ union TCP_WATCH_CNTL_BITS {
float f32All;
};
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-static int get_tile_config(struct kgd_dev *kgd,
- struct tile_config *config)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- config->gb_addr_config = adev->gfx.config.gb_addr_config;
- config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFBANK);
- config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFRANKS);
-
- config->tile_config_ptr = adev->gfx.config.tile_mode_array;
- config->num_tile_configs =
- ARRAY_SIZE(adev->gfx.config.tile_mode_array);
- config->macro_tile_config_ptr =
- adev->gfx.config.macrotile_mode_array;
- config->num_macro_tile_configs =
- ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
- return 0;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -730,7 +705,6 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
.address_watch_get_offset = kgd_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
.set_scratch_backing_va = set_scratch_backing_va,
- .get_tile_config = get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 19a10db93d68..ccd635b812b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -41,31 +41,6 @@ enum hqd_dequeue_request_type {
RESET_WAVES
};
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-static int get_tile_config(struct kgd_dev *kgd,
- struct tile_config *config)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- config->gb_addr_config = adev->gfx.config.gb_addr_config;
- config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFBANK);
- config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFRANKS);
-
- config->tile_config_ptr = adev->gfx.config.tile_mode_array;
- config->num_tile_configs =
- ARRAY_SIZE(adev->gfx.config.tile_mode_array);
- config->macro_tile_config_ptr =
- adev->gfx.config.macrotile_mode_array;
- config->num_macro_tile_configs =
- ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
- return 0;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -676,6 +651,5 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
get_atc_vmid_pasid_mapping_info,
.set_scratch_backing_va = set_scratch_backing_va,
- .get_tile_config = get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 8562afe5b761..df841c2ac5e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -48,28 +48,6 @@ enum hqd_dequeue_request_type {
RESET_WAVES
};
-
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
- struct tile_config *config)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- config->gb_addr_config = adev->gfx.config.gb_addr_config;
-
- config->tile_config_ptr = adev->gfx.config.tile_mode_array;
- config->num_tile_configs =
- ARRAY_SIZE(adev->gfx.config.tile_mode_array);
- config->macro_tile_config_ptr =
- adev->gfx.config.macrotile_mode_array;
- config->num_macro_tile_configs =
- ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
- return 0;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -736,7 +714,7 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
- .get_tile_config = kgd_gfx_v9_get_tile_config,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
+ .get_unique_id = amdgpu_amdkfd_get_unique_id,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index 63d3e6683dfe..aedf67d57449 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -60,5 +60,3 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
uint8_t vmid, uint16_t *p_pasid);
-int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
- struct tile_config *config);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index fa8ac9d19a7a..9dff792c9290 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -29,6 +29,7 @@
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
+#include <uapi/linux/kfd_ioctl.h>
/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
@@ -276,6 +277,42 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
return 0;
}
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+{
+ struct amdgpu_bo *root = bo;
+ struct amdgpu_vm_bo_base *vm_bo;
+ struct amdgpu_vm *vm;
+ struct amdkfd_process_info *info;
+ struct amdgpu_amdkfd_fence *ef;
+ int ret;
+
+ /* We can always get the vm_bo from the root PD BO. */
+ while (root->parent)
+ root = root->parent;
+
+ vm_bo = root->vm_bo;
+ if (!vm_bo)
+ return 0;
+
+ vm = vm_bo->vm;
+ if (!vm)
+ return 0;
+
+ info = vm->process_info;
+ if (!info || !info->eviction_fence)
+ return 0;
+
+ ef = container_of(dma_fence_get(&info->eviction_fence->base),
+ struct amdgpu_amdkfd_fence, base);
+
+ BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
+ ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
+ dma_resv_unlock(bo->tbo.base.resv);
+
+ dma_fence_put(&ef->base);
+ return ret;
+}
+
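
Two common kernel idioms carry the new helper above: walking bo->parent links up to the root page directory, and recovering the amdkfd fence wrapper from its embedded struct dma_fence with container_of() while holding a reference. A reduced sketch of the latter, assuming the same embedded-member layout:

#include <linux/kernel.h>       /* container_of() */
#include <linux/dma-fence.h>

/* Recover the wrapping amdkfd fence from its embedded "base" member;
 * the dma_fence_get() above pairs with the dma_fence_put() at the end. */
static struct amdgpu_amdkfd_fence *to_kfd_fence(struct dma_fence *f)
{
        return container_of(f, struct amdgpu_amdkfd_fence, base);
}
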
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
bool wait)
{
@@ -364,18 +401,18 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
- bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT;
+ bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
uint32_t mapping_flags;
mapping_flags = AMDGPU_VM_PAGE_READABLE;
- if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE)
+ if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
- if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE)
+ if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
switch (adev->asic_type) {
case CHIP_ARCTURUS:
- if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) {
+ if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
if (bo_adev == adev)
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
@@ -847,9 +884,9 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
vm_list_node) {
struct amdgpu_bo *pd = peer_vm->root.base.bo;
- ret = amdgpu_sync_resv(NULL,
- sync, pd->tbo.base.resv,
- AMDGPU_FENCE_OWNER_KFD, false);
+ ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
+ AMDGPU_SYNC_NE_OWNER,
+ AMDGPU_FENCE_OWNER_KFD);
if (ret)
return ret;
}
@@ -1044,6 +1081,8 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
list_del(&vm->vm_list_node);
mutex_unlock(&process_info->lock);
+ vm->process_info = NULL;
+
/* Release per-process resources when last compute VM is destroyed */
if (!process_info->n_vms) {
WARN_ON(!list_empty(&process_info->kfd_bo_list));
@@ -1122,24 +1161,24 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
/*
* Check on which domain to allocate BO
*/
- if (flags & ALLOC_MEM_FLAGS_VRAM) {
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
- alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
+ alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
- } else if (flags & ALLOC_MEM_FLAGS_GTT) {
+ } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_flags = 0;
- } else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
+ } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
alloc_flags = 0;
if (!offset || !*offset)
return -EINVAL;
user_addr = untagged_addr(*offset);
- } else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
- ALLOC_MEM_FLAGS_MMIO_REMAP)) {
+ } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
+ KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
bo_type = ttm_bo_type_sg;
@@ -1160,7 +1199,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
}
INIT_LIST_HEAD(&(*mem)->bo_va_list);
mutex_init(&(*mem)->lock);
- (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
+ (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
/* Workaround for AQL queue wraparound bug. Map the same
* memory twice. That means we only actually allocate half
@@ -1642,10 +1681,12 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
INIT_LIST_HEAD(&(*mem)->bo_va_list);
mutex_init(&(*mem)->lock);
+
(*mem)->alloc_flags =
((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
- ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
- ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
+ | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
+ | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
(*mem)->bo = amdgpu_bo_ref(bo);
(*mem)->va = va;
@@ -2204,3 +2245,25 @@ int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
kfree(mem);
return 0;
}
+
+/* Returns GPU-specific tiling mode information */
+int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
+ struct tile_config *config)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ config->gb_addr_config = adev->gfx.config.gb_addr_config;
+ config->tile_config_ptr = adev->gfx.config.tile_mode_array;
+ config->num_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.tile_mode_array);
+ config->macro_tile_config_ptr =
+ adev->gfx.config.macrotile_mode_array;
+ config->num_macro_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
+
+ /* These values are not set from GFX9 onwards */
+ config->num_banks = adev->gfx.config.num_banks;
+ config->num_ranks = adev->gfx.config.num_ranks;
+
+ return 0;
+}
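
With the per-ASIC copies removed, every caller now reaches tiling information through this one helper. A hedged sketch of a call site (the surrounding caller is hypothetical; kgd is the kgd_dev handle KFD already holds for the GPU):

static void dump_tile_config(struct kgd_dev *kgd)
{
        struct tile_config cfg;

        if (amdgpu_amdkfd_get_tile_config(kgd, &cfg))
                return;

        pr_debug("gb_addr_config=0x%08x, %u tile modes, %u macro tile modes\n",
                 cfg.gb_addr_config, cfg.num_tile_configs,
                 cfg.num_macro_tile_configs);
}
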
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index a62cbc8199de..f355d9a752d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1461,6 +1461,20 @@ static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector
return MODE_OK;
}
+static int
+amdgpu_connector_late_register(struct drm_connector *connector)
+{
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+ int r = 0;
+
+ if (amdgpu_connector->ddc_bus->has_aux) {
+ amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev;
+ r = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux);
+ }
+
+ return r;
+}
+
static const struct drm_connector_helper_funcs amdgpu_connector_dp_helper_funcs = {
.get_modes = amdgpu_connector_dp_get_modes,
.mode_valid = amdgpu_connector_dp_mode_valid,
@@ -1475,6 +1489,7 @@ static const struct drm_connector_funcs amdgpu_connector_dp_funcs = {
.early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
+ .late_register = amdgpu_connector_late_register,
};
static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
@@ -1485,6 +1500,7 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
.early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
+ .late_register = amdgpu_connector_late_register,
};
void
@@ -1931,7 +1947,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
- drm_connector_register(connector);
if (has_aux)
amdgpu_atombios_dp_aux_init(amdgpu_connector);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index a52a084158b1..af91627b19b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -28,6 +28,7 @@
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>
+#include <linux/dma-buf.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
@@ -415,7 +416,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
/* Don't move this buffer if we have depleted our allowance
* to move it. Don't move anything if the threshold is zero.
*/
- if (p->bytes_moved < p->bytes_moved_threshold) {
+ if (p->bytes_moved < p->bytes_moved_threshold &&
+ (!bo->tbo.base.dma_buf ||
+ list_empty(&bo->tbo.base.dma_buf->attachments))) {
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
(bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
/* And don't move a CPU_ACCESS_REQUIRED BO to limited
@@ -651,16 +654,19 @@ out:
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_bo_list_entry *e;
int r;
list_for_each_entry(e, &p->validated, tv.head) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
struct dma_resv *resv = bo->tbo.base.resv;
+ enum amdgpu_sync_mode sync_mode;
- r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
- amdgpu_bo_explicit_sync(bo));
-
+ sync_mode = amdgpu_bo_explicit_sync(bo) ?
+ AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
+ r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
+ &fpriv->vm);
if (r)
return r;
}
@@ -1202,7 +1208,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct drm_sched_entity *entity = p->entity;
enum drm_sched_priority priority;
- struct amdgpu_ring *ring;
struct amdgpu_bo_list_entry *e;
struct amdgpu_job *job;
uint64_t seq;
@@ -1211,7 +1216,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job = p->job;
p->job = NULL;
- r = drm_sched_job_init(&job->base, entity, p->filp);
+ r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
if (r)
goto error_unlock;
@@ -1255,9 +1260,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
- ring = to_amdgpu_ring(entity->rq->sched);
- amdgpu_ring_priority_get(ring, priority);
-
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 94a6c42f29ea..6ed36a2c5f73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -61,12 +61,24 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
return -EACCES;
}
+static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
+{
+ switch (prio) {
+ case DRM_SCHED_PRIORITY_HIGH_HW:
+ case DRM_SCHED_PRIORITY_KERNEL:
+ return AMDGPU_GFX_PIPE_PRIO_HIGH;
+ default:
+ return AMDGPU_GFX_PIPE_PRIO_NORMAL;
+ }
+}
+
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
{
struct amdgpu_device *adev = ctx->adev;
struct amdgpu_ctx_entity *entity;
struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
unsigned num_scheds = 0;
+ enum gfx_pipe_priority hw_prio;
enum drm_sched_priority priority;
int r;
@@ -79,46 +91,51 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const
priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
switch (hw_ip) {
- case AMDGPU_HW_IP_GFX:
- sched = &adev->gfx.gfx_ring[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_COMPUTE:
- scheds = adev->gfx.compute_sched;
- num_scheds = adev->gfx.num_compute_sched;
- break;
- case AMDGPU_HW_IP_DMA:
- scheds = adev->sdma.sdma_sched;
- num_scheds = adev->sdma.num_sdma_sched;
- break;
- case AMDGPU_HW_IP_UVD:
- sched = &adev->uvd.inst[0].ring.sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_VCE:
- sched = &adev->vce.ring[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_UVD_ENC:
- sched = &adev->uvd.inst[0].ring_enc[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_VCN_DEC:
- scheds = adev->vcn.vcn_dec_sched;
- num_scheds = adev->vcn.num_vcn_dec_sched;
- break;
- case AMDGPU_HW_IP_VCN_ENC:
- scheds = adev->vcn.vcn_enc_sched;
- num_scheds = adev->vcn.num_vcn_enc_sched;
- break;
- case AMDGPU_HW_IP_VCN_JPEG:
- scheds = adev->jpeg.jpeg_sched;
- num_scheds = adev->jpeg.num_jpeg_sched;
- break;
+ case AMDGPU_HW_IP_GFX:
+ sched = &adev->gfx.gfx_ring[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_COMPUTE:
+ hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
+ scheds = adev->gfx.compute_prio_sched[hw_prio];
+ num_scheds = adev->gfx.num_compute_sched[hw_prio];
+ break;
+ case AMDGPU_HW_IP_DMA:
+ scheds = adev->sdma.sdma_sched;
+ num_scheds = adev->sdma.num_sdma_sched;
+ break;
+ case AMDGPU_HW_IP_UVD:
+ sched = &adev->uvd.inst[0].ring.sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_VCE:
+ sched = &adev->vce.ring[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_UVD_ENC:
+ sched = &adev->uvd.inst[0].ring_enc[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_VCN_DEC:
+ sched = drm_sched_pick_best(adev->vcn.vcn_dec_sched,
+ adev->vcn.num_vcn_dec_sched);
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_VCN_ENC:
+ sched = drm_sched_pick_best(adev->vcn.vcn_enc_sched,
+ adev->vcn.num_vcn_enc_sched);
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ scheds = adev->jpeg.jpeg_sched;
+ num_scheds = adev->jpeg.num_jpeg_sched;
+ break;
}
r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
@@ -502,6 +519,29 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
return fence;
}
+static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
+ struct amdgpu_ctx_entity *aentity,
+ int hw_ip,
+ enum drm_sched_priority priority)
+{
+ struct amdgpu_device *adev = ctx->adev;
+ enum gfx_pipe_priority hw_prio;
+ struct drm_gpu_scheduler **scheds = NULL;
+ unsigned num_scheds;
+
+ /* set sw priority */
+ drm_sched_entity_set_priority(&aentity->entity, priority);
+
+ /* set hw priority */
+ if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
+ hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
+ scheds = adev->gfx.compute_prio_sched[hw_prio];
+ num_scheds = adev->gfx.num_compute_sched[hw_prio];
+ drm_sched_entity_modify_sched(&aentity->entity, scheds,
+ num_scheds);
+ }
+}
+
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
enum drm_sched_priority priority)
{
@@ -514,13 +554,11 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
ctx->init_priority : ctx->override_priority;
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
- struct drm_sched_entity *entity;
-
if (!ctx->entities[i][j])
continue;
- entity = &ctx->entities[i][j]->entity;
- drm_sched_entity_set_priority(entity, ctx_prio);
+ amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
+ i, ctx_prio);
}
}
}
@@ -628,20 +666,53 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
mutex_destroy(&mgr->lock);
}
+
+static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev)
+{
+ int num_compute_sched_normal = 0;
+ int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
+ int i;
+
+ /* use one drm sched array, gfx.compute_sched, to store both high-
+ * and normal-priority drm compute schedulers */
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ if (!adev->gfx.compute_ring[i].has_high_prio)
+ adev->gfx.compute_sched[num_compute_sched_normal++] =
+ &adev->gfx.compute_ring[i].sched;
+ else
+ adev->gfx.compute_sched[num_compute_sched_high--] =
+ &adev->gfx.compute_ring[i].sched;
+ }
+
+ /* compute rings only have two priorities for now */
+ i = AMDGPU_GFX_PIPE_PRIO_NORMAL;
+ adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
+ adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
+
+ i = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) {
+ /* When compute has no high priority rings, use the
+ * normal priority sched array */
+ adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
+ adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
+ } else {
+ adev->gfx.compute_prio_sched[i] =
+ &adev->gfx.compute_sched[num_compute_sched_high + 1];
+ adev->gfx.num_compute_sched[i] =
+ adev->gfx.num_compute_rings - num_compute_sched_normal;
+ }
+}
+
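
The loop above packs both priority classes into the single compute_sched array, normal-priority entries growing from index 0 and high-priority entries from AMDGPU_MAX_COMPUTE_RINGS - 1 downward. A worked example with hypothetical ring flags:

/*
 * AMDGPU_MAX_COMPUTE_RINGS == 8, num_compute_rings == 8, rings 2 and 6
 * flagged high priority (hypothetical):
 *
 *   index:   0   1   2   3   4   5   6   7
 *   sched:  r0  r1  r3  r4  r5  r7  r6  r2
 *          |------ normal (6) ------|-high-|
 *
 * num_compute_sched_normal ends at 6 and num_compute_sched_high at 5,
 * so the high-priority slice begins at num_compute_sched_high + 1.
 */
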
void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
{
int i, j;
+ amdgpu_ctx_init_compute_sched(adev);
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
adev->gfx.num_gfx_sched++;
}
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
- adev->gfx.num_compute_sched++;
- }
-
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
adev->sdma.num_sdma_sched++;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index f24ed9a1a3e5..c0f9a651dc06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -31,6 +31,9 @@
#include <drm/drm_debugfs.h>
#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_dm_debugfs.h"
+#include "amdgpu_ras.h"
/**
* amdgpu_debugfs_add_files - Add simple debugfs entries
@@ -176,7 +179,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
} else {
r = get_user(value, (uint32_t *)buf);
if (!r)
- WREG32(*pos >> 2, value);
+ amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
}
if (r) {
result = r;
@@ -781,11 +784,11 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
ssize_t result = 0;
uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
- if (size & 3 || *pos & 3)
+ if (size > 4096 || size & 3 || *pos & 3)
return -EINVAL;
/* decode offset */
- offset = *pos & GENMASK_ULL(11, 0);
+ offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
se = (*pos & GENMASK_ULL(19, 12)) >> 12;
sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
@@ -823,7 +826,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
while (size) {
uint32_t value;
- value = data[offset++];
+ value = data[result >> 2];
r = put_user(value, (uint32_t *)buf);
if (r) {
result = r;
@@ -840,6 +843,55 @@ err:
return result;
}
+/**
+ * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos: Offset to seek to
+ *
+ * Write a 32-bit zero to disable GFXOFF or a 32-bit non-zero value to enable it.
+ */
+static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+ return r;
+ }
+
+ amdgpu_gfx_off_ctrl(adev, value ? true : false);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ return result;
+}
+
+
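
From user space the new node accepts packed 32-bit words, one per get_user() round of the loop above. A hedged example program; the path assumes the default debugfs mount and DRI minor 0:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
        uint32_t v = 0; /* 0 disables GFXOFF, non-zero enables it */
        int fd = open("/sys/kernel/debug/dri/0/amdgpu_gfxoff", O_WRONLY);

        if (fd < 0)
                return 1;
        if (write(fd, &v, sizeof(v)) != sizeof(v)) {
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}
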
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
@@ -888,6 +940,11 @@ static const struct file_operations amdgpu_debugfs_gpr_fops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
+ .owner = THIS_MODULE,
+ .write = amdgpu_debugfs_gfxoff_write,
+};
+
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs_didt_fops,
@@ -897,6 +954,7 @@ static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_sensors_fops,
&amdgpu_debugfs_wave_fops,
&amdgpu_debugfs_gpr_fops,
+ &amdgpu_debugfs_gfxoff_fops,
};
static const char *debugfs_regs_names[] = {
@@ -908,6 +966,7 @@ static const char *debugfs_regs_names[] = {
"amdgpu_sensors",
"amdgpu_wave",
"amdgpu_gpr",
+ "amdgpu_gfxoff",
};
/**
@@ -934,18 +993,6 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
return 0;
}
-void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
-{
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
- if (adev->debugfs_regs[i]) {
- debugfs_remove(adev->debugfs_regs[i]);
- adev->debugfs_regs[i] = NULL;
- }
- }
-}
-
static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1211,11 +1258,47 @@ failure:
return 0;
}
+static int amdgpu_debugfs_sclk_set(void *data, u64 val)
+{
+ int ret = 0;
+ uint32_t max_freq, min_freq;
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(adev->ddev->dev);
+ if (ret < 0)
+ return ret;
+
+ if (is_support_sw_smu(adev)) {
+ ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq, true);
+ if (!ret && (val > max_freq || val < min_freq))
+ ret = -EINVAL;
+ if (!ret)
+ ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val, true);
+ }
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
amdgpu_debugfs_ib_preempt, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_sclk_set, NULL,
+ amdgpu_debugfs_sclk_set, "%llu\n");
+
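
DEFINE_SIMPLE_ATTRIBUTE() builds the file_operations from a get/set pair plus a printf format, which is how both attributes above get their fops. A minimal sketch of a write-only u64 attribute, with hypothetical names:

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/fs.h>

static int demo_set(void *data, u64 val)
{
        struct amdgpu_device *adev = data;

        dev_info(adev->dev, "demo value: %llu\n", val);
        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_demo, NULL, demo_set, "%llu\n");

/* registration, e.g. from amdgpu_debugfs_init():
 * debugfs_create_file("amdgpu_demo", 0200, root, adev, &fops_demo);
 */
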
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
+ int r, i;
+
adev->debugfs_preempt =
debugfs_create_file("amdgpu_preempt_ib", 0600,
adev->ddev->primary->debugfs_root, adev,
@@ -1225,24 +1308,78 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
return -EIO;
}
+ adev->smu.debugfs_sclk =
+ debugfs_create_file("amdgpu_force_sclk", 0200,
+ adev->ddev->primary->debugfs_root, adev,
+ &fops_sclk_set);
+ if (!adev->smu.debugfs_sclk) {
+ DRM_ERROR("unable to create amdgpu_force_sclk debugfs file\n");
+ return -EIO;
+ }
+
+ /* Register debugfs entries for amdgpu_ttm */
+ r = amdgpu_ttm_debugfs_init(adev);
+ if (r) {
+ DRM_ERROR("Failed to init debugfs\n");
+ return r;
+ }
+
+ r = amdgpu_debugfs_pm_init(adev);
+ if (r) {
+ DRM_ERROR("Failed to register debugfs file for dpm!\n");
+ return r;
+ }
+
+ if (amdgpu_debugfs_sa_init(adev))
+ dev_err(adev->dev, "failed to register debugfs file for SA\n");
+
+ if (amdgpu_debugfs_fence_init(adev))
+ dev_err(adev->dev, "fence debugfs file creation failed\n");
+
+ r = amdgpu_debugfs_gem_init(adev);
+ if (r)
+ DRM_ERROR("registering gem debugfs failed (%d).\n", r);
+
+ r = amdgpu_debugfs_regs_init(adev);
+ if (r)
+ DRM_ERROR("registering register debugfs failed (%d).\n", r);
+
+ r = amdgpu_debugfs_firmware_init(adev);
+ if (r)
+ DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+
+#if defined(CONFIG_DRM_AMD_DC)
+ if (amdgpu_device_has_dc_support(adev)) {
+ if (dtn_debugfs_init(adev))
+ DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
+ }
+#endif
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring)
+ continue;
+
+ if (amdgpu_debugfs_ring_init(adev, ring))
+ DRM_ERROR("Failed to register debugfs file for rings!\n");
+ }
+
+ amdgpu_ras_debugfs_create_all(adev);
+
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
ARRAY_SIZE(amdgpu_debugfs_list));
}
-void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev)
-{
- debugfs_remove(adev->debugfs_preempt);
-}
-
#else
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
return 0;
}
-void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev) { }
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
return 0;
}
-void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index f289d28ad6b2..de12d1101526 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -32,9 +32,8 @@ struct amdgpu_debugfs {
};
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
-void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
int amdgpu_debugfs_init(struct amdgpu_device *adev);
-void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev);
+void amdgpu_debugfs_fini(struct amdgpu_device *adev);
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
const struct drm_info_list *files,
unsigned nfiles);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 39cd545976b7..6f469facabfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -183,20 +183,51 @@ bool amdgpu_device_supports_baco(struct drm_device *dev)
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
uint32_t *buf, size_t size, bool write)
{
- uint64_t last;
unsigned long flags;
+ uint32_t hi = ~0;
+ uint64_t last;
+
+#ifdef CONFIG_64BIT
+ last = min(pos + size, adev->gmc.visible_vram_size);
+ if (last > pos) {
+ void __iomem *addr = adev->mman.aper_base_kaddr + pos;
+ size_t count = last - pos;
+
+ if (write) {
+ memcpy_toio(addr, buf, count);
+ mb();
+ amdgpu_asic_flush_hdp(adev, NULL);
+ } else {
+ amdgpu_asic_invalidate_hdp(adev, NULL);
+ mb();
+ memcpy_fromio(buf, addr, count);
+ }
+
+ if (count == size)
+ return;
+
+ pos += count;
+ buf += count / 4;
+ size -= count;
+ }
+#endif
+
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ for (last = pos + size; pos < last; pos += 4) {
+ uint32_t tmp = pos >> 31;
- last = size - 4;
- for (last += pos; pos <= last; pos += 4) {
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
- WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
+ if (tmp != hi) {
+ WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
+ hi = tmp;
+ }
if (write)
WREG32_NO_KIQ(mmMM_DATA, *buf++);
else
*buf++ = RREG32_NO_KIQ(mmMM_DATA);
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
}
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
}
/*
@@ -275,6 +306,26 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
BUG();
}
+static inline void amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags)
+{
+ trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+
+ if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
+ writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
+ else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
+ writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ }
+
+ if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
+ udelay(500);
+ }
+}
+
/**
* amdgpu_mm_wreg - write to a memory mapped IO register
*
@@ -288,8 +339,6 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
uint32_t acc_flags)
{
- trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
-
if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
adev->last_mm_index = v;
}
@@ -297,20 +346,26 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
return amdgpu_kiq_wreg(adev, reg, v);
- if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
- writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
- else {
- unsigned long flags;
+ amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+}
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
- writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- }
+/*
+ * amdgpu_mm_wreg_mmio_rlc - write a register either via MMIO or via the RLC path if it is in range
+ *
+ * This function is invoked only for debugfs register access.
+ */
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+ uint32_t acc_flags)
+{
+ if (amdgpu_sriov_fullaccess(adev) &&
+ adev->gfx.rlc.funcs &&
+ adev->gfx.rlc.funcs->is_rlcg_access_range) {
- if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
- udelay(500);
+ if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
+ return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
}
+
+ amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
}
/**
@@ -1136,7 +1191,7 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return dev->open_count == 0;
+ return atomic_read(&dev->open_count) == 0;
}
static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
@@ -2344,15 +2399,16 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
}
adev->ip_blocks[i].status.hw = false;
/* handle putting the SMC in the appropriate state */
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
- r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
- if (r) {
- DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
- adev->mp1_state, r);
- return r;
+ if (!amdgpu_sriov_vf(adev)) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+ r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
+ if (r) {
+ DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
+ adev->mp1_state, r);
+ return r;
+ }
}
}
-
adev->ip_blocks[i].status.hw = false;
}
@@ -2800,7 +2856,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
if (amdgpu_emu_mode == 1)
- adev->usec_timeout *= 2;
+ adev->usec_timeout *= 10;
adev->gmc.gart_size = 512 * 1024 * 1024;
adev->accel_working = false;
adev->num_rings = 0;
@@ -3088,22 +3144,6 @@ fence_driver_init:
} else
adev->ucode_sysfs_en = true;
- r = amdgpu_debugfs_gem_init(adev);
- if (r)
- DRM_ERROR("registering gem debugfs failed (%d).\n", r);
-
- r = amdgpu_debugfs_regs_init(adev);
- if (r)
- DRM_ERROR("registering register debugfs failed (%d).\n", r);
-
- r = amdgpu_debugfs_firmware_init(adev);
- if (r)
- DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
-
- r = amdgpu_debugfs_init(adev);
- if (r)
- DRM_ERROR("Creating debugfs files failed (%d).\n", r);
-
if ((amdgpu_testing & 1)) {
if (adev->accel_working)
amdgpu_test_moves(adev);
@@ -3177,6 +3217,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
flush_delayed_work(&adev->delayed_init_work);
adev->shutdown = true;
+ /* make sure IB tests are finished before entering exclusive mode
+ * to avoid preemption on the IB tests
+ */
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_request_full_gpu(adev, false);
+
/* disable all interrupts */
amdgpu_irq_disable_all(adev);
if (adev->mode_info.mode_config_initialized){
@@ -3219,13 +3265,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rmmio = NULL;
amdgpu_device_doorbell_fini(adev);
- amdgpu_debugfs_regs_cleanup(adev);
device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
if (adev->ucode_sysfs_en)
amdgpu_ucode_sysfs_fini(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
- amdgpu_debugfs_preempt_cleanup(adev);
if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
amdgpu_discovery_fini(adev);
}
@@ -3309,7 +3353,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
}
}
- amdgpu_amdkfd_suspend(adev);
+ amdgpu_amdkfd_suspend(adev, !fbcon);
amdgpu_ras_suspend(adev);
@@ -3393,7 +3437,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
}
}
}
- r = amdgpu_amdkfd_resume(adev);
+ r = amdgpu_amdkfd_resume(adev, !fbcon);
if (r)
return r;
@@ -3913,6 +3957,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
if (r)
goto out;
+ amdgpu_fbdev_set_suspend(tmp_adev, 0);
+
/* must succeed. */
amdgpu_ras_resume(tmp_adev);
@@ -4086,6 +4132,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*/
amdgpu_unregister_gpu_instance(tmp_adev);
+ amdgpu_fbdev_set_suspend(adev, 1);
+
/* disable ras on ALL IPs */
if (!(in_ras_intr && !use_baco) &&
amdgpu_device_ip_need_full_reset(tmp_adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index f95092741c38..27d8ae19a7a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -307,7 +307,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) {
- DRM_INFO("set register base offset for %s\n",
+ DRM_DEBUG("set register base offset for %s\n",
hw_id_names[le16_to_cpu(ip->hw_id)]);
adev->reg_offset[hw_ip][ip->number_instance] =
ip->base_address;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 6d520a3eec40..84cee27cd7ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -99,7 +99,7 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work)
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(int)(work->target_vblank -
- amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
+ amdgpu_get_vblank_counter_kms(crtc)) > 0) {
schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
return;
}
@@ -219,7 +219,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
if (!adev->enable_virtual_display)
work->base = amdgpu_bo_gpu_offset(new_abo);
work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
- amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
+ amdgpu_get_vblank_counter_kms(crtc);
/* we borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -924,3 +924,15 @@ int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
return AMDGPU_CRTC_IRQ_NONE;
}
}
+
+bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos,
+ int *hpos, ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
+
+ return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
+ stime, etime, mode);
+}
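
The wrapper exists because the vblank hooks moved from struct drm_driver to per-CRTC function tables in this kernel; the driver-level callbacks removed in the amdgpu_drv.c hunk below reappear as drm_crtc_funcs members, with the scanout-position helper going into drm_crtc_helper_funcs. A sketch of the wiring, hedged since the actual tables live in the per-ASIC DCE code:

static const struct drm_crtc_funcs demo_crtc_funcs = {
        /* .set_config, .page_flip, ... elided */
        .get_vblank_counter = amdgpu_get_vblank_counter_kms,
        .enable_vblank = amdgpu_enable_vblank_kms,
        .disable_vblank = amdgpu_disable_vblank_kms,
        .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
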
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index a59cd47aa6c1..ffeb20f11c07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -223,6 +223,37 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
}
/**
+ * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
+ *
+ * @attach: attachment to pin down
+ *
+ * Pin the BO which is backing the DMA-buf so that it can't move any more.
+ */
+static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ /* pin buffer into GTT */
+ return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+}
+
+/**
+ * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
+ *
+ * @attach: attachment to unpin
+ *
+ * Unpin a previously pinned BO to make it movable again.
+ */
+static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ amdgpu_bo_unpin(bo);
+}
+
+/**
* amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
* @attach: DMA-buf attachment
* @dir: DMA direction
@@ -244,9 +275,19 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
struct sg_table *sgt;
long r;
- r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
- if (r)
- return ERR_PTR(r);
+ if (!bo->pin_count) {
+ /* move buffer into GTT */
+ struct ttm_operation_ctx ctx = { false, false };
+
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r)
+ return ERR_PTR(r);
+
+ } else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
+ AMDGPU_GEM_DOMAIN_GTT)) {
+ return ERR_PTR(-EBUSY);
+ }
sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
if (IS_ERR(sgt))
@@ -277,13 +318,9 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- struct drm_gem_object *obj = attach->dmabuf->priv;
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-
dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
sg_free_table(sgt);
kfree(sgt);
- amdgpu_bo_unpin(bo);
}
/**
@@ -327,9 +364,10 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
}
const struct dma_buf_ops amdgpu_dmabuf_ops = {
- .dynamic_mapping = true,
.attach = amdgpu_dma_buf_attach,
.detach = amdgpu_dma_buf_detach,
+ .pin = amdgpu_dma_buf_pin,
+ .unpin = amdgpu_dma_buf_unpin,
.map_dma_buf = amdgpu_dma_buf_map,
.unmap_dma_buf = amdgpu_dma_buf_unmap,
.release = drm_gem_dmabuf_release,
@@ -413,6 +451,73 @@ error:
}
/**
+ * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
+ *
+ * @attach: the DMA-buf attachment
+ *
+ * Invalidate the DMA-buf attachment, making sure that we re-create the
+ * mapping before the next use.
+ */
+static void
+amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->importer_priv;
+ struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { false, false };
+ struct ttm_placement placement = {};
+ struct amdgpu_vm_bo_base *bo_base;
+ int r;
+
+ if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+ return;
+
+ r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
+ if (r) {
+ DRM_ERROR("Failed to invalidate DMA-buf import (%d))\n", r);
+ return;
+ }
+
+ for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
+ struct amdgpu_vm *vm = bo_base->vm;
+ struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
+
+ if (ticket) {
+ /* When we get an error here it means that somebody
+ * else is holding the VM lock and updating page tables,
+ * so we can just continue here.
+ */
+ r = dma_resv_lock(resv, ticket);
+ if (r)
+ continue;
+
+ } else {
+ /* TODO: This is more problematic and we actually need
+ * to allow page tables updates without holding the
+ * lock.
+ */
+ if (!dma_resv_trylock(resv))
+ continue;
+ }
+
+ r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ if (!r)
+ r = amdgpu_vm_handle_moved(adev, vm);
+
+ if (r && r != -EBUSY)
+ DRM_ERROR("Failed to invalidate VM page tables (%d))\n",
+ r);
+
+ dma_resv_unlock(resv);
+ }
+}
+
+static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
+ .move_notify = amdgpu_dma_buf_move_notify
+};
+
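
On the importer side, supplying an ops table with move_notify is what opts in to dynamic behavior: the exporter may then relocate the buffer and call back, instead of the importer having to keep it pinned. A reduced sketch (handler body hypothetical):

#include <linux/dma-buf.h>

static void demo_move_notify(struct dma_buf_attachment *attach)
{
        /* invalidate cached mappings; the exporter holds the resv lock */
}

static const struct dma_buf_attach_ops demo_attach_ops = {
        .move_notify = demo_move_notify,
};

/* attach = dma_buf_dynamic_attach(dmabuf, dev, &demo_attach_ops, priv); */
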
+/**
* amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
* @dev: DRM device
* @dma_buf: Shared DMA buffer
@@ -444,7 +549,8 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
if (IS_ERR(obj))
return obj;
- attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true);
+ attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
+ &amdgpu_dma_buf_attach_ops, obj);
if (IS_ERR(attach)) {
drm_gem_object_put(obj);
return ERR_CAST(attach);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index a2e8c3dfb4f1..ba1bb95a3cf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -1171,3 +1171,20 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
return ret;
}
+
+int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
+ uint32_t cstate)
+{
+ int ret = 0;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct smu_context *smu = &adev->smu;
+
+ if (is_support_sw_smu(adev))
+ ret = smu_set_df_cstate(smu, cstate);
+ else if (pp_funcs &&
+ pp_funcs->set_df_cstate)
+ ret = pp_funcs->set_df_cstate(pp_handle, cstate);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 902ca6c00cca..936d85aa0fbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -448,6 +448,8 @@ struct amdgpu_pm {
/* powerplay feature */
uint32_t pp_feature;
+ /* Used for I2C access to various EEPROMs on relevant ASICs */
+ struct i2c_adapter smu_i2c;
};
#define R600_SSTU_DFLT 0
@@ -533,4 +535,7 @@ int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev);
+int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
+ uint32_t cstate);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index a9c4edca70c9..8ea86ffdea0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1004,7 +1004,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
/* Renoir */
- {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
/* Navi12 */
{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
@@ -1021,6 +1021,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct drm_device *dev;
+ struct amdgpu_device *adev;
unsigned long flags = ent->driver_data;
int ret, retry = 0;
bool supports_atomic = false;
@@ -1090,6 +1091,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
+ amdgpu_driver_load_kms(dev, ent->driver_data);
+
retry_init:
ret = drm_dev_register(dev, ent->driver_data);
if (ret == -EAGAIN && ++retry <= 3) {
@@ -1100,6 +1103,11 @@ retry_init:
} else if (ret)
goto err_pci;
+ adev = dev->dev_private;
+ ret = amdgpu_debugfs_init(adev);
+ if (ret)
+ DRM_ERROR("Creating debugfs files failed (%d).\n", ret);
+
return 0;
err_pci:
@@ -1119,9 +1127,10 @@ amdgpu_pci_remove(struct pci_dev *pdev)
#endif
DRM_ERROR("Hotplug removal is not supported\n");
drm_dev_unplug(dev);
- drm_dev_put(dev);
+ amdgpu_driver_unload_kms(dev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
+ drm_dev_put(dev);
}
static void
@@ -1220,11 +1229,15 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
}
}
+ adev->in_runpm = true;
if (amdgpu_device_supports_boco(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev);
ret = amdgpu_device_suspend(drm_dev, false);
+ if (ret)
+ return ret;
+
if (amdgpu_device_supports_boco(drm_dev)) {
/* Only need to handle PCI state in the driver for ATPX
* PCI core handles it for _PR3.
@@ -1278,6 +1291,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
drm_kms_helper_poll_enable(drm_dev);
if (amdgpu_device_supports_boco(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ adev->in_runpm = false;
return 0;
}
@@ -1285,24 +1299,55 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_dev->dev_private;
- struct drm_crtc *crtc;
+ /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
+ int ret = 1;
if (!adev->runpm) {
pm_runtime_forbid(dev);
return -EBUSY;
}
- list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
- if (crtc->enabled) {
- DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
- return -EBUSY;
+ if (amdgpu_device_has_dc_support(adev)) {
+ struct drm_crtc *crtc;
+
+ drm_modeset_lock_all(drm_dev);
+
+ drm_for_each_crtc(crtc, drm_dev) {
+ if (crtc->state->active) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+
+ drm_modeset_unlock_all(drm_dev);
+
+ } else {
+ struct drm_connector *list_connector;
+ struct drm_connector_list_iter iter;
+
+ mutex_lock(&drm_dev->mode_config.mutex);
+ drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
+
+ drm_connector_list_iter_begin(drm_dev, &iter);
+ drm_for_each_connector_iter(list_connector, &iter) {
+ if (list_connector->dpms == DRM_MODE_DPMS_ON) {
+ ret = -EBUSY;
+ break;
+ }
}
+
+ drm_connector_list_iter_end(&iter);
+
+ drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
+ mutex_unlock(&drm_dev->mode_config.mutex);
}
+ if (ret == -EBUSY)
+ DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+
pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
- /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
- return 1;
+ return ret;
}
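
The rework keeps the function's contract with the PM core: return -EBUSY to veto, or arm the autosuspend timer and return 1 so the core does not suspend synchronously. The skeleton, reduced to the pattern (busy check hypothetical):

#include <linux/pm_runtime.h>

static int demo_runtime_idle(struct device *dev)
{
        if (demo_device_is_busy(dev))   /* hypothetical busy test */
                return -EBUSY;

        pm_runtime_mark_last_busy(dev);
        pm_runtime_autosuspend(dev);
        return 1;       /* non-zero: the core must not call suspend itself */
}
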
long amdgpu_drm_ioctl(struct file *filp,
@@ -1377,32 +1422,15 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
return 0;
}
-static bool
-amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
-{
- return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
- stime, etime, mode);
-}
-
static struct drm_driver kms_driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_ATOMIC |
+ DRIVER_ATOMIC |
DRIVER_GEM |
DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
DRIVER_SYNCOBJ_TIMELINE,
- .load = amdgpu_driver_load_kms,
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
.lastclose = amdgpu_driver_lastclose_kms,
- .unload = amdgpu_driver_unload_kms,
- .get_vblank_counter = amdgpu_get_vblank_counter_kms,
- .enable_vblank = amdgpu_enable_vblank_kms,
- .disable_vblank = amdgpu_disable_vblank_kms,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
- .get_scanout_position = amdgpu_get_crtc_scanout_position,
.irq_handler = amdgpu_irq_handler,
.ioctls = amdgpu_ioctls_kms,
.gem_free_object_unlocked = amdgpu_gem_object_free,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 2672dc64a310..9ae7b61f696a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -336,15 +336,12 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
&amdgpu_fb_helper_funcs);
- ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper,
- AMDGPUFB_CONN_LIMIT);
+ ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper);
if (ret) {
kfree(rfbdev);
return ret;
}
- drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
-
/* disable all the possible outputs/crtcs before entering KMS mode */
if (!amdgpu_device_has_dc_support(adev))
drm_helper_disable_unused_functions(adev->ddev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3c01252b1e0e..7531527067df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -503,9 +503,6 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
*/
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
- if (amdgpu_debugfs_fence_init(adev))
- dev_err(adev->dev, "fence debugfs file creation failed\n");
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 0f960b498792..6b9c9193cdfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -192,6 +192,14 @@ static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
return adev->gfx.mec.num_mec > 1;
}
+bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
+ int queue)
+{
+ /* Policy: make queue 0 of each pipe the high-priority compute queue */
+ return (queue == 0);
+}
+
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
int i, queue, pipe, mec;
@@ -477,7 +485,7 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
RESET_QUEUES, 0, 0);
- return amdgpu_ring_test_ring(kiq_ring);
+ return amdgpu_ring_test_helper(kiq_ring);
}
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
@@ -565,7 +573,6 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
int r;
struct ras_fs_if fs_info = {
.sysfs_name = "gfx_err_count",
- .debugfs_name = "gfx_err_inject",
};
struct ras_ih_if ih_info = {
.cb = amdgpu_gfx_process_ras_data_cb,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index ca17ffb01301..5825692d07e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -41,6 +41,15 @@
#define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES
#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
+enum gfx_pipe_priority {
+ AMDGPU_GFX_PIPE_PRIO_NORMAL = 1,
+ AMDGPU_GFX_PIPE_PRIO_HIGH,
+ AMDGPU_GFX_PIPE_PRIO_MAX
+};
+
+#define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0
+#define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15
+
struct amdgpu_mec {
struct amdgpu_bo *hpd_eop_obj;
u64 hpd_eop_gpu_addr;
@@ -151,6 +160,8 @@ struct amdgpu_gfx_config {
unsigned num_gpus;
unsigned multi_gpu_tile_size;
unsigned mc_arb_ramcfg;
+ unsigned num_banks;
+ unsigned num_ranks;
unsigned gb_addr_config;
unsigned num_rbs;
unsigned gs_vgt_table_depth;
@@ -204,6 +215,7 @@ struct amdgpu_gfx_funcs {
u32 queue, u32 vmid);
int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if);
int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
+ void (*reset_ras_error_count) (struct amdgpu_device *adev);
};
struct sq_work {
@@ -278,8 +290,9 @@ struct amdgpu_gfx {
uint32_t num_gfx_sched;
unsigned num_gfx_rings;
struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
+ struct drm_gpu_scheduler **compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
struct drm_gpu_scheduler *compute_sched[AMDGPU_MAX_COMPUTE_RINGS];
- uint32_t num_compute_sched;
+ uint32_t num_compute_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
unsigned num_compute_rings;
struct amdgpu_irq_src eop_irq;
struct amdgpu_irq_src priv_reg_irq;
@@ -361,6 +374,8 @@ void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
int *mec, int *pipe, int *queue);
bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
int pipe, int queue);
+bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
+ int queue);
int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index d3c27a3c43f6..7546da0cc70c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -195,6 +195,7 @@ struct amdgpu_gmc {
uint32_t srbm_soft_reset;
bool prt_warning;
uint64_t stolen_size;
+ uint32_t sdpif_register;
/* apertures */
u64 shared_aperture_start;
u64 shared_aperture_end;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 60655834d649..ccbd7acfc4cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -48,7 +48,6 @@
* produce command buffers which are sent to the kernel and
* put in IBs for execution by the requested ring.
*/
-static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
/**
* amdgpu_ib_get - request an IB (Indirect Buffer)
@@ -295,9 +294,7 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
}
adev->ib_pool_ready = true;
- if (amdgpu_debugfs_sa_init(adev)) {
- dev_err(adev->dev, "failed to register debugfs file for SA\n");
- }
+
return 0;
}
@@ -421,7 +418,7 @@ static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
#endif
-static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
+int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
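amdgpu_debugfs_sa_init() stops being a static side effect of IB pool init and becomes a plain export; per the diffstat, registration is expected to move into amdgpu_debugfs.c. A sketch of such a consolidated call site — the helper names are the ones made public in this series, the wrapper itself is hypothetical:

int amdgpu_debugfs_init_all_sketch(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_sa_init(adev))
		dev_err(adev->dev, "failed to register debugfs file for SA\n");

	if (amdgpu_debugfs_pm_init(adev))
		DRM_ERROR("Failed to register debugfs file for dpm!\n");

	/* TTM and per-ring debugfs files follow the same pattern */
	return amdgpu_ttm_debugfs_init(adev);
}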
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index d42be880a236..4981e443a884 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -117,12 +117,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
drm_sched_job_cleanup(s_job);
- amdgpu_ring_priority_put(ring, s_job->s_priority);
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
@@ -143,7 +141,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
void *owner, struct dma_fence **f)
{
enum drm_sched_priority priority;
- struct amdgpu_ring *ring;
int r;
if (!f)
@@ -158,9 +155,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
- ring = to_amdgpu_ring(entity->rq->sched);
- amdgpu_ring_priority_get(ring, priority);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 60591dbc2097..fd1dc3236eca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -88,9 +88,6 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
if (adev->rmmio == NULL)
goto done_free;
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_request_full_gpu(adev, false);
-
if (adev->runpm) {
pm_runtime_get_sync(dev->dev);
pm_runtime_forbid(dev->dev);
@@ -170,10 +167,17 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
if (amdgpu_device_supports_boco(dev) &&
- (amdgpu_runtime_pm != 0)) /* enable runpm by default */
+ (amdgpu_runtime_pm != 0)) /* enable runpm by default for boco */
+ adev->runpm = true;
+ else if (amdgpu_device_supports_baco(dev) &&
+ (amdgpu_runtime_pm != 0) &&
+ (adev->asic_type >= CHIP_TOPAZ) &&
+ (adev->asic_type != CHIP_VEGA10) &&
+ (adev->asic_type != CHIP_VEGA20) &&
+ (adev->asic_type != CHIP_ARCTURUS)) /* enable runpm on VI+ */
adev->runpm = true;
else if (amdgpu_device_supports_baco(dev) &&
- (amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 */
+ (amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 on CI */
adev->runpm = true;
/* Call ACPI methods: require modeset init
@@ -1110,14 +1114,15 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
/**
* amdgpu_get_vblank_counter_kms - get frame count
*
- * @dev: drm dev pointer
- * @pipe: crtc to get the frame count from
+ * @crtc: crtc to get the frame count from
*
* Gets the frame count on the requested crtc (all asics).
* Returns frame count on success, -EINVAL on failure.
*/
-u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
+u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct amdgpu_device *adev = dev->dev_private;
int vpos, hpos, stat;
u32 count;
@@ -1177,14 +1182,15 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
/**
* amdgpu_enable_vblank_kms - enable vblank interrupt
*
- * @dev: drm dev pointer
- * @pipe: crtc to enable vblank interrupt for
+ * @crtc: crtc to enable vblank interrupt for
*
* Enable the interrupt on the requested crtc (all asics).
* Returns 0 on success, -EINVAL on failure.
*/
-int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
+int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct amdgpu_device *adev = dev->dev_private;
int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
@@ -1194,13 +1200,14 @@ int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
/**
* amdgpu_disable_vblank_kms - disable vblank interrupt
*
- * @dev: drm dev pointer
- * @pipe: crtc to disable vblank interrupt for
+ * @crtc: crtc to disable vblank interrupt for
*
* Disable the interrupt on the requested crtc (all asics).
*/
-void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
+void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct amdgpu_device *adev = dev->dev_private;
int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
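With the vblank hooks converted to take a struct drm_crtc, the old (dev, pipe) pair is recovered from crtc->dev and crtc->index, and the callbacks can be wired into the per-CRTC function table instead of struct drm_driver. A sketch of that wiring, assuming assignment in a drm_crtc_funcs instance:

static const struct drm_crtc_funcs amdgpu_crtc_funcs_sketch = {
	/* the three converted hooks slot straight into the generic
	 * drm_crtc_funcs vblank entry points
	 */
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = amdgpu_enable_vblank_kms,
	.disable_vblank = amdgpu_disable_vblank_kms,
};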
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
index 676c48c02d77..ead3dc572ec5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
@@ -32,7 +32,6 @@ int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev)
};
struct ras_fs_if fs_info = {
.sysfs_name = "mmhub_err_count",
- .debugfs_name = "mmhub_err_inject",
};
if (!adev->mmhub.ras_if) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 1cd78940cf82..e89fb35fec71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -26,6 +26,7 @@ struct amdgpu_mmhub_funcs {
int (*ras_late_init)(struct amdgpu_device *adev);
void (*query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
+ void (*reset_ras_error_count)(struct amdgpu_device *adev);
};
struct amdgpu_mmhub {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index eb9975f4decb..37ba07e2feb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -612,6 +612,11 @@ void amdgpu_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc);
+bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos,
+ int *hpos, ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
+
/* fbdev layer */
int amdgpu_fbdev_init(struct amdgpu_device *adev);
void amdgpu_fbdev_fini(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index 7d5c3a9de9ea..6201a5f4b4fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -30,7 +30,6 @@ int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev)
};
struct ras_fs_if fs_info = {
.sysfs_name = "pcie_bif_err_count",
- .debugfs_name = "pcie_bif_err_inject",
};
if (!adev->nbio.ras_if) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e3f16b49e970..c687f5415b3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -31,6 +31,7 @@
*/
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/dma-buf.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
@@ -925,6 +926,9 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return 0;
}
+ if (bo->tbo.base.import_attach)
+ dma_buf_pin(bo->tbo.base.import_attach);
+
bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
/* force to pin into visible video ram */
if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
@@ -1008,6 +1012,9 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
amdgpu_bo_subtract_pin_size(bo);
+ if (bo->tbo.base.import_attach)
+ dma_buf_unpin(bo->tbo.base.import_attach);
+
for (i = 0; i < bo->placement.num_placement; i++) {
bo->placements[i].lpfn = 0;
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
@@ -1274,6 +1281,10 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
amdgpu_bo_kunmap(abo);
+ if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
+ bo->mem.mem_type != TTM_PL_SYSTEM)
+ dma_buf_move_notify(abo->tbo.base.dma_buf);
+
/* remember the eviction */
if (evict)
atomic64_inc(&adev->num_evictions);
@@ -1307,6 +1318,12 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (abo->kfd_bo)
amdgpu_amdkfd_unreserve_memory_limit(abo);
+ /* We only remove the fence if the resv has been individualized. */
+ WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
+ && bo->base.resv != &bo->base._resv);
+ if (bo->base.resv == &bo->base._resv)
+ amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
+
if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
return;
@@ -1403,30 +1420,52 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
}
/**
- * amdgpu_sync_wait_resv - Wait for BO reservation fences
+ * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
*
- * @bo: buffer object
+ * @adev: amdgpu device pointer
+ * @resv: reservation object to sync to
+ * @sync_mode: synchronization mode
* @owner: fence owner
* @intr: Whether the wait is interruptible
*
+ * Extracts the fences from the reservation object and waits for them to finish.
+ *
* Returns:
* 0 on success, errno otherwise.
*/
-int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
+int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode, void *owner,
+ bool intr)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_sync sync;
int r;
amdgpu_sync_create(&sync);
- amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false);
+ amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
r = amdgpu_sync_wait(&sync, intr);
amdgpu_sync_free(&sync);
-
return r;
}
/**
+ * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
+ * @bo: buffer object to wait for
+ * @owner: fence owner
+ * @intr: Whether the wait is interruptible
+ *
+ * Wrapper to wait for fences in a BO.
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
+int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+ return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
+ AMDGPU_SYNC_NE_OWNER, owner, intr);
+}
+
+/**
* amdgpu_bo_gpu_offset - return GPU offset of bo
* @bo: amdgpu object for which we query the offset
*
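amdgpu_bo_sync_wait_resv() decouples the wait from a particular BO, so callers can sync against any reservation object under an explicit policy. A usage sketch, assuming a VM page-table update that must wait for everyone except the VM itself:

static int example_wait_before_pt_update(struct amdgpu_device *adev,
					 struct dma_resv *resv)
{
	/* AMDGPU_SYNC_NE_OWNER: skip fences owned by the VM on this
	 * device, wait (interruptibly) for everything else
	 */
	return amdgpu_bo_sync_wait_resv(adev, resv, AMDGPU_SYNC_NE_OWNER,
					AMDGPU_FENCE_OWNER_VM, true);
}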
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 36dec51d1ef1..5e39ecd8cc28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -277,6 +277,9 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
+int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode, void *owner,
+ bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
@@ -316,6 +319,7 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev,
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);
#endif
+int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
bool amdgpu_bo_support_uswc(u64 bo_flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index b03b1eb7ba04..bc3cf04a1a94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -41,8 +41,6 @@
#include "hwmgr.h"
#define WIDTH_4K 3840
-static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
-
static const struct cg_flag_name clocks[] = {
{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
@@ -3398,11 +3396,6 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
DRM_ERROR("failed to create device file unique_id\n");
return ret;
}
- ret = amdgpu_debugfs_pm_init(adev);
- if (ret) {
- DRM_ERROR("Failed to register debugfs file for dpm!\n");
- return ret;
- }
if ((adev->asic_type >= CHIP_VEGA10) &&
!(adev->flags & AMD_IS_APU)) {
@@ -3669,7 +3662,7 @@ static const struct drm_info_list amdgpu_pm_info_list[] = {
};
#endif
-static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
+int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
index 3da1da277805..5db0ef86e84c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
@@ -43,4 +43,6 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
+int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
index 07914e34bc25..1311d6aec5d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -52,7 +52,7 @@ static int amdgpu_perf_event_init(struct perf_event *event)
return -ENOENT;
/* update the hw_perf_event struct with config data */
- hwc->conf = event->attr.config;
+ hwc->config = event->attr.config;
return 0;
}
@@ -74,9 +74,9 @@ static void amdgpu_perf_start(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
if (!(flags & PERF_EF_RELOAD))
- pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 1);
+ pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 1);
- pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 0);
+ pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 0);
break;
default:
break;
@@ -101,7 +101,7 @@ static void amdgpu_perf_read(struct perf_event *event)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- pe->adev->df.funcs->pmc_get_count(pe->adev, hwc->conf,
+ pe->adev->df.funcs->pmc_get_count(pe->adev, hwc->config,
&count);
break;
default:
@@ -126,7 +126,7 @@ static void amdgpu_perf_stop(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- pe->adev->df.funcs->pmc_stop(pe->adev, hwc->conf, 0);
+ pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, 0);
break;
default:
break;
@@ -156,7 +156,8 @@ static int amdgpu_perf_add(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- retval = pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 1);
+ retval = pe->adev->df.funcs->pmc_start(pe->adev,
+ hwc->config, 1);
break;
default:
return 0;
@@ -184,7 +185,7 @@ static void amdgpu_perf_del(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- pe->adev->df.funcs->pmc_stop(pe->adev, hwc->conf, 1);
+ pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, 1);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 3a1570dafe34..dc42086a672b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -24,6 +24,7 @@
*/
#include <linux/firmware.h>
+#include <linux/dma-mapping.h>
#include "amdgpu.h"
#include "amdgpu_psp.h"
@@ -38,6 +39,42 @@
static void psp_set_funcs(struct amdgpu_device *adev);
+static int psp_sysfs_init(struct amdgpu_device *adev);
+static void psp_sysfs_fini(struct amdgpu_device *adev);
+
+/*
+ * Due to DF Cstate management being centralized in PMFW, the firmware
+ * loading sequence is updated as below:
+ * - Load KDB
+ * - Load SYS_DRV
+ * - Load tOS
+ * - Load PMFW
+ * - Setup TMR
+ * - Load other non-psp fw
+ * - Load ASD
+ * - Load XGMI/RAS/HDCP/DTM TA if any
+ *
+ * This new sequence is required for
+ * - Arcturus
+ * - Navi12 and onwards
+ */
+static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ psp->pmfw_centralized_cstate_management = false;
+
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ if ((adev->asic_type == CHIP_ARCTURUS) ||
+ (adev->asic_type >= CHIP_NAVI12))
+ psp->pmfw_centralized_cstate_management = true;
+}
+
static int psp_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -75,6 +112,8 @@ static int psp_early_init(void *handle)
psp->adev = adev;
+ psp_check_pmfw_centralized_cstate_management(psp);
+
return 0;
}
@@ -101,6 +140,13 @@ static int psp_sw_init(void *handle)
return ret;
}
+ if (adev->asic_type == CHIP_NAVI10) {
+ ret = psp_sysfs_init(adev);
+ if (ret) {
+ return ret;
+ }
+ }
+
return 0;
}
@@ -113,10 +159,18 @@ static int psp_sw_fini(void *handle)
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
adev->psp.asd_fw = NULL;
+ if (adev->psp.cap_fw) {
+ release_firmware(adev->psp.cap_fw);
+ adev->psp.cap_fw = NULL;
+ }
if (adev->psp.ta_fw) {
release_firmware(adev->psp.ta_fw);
adev->psp.ta_fw = NULL;
}
+
+ if (adev->asic_type == CHIP_NAVI10)
+ psp_sysfs_fini(adev);
+
return 0;
}
@@ -150,6 +204,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
int ret;
int index;
int timeout = 2000;
+ bool ras_intr = false;
mutex_lock(&psp->mutex);
@@ -174,7 +229,8 @@ psp_cmd_submit_buf(struct psp_context *psp,
* because gpu reset thread triggered and lock resource should
* be released for psp resume sequence.
*/
- if (amdgpu_ras_intr_triggered())
+ ras_intr = amdgpu_ras_intr_triggered();
+ if (ras_intr)
break;
msleep(1);
amdgpu_asic_invalidate_hdp(psp->adev, NULL);
@@ -187,14 +243,14 @@ psp_cmd_submit_buf(struct psp_context *psp,
* during psp initialization to avoid breaking hw_init and it doesn't
* return -EINVAL.
*/
- if (psp->cmd_buf_mem->resp.status || !timeout) {
+ if ((psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
if (ucode)
DRM_WARN("failed to load ucode id (%d) ",
ucode->ucode_id);
DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
psp->cmd_buf_mem->cmd_id,
psp->cmd_buf_mem->resp.status);
- if (!timeout) {
+ if ((ucode->ucode_id == AMDGPU_UCODE_ID_CAP) || !timeout) {
mutex_unlock(&psp->mutex);
return -EINVAL;
}
@@ -558,7 +614,7 @@ int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
}
-static int psp_xgmi_terminate(struct psp_context *psp)
+int psp_xgmi_terminate(struct psp_context *psp)
{
int ret;
@@ -579,7 +635,7 @@ static int psp_xgmi_terminate(struct psp_context *psp)
return 0;
}
-static int psp_xgmi_initialize(struct psp_context *psp)
+int psp_xgmi_initialize(struct psp_context *psp)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
@@ -1013,6 +1069,30 @@ static int psp_dtm_initialize(struct psp_context *psp)
return 0;
}
+static int psp_dtm_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the unloading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
/*
@@ -1037,7 +1117,7 @@ static int psp_dtm_terminate(struct psp_context *psp)
if (!psp->dtm_context.dtm_initialized)
return 0;
- ret = psp_hdcp_unload(psp);
+ ret = psp_dtm_unload(psp);
if (ret)
return ret;
@@ -1057,7 +1137,7 @@ static int psp_hw_start(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev;
int ret;
- if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) {
+ if (!amdgpu_sriov_vf(adev)) {
if (psp->kdb_bin_size &&
(psp->funcs->bootloader_load_kdb != NULL)) {
ret = psp_bootloader_load_kdb(psp);
@@ -1092,10 +1172,17 @@ static int psp_hw_start(struct psp_context *psp)
return ret;
}
- ret = psp_tmr_load(psp);
- if (ret) {
- DRM_ERROR("PSP load tmr failed!\n");
- return ret;
+ /*
+ * For those ASICs with DF Cstate management centralized
+ * to PMFW, TMR setup should be performed after the PMFW is
+ * loaded and before other non-psp firmware is loaded.
+ */
+ if (!psp->pmfw_centralized_cstate_management) {
+ ret = psp_tmr_load(psp);
+ if (ret) {
+ DRM_ERROR("PSP load tmr failed!\n");
+ return ret;
+ }
}
return 0;
@@ -1105,6 +1192,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
enum psp_gfx_fw_type *type)
{
switch (ucode->ucode_id) {
+ case AMDGPU_UCODE_ID_CAP:
+ *type = GFX_FW_TYPE_CAP;
+ break;
case AMDGPU_UCODE_ID_SDMA0:
*type = GFX_FW_TYPE_SDMA0;
break;
@@ -1292,9 +1382,10 @@ static int psp_np_fw_load(struct psp_context *psp)
struct amdgpu_firmware_info *ucode;
struct amdgpu_device* adev = psp->adev;
- if (psp->autoload_supported) {
+ if (psp->autoload_supported ||
+ psp->pmfw_centralized_cstate_management) {
ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
- if (!ucode->fw)
+ if (!ucode->fw || amdgpu_sriov_vf(adev))
goto out;
ret = psp_execute_np_fw_load(psp, ucode);
@@ -1302,6 +1393,14 @@ static int psp_np_fw_load(struct psp_context *psp)
return ret;
}
+ if (psp->pmfw_centralized_cstate_management) {
+ ret = psp_tmr_load(psp);
+ if (ret) {
+ DRM_ERROR("PSP load tmr failed!\n");
+ return ret;
+ }
+ }
+
out:
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
@@ -1309,7 +1408,9 @@ out:
continue;
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
- (psp_smu_reload_quirk(psp) || psp->autoload_supported))
+ (psp_smu_reload_quirk(psp) ||
+ psp->autoload_supported ||
+ psp->pmfw_centralized_cstate_management))
continue;
if (amdgpu_sriov_vf(adev) &&
@@ -1420,16 +1521,6 @@ skip_memalloc:
return ret;
}
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
- ret = psp_xgmi_initialize(psp);
- /* Warning the XGMI seesion initialize failure
- * Instead of stop driver initialization
- */
- if (ret)
- dev_err(psp->adev->dev,
- "XGMI: Failed to initialize XGMI session\n");
- }
-
if (psp->adev->psp.ta_fw) {
ret = psp_ras_initialize(psp);
if (ret)
@@ -1494,10 +1585,6 @@ static int psp_hw_fini(void *handle)
void *tmr_buf;
void **pptr;
- if (adev->gmc.xgmi.num_physical_nodes > 1 &&
- psp->xgmi_context.initialized == 1)
- psp_xgmi_terminate(psp);
-
if (psp->adev->psp.ta_fw) {
psp_ras_terminate(psp);
psp_dtm_terminate(psp);
@@ -1753,6 +1840,97 @@ static int psp_set_powergating_state(void *handle,
return 0;
}
+static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ uint32_t fw_ver;
+ int ret;
+
+ if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
+ DRM_INFO("PSP block is not ready yet.");
+ return -EBUSY;
+ }
+
+ mutex_lock(&adev->psp.mutex);
+ ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
+ mutex_unlock(&adev->psp.mutex);
+
+ if (ret) {
+ DRM_ERROR("Failed to read USBC PD FW, err = %d", ret);
+ return ret;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver);
+}
+
+static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ int ret;
+ char fw_name[100];
+ const struct firmware *usbc_pd_fw;
+
+ if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
+ DRM_INFO("PSP block is not ready yet.");
+ return -EBUSY;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
+ ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
+ if (ret)
+ goto fail;
+
+ /* We need contiguous physical mem to place the FW for psp to access */
+ cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL);
+
+ ret = dma_mapping_error(adev->dev, dma_addr);
+ if (ret)
+ goto rel_buf;
+
+ memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
+
+ /*
+ * x86 specific workaround.
+ * Without it the buffer is invisible in PSP.
+ *
+ * TODO Remove once PSP starts snooping CPU cache
+ */
+#ifdef CONFIG_X86
+ clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1)));
+#endif
+
+ mutex_lock(&adev->psp.mutex);
+ ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr);
+ mutex_unlock(&adev->psp.mutex);
+
+rel_buf:
+ dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr);
+ release_firmware(usbc_pd_fw);
+
+fail:
+ if (ret) {
+ DRM_ERROR("Failed to load USBC PD FW, err = %d", ret);
+ return ret;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR,
+ psp_usbc_pd_fw_sysfs_read,
+ psp_usbc_pd_fw_sysfs_write);
+
const struct amd_ip_funcs psp_ip_funcs = {
.name = "psp",
.early_init = psp_early_init,
@@ -1771,6 +1949,21 @@ const struct amd_ip_funcs psp_ip_funcs = {
.set_powergating_state = psp_set_powergating_state,
};
+static int psp_sysfs_init(struct amdgpu_device *adev)
+{
+ int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw);
+
+ if (ret)
+ DRM_ERROR("Failed to create USBC PD FW control file!");
+
+ return ret;
+}
+
+static void psp_sysfs_fini(struct amdgpu_device *adev)
+{
+ device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
+}
+
static const struct amdgpu_psp_funcs psp_funcs = {
.check_fw_loading_status = psp_check_fw_loading_status,
};
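The new attribute gives Navi10 boards a USB-C PD firmware interface: a read reports the current version in hex, a write of a firmware file name (resolved under the amdgpu/ firmware directory) stages the image in a DMA-coherent buffer and hands it to the PSP. A hypothetical userspace sketch of the read side — the sysfs path is assumed for card0:

#include <stdio.h>

int main(void)
{
	char ver[16] = {0};
	FILE *f = fopen("/sys/class/drm/card0/device/usbc_pd_fw", "r");

	if (!f)
		return 1;
	if (fgets(ver, sizeof(ver), f))
		printf("USBC PD FW version: %s", ver);
	fclose(f);
	return 0;
}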
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 611021514c52..4a4d8f2ccca2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -114,6 +114,8 @@ struct psp_funcs
int (*mem_training)(struct psp_context *psp, uint32_t ops);
uint32_t (*ring_get_wptr)(struct psp_context *psp);
void (*ring_set_wptr)(struct psp_context *psp, uint32_t value);
+ int (*load_usbc_pd_fw)(struct psp_context *psp, dma_addr_t dma_addr);
+ int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver);
};
#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64
@@ -250,6 +252,9 @@ struct psp_context
uint32_t asd_ucode_size;
uint8_t *asd_start_addr;
+ /* cap firmware */
+ const struct firmware *cap_fw;
+
/* fence buffer */
struct amdgpu_bo *fence_buf_bo;
uint64_t fence_buf_mc_addr;
@@ -264,6 +269,8 @@ struct psp_context
atomic_t fence_value;
/* flag to mark whether gfx fw autoload is supported or not */
bool autoload_supported;
+ /* flag to mark whether df cstate management centralized to PMFW */
+ bool pmfw_centralized_cstate_management;
/* xgmi ta firmware and buffer */
const struct firmware *ta_fw;
@@ -349,6 +356,14 @@ struct amdgpu_psp_funcs {
#define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp))
#define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value))
+#define psp_load_usbc_pd_fw(psp, dma_addr) \
+ ((psp)->funcs->load_usbc_pd_fw ? \
+ (psp)->funcs->load_usbc_pd_fw((psp), (dma_addr)) : -EINVAL)
+
+#define psp_read_usbc_pd_fw(psp, fw_ver) \
+ ((psp)->funcs->read_usbc_pd_fw ? \
+ (psp)->funcs->read_usbc_pd_fw((psp), fw_ver) : -EINVAL)
+
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
@@ -362,6 +377,8 @@ int psp_gpu_reset(struct amdgpu_device *adev);
int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
uint64_t cmd_gpu_addr, int cmd_size);
+int psp_xgmi_initialize(struct psp_context *psp);
+int psp_xgmi_terminate(struct psp_context *psp);
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
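The load/read macros follow the driver's optional-callback convention: when a PSP generation doesn't implement the hook, the macro itself evaluates to -EINVAL, so callers need no per-ASIC checks. A sketch under that assumption:

static int example_query_pd_fw(struct psp_context *psp)
{
	uint32_t fw_ver;
	int ret = psp_read_usbc_pd_fw(psp, &fw_ver);

	/* -EINVAL here may simply mean the hook isn't implemented
	 * on this ASIC; treat it as "no PD firmware to report"
	 */
	return ret == -EINVAL ? 0 : ret;
}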
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index cef94e2169fe..43055a01f35e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -31,6 +31,7 @@
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
+#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
const char *ras_error_string[] = {
@@ -720,6 +721,9 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
if (adev->nbio.funcs->query_ras_error_count)
adev->nbio.funcs->query_ras_error_count(adev, &err_data);
break;
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ amdgpu_xgmi_query_ras_error_count(adev, &err_data);
+ break;
default:
break;
}
@@ -742,20 +746,6 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
return 0;
}
-uint64_t get_xgmi_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr)
-{
- uint32_t df_inst_id;
-
- if ((!adev->df.funcs) ||
- (!adev->df.funcs->get_df_inst_id) ||
- (!adev->df.funcs->get_dram_base_addr))
- return addr;
-
- df_inst_id = adev->df.funcs->get_df_inst_id(adev);
-
- return addr + adev->df.funcs->get_dram_base_addr(adev, df_inst_id);
-}
-
/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
struct ras_inject_if *info)
@@ -775,8 +765,9 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
/* Calculate XGMI relative offset */
if (adev->gmc.xgmi.num_physical_nodes > 1) {
- block_info.address = get_xgmi_relative_phy_addr(adev,
- block_info.address);
+ block_info.address =
+ amdgpu_xgmi_get_relative_phy_addr(adev,
+ block_info.address);
}
switch (info->head.block) {
@@ -1122,6 +1113,32 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
&amdgpu_ras_debugfs_ops);
}
+void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+ struct ras_fs_if fs_info;
+
+ /*
+ * it won't be called in the resume path, so there is no need
+ * to check suspend and gpu reset status
+ */
+ if (!con)
+ return;
+
+ amdgpu_ras_debugfs_create_ctrl_node(adev);
+
+ list_for_each_entry(obj, &con->head, node) {
+ if (amdgpu_ras_is_supported(adev, obj->head.block) &&
+ (obj->attr_inuse == 1)) {
+ sprintf(fs_info.debugfs_name, "%s_err_inject",
+ ras_block_str(obj->head.block));
+ fs_info.head = obj->head;
+ amdgpu_ras_debugfs_create(adev, &fs_info);
+ }
+ }
+}
+
void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
struct ras_common_if *head)
{
@@ -1154,7 +1171,6 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{
amdgpu_ras_sysfs_create_feature_node(adev);
- amdgpu_ras_debugfs_create_ctrl_node(adev);
return 0;
}
@@ -1319,6 +1335,33 @@ static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
}
/* ih end */
+/* traverse all IPs except NBIO to query error counters */
+static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+
+ if (!con)
+ return;
+
+ list_for_each_entry(obj, &con->head, node) {
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+
+ /*
+ * The PCIE_BIF IP block has its own ISR for the RAS controller
+ * interrupt, and its RAS counters are queried there. So skip
+ * that block in this common sync-flood interrupt path.
+ */
+ if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
+ continue;
+
+ amdgpu_ras_error_query(adev, &info);
+ }
+}
+
/* recovery begin */
/* return 0 on success.
@@ -1373,6 +1416,12 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct amdgpu_ras *ras =
container_of(work, struct amdgpu_ras, recovery_work);
+ /*
+ * Query and print the non-zero error counters per IP block for
+ * awareness before recovering the GPU.
+ */
+ amdgpu_ras_log_on_err_counter(ras->adev);
+
if (amdgpu_device_should_recover_gpu(ras->adev))
amdgpu_device_gpu_recover(ras->adev, 0);
atomic_set(&ras->in_recovery, 0);
@@ -1713,18 +1762,30 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
*hw_supported = 0;
*supported = 0;
- if (amdgpu_sriov_vf(adev) ||
+ if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
(adev->asic_type != CHIP_VEGA20 &&
adev->asic_type != CHIP_ARCTURUS))
return;
- if (adev->is_atom_fw &&
- (amdgpu_atomfirmware_mem_ecc_supported(adev) ||
- amdgpu_atomfirmware_sram_ecc_supported(adev)))
- *hw_supported = AMDGPU_RAS_BLOCK_MASK;
+ if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
+ DRM_INFO("HBM ECC is active.\n");
+ *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+ } else
+ DRM_INFO("HBM ECC is not presented.\n");
+
+ if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
+ DRM_INFO("SRAM ECC is active.\n");
+ *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+ } else
+ DRM_INFO("SRAM ECC is not presented.\n");
+
+ /* hw_supported needs to be aligned with RAS block mask. */
+ *hw_supported &= AMDGPU_RAS_BLOCK_MASK;
*supported = amdgpu_ras_enable == 0 ?
- 0 : *hw_supported & amdgpu_ras_mask;
+ 0 : *hw_supported & amdgpu_ras_mask;
}
int amdgpu_ras_init(struct amdgpu_device *adev)
@@ -1825,8 +1886,6 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
goto interrupt;
}
- amdgpu_ras_debugfs_create(adev, fs_info);
-
r = amdgpu_ras_sysfs_create(adev, fs_info);
if (r)
goto sysfs;
@@ -1835,7 +1894,6 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
cleanup:
amdgpu_ras_sysfs_remove(adev, ras_block);
sysfs:
- amdgpu_ras_debugfs_remove(adev, ras_block);
if (ih_info->cb)
amdgpu_ras_interrupt_remove_handler(adev, ih_info);
interrupt:
@@ -1852,7 +1910,6 @@ void amdgpu_ras_late_fini(struct amdgpu_device *adev,
return;
amdgpu_ras_sysfs_remove(adev, ras_block);
- amdgpu_ras_debugfs_remove(adev, ras_block);
if (ih_info->cb)
amdgpu_ras_interrupt_remove_handler(adev, ih_info);
amdgpu_ras_feature_enable(adev, ras_block, 0);
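The support mask is now assembled per capability: HBM ECC contributes only the UMC and DF bits, SRAM ECC contributes the complement of those two, and the result is clamped to AMDGPU_RAS_BLOCK_MASK. A small sketch of that composition, with block enum values assumed from amdgpu_ras.h:

static uint32_t ras_hw_supported_sketch(bool hbm_ecc, bool sram_ecc)
{
	const uint32_t umc_df = 1 << AMDGPU_RAS_BLOCK__UMC |
				1 << AMDGPU_RAS_BLOCK__DF;
	uint32_t mask = 0;

	if (hbm_ecc)		/* HBM ECC covers UMC and DF only */
		mask |= umc_df;
	if (sram_ecc)		/* SRAM ECC covers every other block */
		mask |= ~umc_df;

	return mask & AMDGPU_RAS_BLOCK_MASK;
}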
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index a5fe29a9373e..55c3eceb390d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -592,6 +592,8 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
struct ras_fs_if *head);
+void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev);
+
void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
struct ras_common_if *head);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 2a8e04895595..c0096097bbcf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -25,10 +25,11 @@
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include <linux/bits.h>
-#include "smu_v11_0_i2c.h"
+#include "atom.h"
-#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
-#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0
+#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0
+#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
+#define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342 0xA0
/*
* The two macros below represent the actual size in bytes that
@@ -55,6 +56,45 @@
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control))->adev
+static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
+ uint16_t *i2c_addr)
+{
+ struct atom_context *atom_ctx = adev->mode_info.atom_context;
+
+ if (!i2c_addr || !atom_ctx)
+ return false;
+
+ if (strnstr(atom_ctx->vbios_version,
+ "D342",
+ sizeof(atom_ctx->vbios_version)))
+ *i2c_addr = EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342;
+ else
+ *i2c_addr = EEPROM_I2C_TARGET_ADDR_ARCTURUS;
+
+ return true;
+}
+
+static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
+ uint16_t *i2c_addr)
+{
+ if (!i2c_addr)
+ return false;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ *i2c_addr = EEPROM_I2C_TARGET_ADDR_VEGA20;
+ break;
+
+ case CHIP_ARCTURUS:
+ return __get_eeprom_i2c_addr_arct(adev, i2c_addr);
+
+ default:
+ return false;
+ }
+
+ return true;
+}
+
static void __encode_table_header_to_buff(struct amdgpu_ras_eeprom_table_header *hdr,
unsigned char *buff)
{
@@ -83,6 +123,7 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
unsigned char *buff)
{
int ret = 0;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
struct i2c_msg msg = {
.addr = 0,
.flags = 0,
@@ -96,15 +137,13 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
msg.addr = control->i2c_address;
- ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
+ ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
if (ret < 1)
DRM_ERROR("Failed to write EEPROM table header, ret:%d", ret);
return ret;
}
-
-
static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control)
{
int i;
@@ -212,32 +251,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
.buf = buff,
};
- mutex_init(&control->tbl_mutex);
-
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- control->i2c_address = EEPROM_I2C_TARGET_ADDR_VEGA20;
- ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
- break;
-
- case CHIP_ARCTURUS:
- control->i2c_address = EEPROM_I2C_TARGET_ADDR_ARCTURUS;
- ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor);
- break;
+ /* Verify i2c adapter is initialized */
+ if (!adev->pm.smu_i2c.algo)
+ return -ENOENT;
- default:
- return 0;
- }
+ if (!__get_eeprom_i2c_addr(adev, &control->i2c_address))
+ return -EINVAL;
- if (ret) {
- DRM_ERROR("Failed to init I2C controller, ret:%d", ret);
- return ret;
- }
+ mutex_init(&control->tbl_mutex);
msg.addr = control->i2c_address;
-
/* Read/Create table header from EEPROM address 0 */
- ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
+ ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
if (ret < 1) {
DRM_ERROR("Failed to read EEPROM table header, ret:%d", ret);
return ret;
@@ -263,23 +288,6 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
return ret == 1 ? 0 : -EIO;
}
-void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control)
-{
- struct amdgpu_device *adev = to_amdgpu_device(control);
-
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor);
- break;
- case CHIP_ARCTURUS:
- smu_i2c_eeprom_fini(&adev->smu, &control->eeprom_accessor);
- break;
-
- default:
- return;
- }
-}
-
static void __encode_table_record_to_buff(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *record,
unsigned char *buff)
@@ -436,7 +444,7 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
control->next_addr += EEPROM_TABLE_RECORD_SIZE;
}
- ret = i2c_transfer(&control->eeprom_accessor, msgs, num);
+ ret = i2c_transfer(&adev->pm.smu_i2c, msgs, num);
if (ret < 1) {
DRM_ERROR("Failed to process EEPROM table records, ret:%d", ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index ca78f812d436..7e8647a05df7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -44,7 +44,6 @@ struct amdgpu_ras_eeprom_table_header {
struct amdgpu_ras_eeprom_control {
struct amdgpu_ras_eeprom_table_header tbl_hdr;
- struct i2c_adapter eeprom_accessor;
uint32_t next_addr;
unsigned int num_recs;
struct mutex tbl_mutex;
@@ -79,7 +78,6 @@ struct eeprom_table_record {
}__attribute__((__packed__));
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control);
-void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control);
int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index e5c83e164d82..a7e1d0425ed0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -48,9 +48,6 @@
* wptr. The GPU then starts fetching commands and executes
* them until the pointers are equal again.
*/
-static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
- struct amdgpu_ring *ring);
-static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
/**
* amdgpu_ring_alloc - allocate space on the ring buffer
@@ -154,76 +151,6 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
}
/**
- * amdgpu_ring_priority_put - restore a ring's priority
- *
- * @ring: amdgpu_ring structure holding the information
- * @priority: target priority
- *
- * Release a request for executing at @priority
- */
-void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
- enum drm_sched_priority priority)
-{
- int i;
-
- if (!ring->funcs->set_priority)
- return;
-
- if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
- return;
-
- /* no need to restore if the job is already at the lowest priority */
- if (priority == DRM_SCHED_PRIORITY_NORMAL)
- return;
-
- mutex_lock(&ring->priority_mutex);
- /* something higher prio is executing, no need to decay */
- if (ring->priority > priority)
- goto out_unlock;
-
- /* decay priority to the next level with a job available */
- for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) {
- if (i == DRM_SCHED_PRIORITY_NORMAL
- || atomic_read(&ring->num_jobs[i])) {
- ring->priority = i;
- ring->funcs->set_priority(ring, i);
- break;
- }
- }
-
-out_unlock:
- mutex_unlock(&ring->priority_mutex);
-}
-
-/**
- * amdgpu_ring_priority_get - change the ring's priority
- *
- * @ring: amdgpu_ring structure holding the information
- * @priority: target priority
- *
- * Request a ring's priority to be raised to @priority (refcounted).
- */
-void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
- enum drm_sched_priority priority)
-{
- if (!ring->funcs->set_priority)
- return;
-
- if (atomic_inc_return(&ring->num_jobs[priority]) <= 0)
- return;
-
- mutex_lock(&ring->priority_mutex);
- if (priority <= ring->priority)
- goto out_unlock;
-
- ring->priority = priority;
- ring->funcs->set_priority(ring, priority);
-
-out_unlock:
- mutex_unlock(&ring->priority_mutex);
-}
-
-/**
* amdgpu_ring_init - init driver ring struct.
*
* @adev: amdgpu_device pointer
@@ -334,10 +261,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
atomic_set(&ring->num_jobs[i], 0);
- if (amdgpu_debugfs_ring_init(adev, ring)) {
- DRM_ERROR("Failed to register debugfs file for rings !\n");
- }
-
return 0;
}
@@ -351,12 +274,13 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
*/
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
- ring->sched.ready = false;
/* Not to finish a ring which is not initialized */
if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
return;
+ ring->sched.ready = false;
+
amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
@@ -367,8 +291,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
&ring->gpu_addr,
(void **)&ring->ring);
- amdgpu_debugfs_ring_fini(ring);
-
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL;
ring->me = 0;
@@ -485,8 +407,8 @@ static const struct file_operations amdgpu_debugfs_ring_fops = {
#endif
-static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
+int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev->ddev->primary;
@@ -507,13 +429,6 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
return 0;
}
-static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
-{
-#if defined(CONFIG_DEBUG_FS)
- debugfs_remove(ring->ent);
-#endif
-}
-
/**
* amdgpu_ring_test_helper - tests ring and set sched readiness status
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 930316e60155..9a443013d70d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -167,9 +167,6 @@ struct amdgpu_ring_funcs {
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask);
void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
- /* priority functions */
- void (*set_priority) (struct amdgpu_ring *ring,
- enum drm_sched_priority priority);
/* Try to soft recover the ring to make the fence signal */
void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
int (*preempt_ib)(struct amdgpu_ring *ring);
@@ -222,6 +219,7 @@ struct amdgpu_ring {
struct mutex priority_mutex;
/* protected by priority_mutex */
int priority;
+ bool has_high_prio;
#if defined(CONFIG_DEBUG_FS)
struct dentry *ent;
@@ -258,10 +256,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
-void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
- enum drm_sched_priority priority);
-void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
- enum drm_sched_priority priority);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, struct amdgpu_irq_src *irq_src,
unsigned irq_type);
@@ -328,4 +322,8 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
+int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index d3d4707f2168..60bb3e8b3118 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -126,6 +126,9 @@ struct amdgpu_rlc_funcs {
void (*stop)(struct amdgpu_device *adev);
void (*reset)(struct amdgpu_device *adev);
void (*start)(struct amdgpu_device *adev);
+ void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
+ void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v);
+ bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
};
struct amdgpu_rlc {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index a2ee30b16212..250a309e4dee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -70,7 +70,8 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
uint32_t index = 0;
int r;
- if (vmid == 0 || !amdgpu_mcbp)
+ /* don't enable OS preemption on SDMA under SRIOV */
+ if (amdgpu_sriov_vf(adev) || vmid == 0 || !amdgpu_mcbp)
return 0;
r = amdgpu_sdma_get_index_from_ring(ring, &index);
@@ -92,7 +93,6 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info;
struct ras_fs_if fs_info = {
.sysfs_name = "sdma_err_count",
- .debugfs_name = "sdma_err_inject",
};
if (!ih_info)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 485335267d78..4b352206354b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -56,6 +56,7 @@ struct amdgpu_sdma_ras_funcs {
void (*ras_fini)(struct amdgpu_device *adev);
int (*query_ras_error_count)(struct amdgpu_device *adev,
uint32_t instance, void *ras_error_status);
+ void (*reset_ras_error_count)(struct amdgpu_device *adev);
};
struct amdgpu_sdma {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a09b6b9c27d1..b86392253696 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -202,18 +202,17 @@ int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
*
* @sync: sync object to add fences from reservation object to
* @resv: reservation object with embedded fence
- * @explicit_sync: true if we should only sync to the exclusive fence
+ * @mode: how owner affects which fences we sync to
+ * @owner: owner of the planned job submission
*
* Sync to the fence
*/
-int amdgpu_sync_resv(struct amdgpu_device *adev,
- struct amdgpu_sync *sync,
- struct dma_resv *resv,
- void *owner, bool explicit_sync)
+int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+ struct dma_resv *resv, enum amdgpu_sync_mode mode,
+ void *owner)
{
struct dma_resv_list *flist;
struct dma_fence *f;
- void *fence_owner;
unsigned i;
int r = 0;
@@ -229,30 +228,46 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
return r;
for (i = 0; i < flist->shared_count; ++i) {
+ void *fence_owner;
+
f = rcu_dereference_protected(flist->shared[i],
dma_resv_held(resv));
+
+ fence_owner = amdgpu_sync_get_owner(f);
+
+ /* Always sync to moves, no matter what */
+ if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) {
+ r = amdgpu_sync_fence(sync, f, false);
+ if (r)
+ break;
+ }
+
/* We only want to trigger KFD eviction fences on
* evict or move jobs. Skip KFD fences otherwise.
*/
- fence_owner = amdgpu_sync_get_owner(f);
if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
owner != AMDGPU_FENCE_OWNER_UNDEFINED)
continue;
- if (amdgpu_sync_same_dev(adev, f)) {
- /* VM updates only sync with moves but not with user
- * command submissions or KFD evictions fences
- */
- if (owner == AMDGPU_FENCE_OWNER_VM &&
- fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+ /* Ignore fences depending on the sync mode */
+ switch (mode) {
+ case AMDGPU_SYNC_ALWAYS:
+ break;
+
+ case AMDGPU_SYNC_NE_OWNER:
+ if (amdgpu_sync_same_dev(adev, f) &&
+ fence_owner == owner)
continue;
+ break;
- /* Ignore fence from the same owner and explicit one as
- * long as it isn't undefined.
- */
- if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
- (fence_owner == owner || explicit_sync))
+ case AMDGPU_SYNC_EQ_OWNER:
+ if (amdgpu_sync_same_dev(adev, f) &&
+ fence_owner != owner)
continue;
+ break;
+
+ case AMDGPU_SYNC_EXPLICIT:
+ continue;
}
r = amdgpu_sync_fence(sync, f, false);
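The enum replaces the old owner/explicit_sync heuristics with a named policy: ALWAYS takes every fence, NE_OWNER skips same-device fences from the same owner, EQ_OWNER takes only those, and EXPLICIT takes just the exclusive fence. A sketch of the two most common choices, mirroring the call sites converted later in this series:

static int example_sync_choices(struct amdgpu_device *adev,
				struct amdgpu_sync *sync,
				struct dma_resv *resv)
{
	int r;

	/* buffer moves: sync to every fence, whoever owns it */
	r = amdgpu_sync_resv(adev, sync, resv, AMDGPU_SYNC_ALWAYS,
			     AMDGPU_FENCE_OWNER_UNDEFINED);
	if (r)
		return r;

	/* VM updates: skip this VM's own fences, wait for all others */
	return amdgpu_sync_resv(adev, sync, resv, AMDGPU_SYNC_NE_OWNER,
				AMDGPU_FENCE_OWNER_VM);
}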
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index d62c2b81d92b..cfbe5788b8b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -31,6 +31,13 @@ struct dma_resv;
struct amdgpu_device;
struct amdgpu_ring;
+enum amdgpu_sync_mode {
+ AMDGPU_SYNC_ALWAYS,
+ AMDGPU_SYNC_NE_OWNER,
+ AMDGPU_SYNC_EQ_OWNER,
+ AMDGPU_SYNC_EXPLICIT
+};
+
/*
* Container for fences used to sync command submissions.
*/
@@ -43,11 +50,9 @@ void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
bool explicit);
int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence);
-int amdgpu_sync_resv(struct amdgpu_device *adev,
- struct amdgpu_sync *sync,
- struct dma_resv *resv,
- void *owner,
- bool explicit_sync);
+int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+ struct dma_resv *resv, enum amdgpu_sync_mode mode,
+ void *owner);
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dee446278417..c10ae1cdc1b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -60,20 +60,14 @@
#include "amdgpu_ras.h"
#include "bif/bif_4_1_d.h"
+#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
+
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem, unsigned num_pages,
uint64_t offset, unsigned window,
struct amdgpu_ring *ring,
uint64_t *addr);
-static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
-static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
-
-static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- return 0;
-}
-
/**
* amdgpu_init_mem_type - Initialize a memory manager for a specific type of
* memory request.
@@ -1034,7 +1028,7 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
struct amdgpu_ttm_tt *gtt = (void *)ttm;
int r;
- if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
+ if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
uint64_t page_idx = 1;
r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
@@ -1042,7 +1036,10 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
if (r)
goto gart_bind_fail;
- /* Patch mtype of the second part BO */
+ /* The memory type of the first page defaults to UC. Now
+ * modify the memory type to NC from the second page of
+ * the BO onward.
+ */
flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
@@ -1596,7 +1593,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
while (len && pos < adev->gmc.mc_vram_size) {
uint64_t aligned_pos = pos & ~(uint64_t)3;
- uint32_t bytes = 4 - (pos & 3);
+ uint64_t bytes = 4 - (pos & 3);
uint32_t shift = (pos & 3) * 8;
uint32_t mask = 0xffffffff << shift;
@@ -1605,20 +1602,28 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
bytes = len;
}
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
- WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
- if (!write || mask != 0xffffffff)
- value = RREG32_NO_KIQ(mmMM_DATA);
- if (write) {
- value &= ~mask;
- value |= (*(uint32_t *)buf << shift) & mask;
- WREG32_NO_KIQ(mmMM_DATA, value);
- }
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- if (!write) {
- value = (value & mask) >> shift;
- memcpy(buf, &value, bytes);
+ if (mask != 0xffffffff) {
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
+ if (!write || mask != 0xffffffff)
+ value = RREG32_NO_KIQ(mmMM_DATA);
+ if (write) {
+ value &= ~mask;
+ value |= (*(uint32_t *)buf << shift) & mask;
+ WREG32_NO_KIQ(mmMM_DATA, value);
+ }
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ if (!write) {
+ value = (value & mask) >> shift;
+ memcpy(buf, &value, bytes);
+ }
+ } else {
+ bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
+ bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);
+
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
+ bytes, write);
}
ret += bytes;
@@ -1638,7 +1643,6 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
- .invalidate_caches = &amdgpu_invalidate_caches,
.init_mem_type = &amdgpu_init_mem_type,
.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
.evict_flags = &amdgpu_evict_flags,
@@ -1911,12 +1915,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
- /* Register debugfs entries for amdgpu_ttm */
- r = amdgpu_ttm_debugfs_init(adev);
- if (r) {
- DRM_ERROR("Failed to init debugfs\n");
- return r;
- }
return 0;
}
@@ -1938,7 +1936,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
if (!adev->mman.initialized)
return;
- amdgpu_ttm_debugfs_fini(adev);
amdgpu_ttm_training_reserve_vram_fini(adev);
/* return the IP Discovery TMR memory back to VRAM */
amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
@@ -2113,8 +2110,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
}
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_FENCE_OWNER_UNDEFINED,
- false);
+ AMDGPU_SYNC_ALWAYS,
+ AMDGPU_FENCE_OWNER_UNDEFINED);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
@@ -2198,7 +2195,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_FENCE_OWNER_UNDEFINED, false);
+ AMDGPU_SYNC_ALWAYS,
+ AMDGPU_FENCE_OWNER_UNDEFINED);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
@@ -2279,7 +2277,6 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
{
struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
- int r;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
@@ -2287,27 +2284,19 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
if (*pos >= adev->gmc.mc_vram_size)
return -ENXIO;
+ size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
while (size) {
- unsigned long flags;
- uint32_t value;
+ size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
+ uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
- if (*pos >= adev->gmc.mc_vram_size)
- return result;
-
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
- WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
- value = RREG32_NO_KIQ(mmMM_DATA);
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
+ amdgpu_device_vram_access(adev, *pos, value, bytes, false);
+ if (copy_to_user(buf, value, bytes))
+ return -EFAULT;
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
+ result += bytes;
+ buf += bytes;
+ *pos += bytes;
+ size -= bytes;
}
return result;
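
The reworked debugfs read path above batches VRAM into chunks of up to AMDGPU_TTM_VRAM_MAX_DW_READ dwords, stages each chunk in a local buffer, and hands it to copy_to_user in one call instead of doing one register-window round trip per dword. A hedged standalone sketch of the chunking loop; vram_read, chunked_read, and MAX_DW_READ are stand-ins, not driver symbols:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_DW_READ 32  /* stand-in for AMDGPU_TTM_VRAM_MAX_DW_READ */

/* Stand-in for the MMIO accessor: here VRAM is just a byte array. */
static void vram_read(const uint8_t *vram, uint64_t pos, void *buf,
                      size_t bytes)
{
        memcpy(buf, vram + pos, bytes);
}

/* Copy 'size' bytes starting at 'pos', one bounded chunk at a time. */
static size_t chunked_read(uint8_t *dst, const uint8_t *vram,
                           uint64_t pos, size_t size)
{
        size_t result = 0;

        while (size) {
                uint32_t value[MAX_DW_READ];
                size_t bytes = size < sizeof(value) ? size : sizeof(value);

                vram_read(vram, pos, value, bytes);
                memcpy(dst, value, bytes);
                result += bytes;
                dst += bytes;
                pos += bytes;
                size -= bytes;
        }
        return result;
}

int main(void)
{
        uint8_t vram[4096], out[300];
        size_t n;

        memset(vram, 0xab, sizeof(vram));
        n = chunked_read(out, vram, 128, sizeof(out));
        printf("copied %zu bytes, first byte 0x%02x\n", n, out[0]);
        return 0;
}
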
@@ -2544,7 +2533,7 @@ static const struct {
#endif
-static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
+int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
unsigned count;
@@ -2579,13 +2568,3 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
return 0;
#endif
}
-
-static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
-{
-#if defined(CONFIG_DEBUG_FS)
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
- debugfs_remove(adev->mman.debugfs_entries[i]);
-#endif
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 0dddedc06ae3..bd05bbb4878d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -133,4 +133,6 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);
+int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index b0e656409c03..88f226070229 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -283,7 +283,8 @@ union amdgpu_firmware_header {
* fw loading support
*/
enum AMDGPU_UCODE_ID {
- AMDGPU_UCODE_ID_SDMA0 = 0,
+ AMDGPU_UCODE_ID_CAP = 0, /* CAP must be the 1st fw to be loaded */
+ AMDGPU_UCODE_ID_SDMA0,
AMDGPU_UCODE_ID_SDMA1,
AMDGPU_UCODE_ID_SDMA2,
AMDGPU_UCODE_ID_SDMA3,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index f4d40855147b..9dd51f0d2c11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -28,7 +28,6 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
int r;
struct ras_fs_if fs_info = {
.sysfs_name = "umc_err_count",
- .debugfs_name = "umc_err_inject",
};
struct ras_ih_if ih_info = {
.cb = amdgpu_umc_process_ras_data_cb,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index a92f3b18e657..5fd32ad1c575 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1099,7 +1099,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
goto err_free;
} else {
r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
- AMDGPU_FENCE_OWNER_UNDEFINED, false);
+ AMDGPU_SYNC_ALWAYS,
+ AMDGPU_FENCE_OWNER_UNDEFINED);
if (r)
goto err_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f96464e2c157..a41272fbcba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -493,14 +493,9 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
- struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
long r;
- /* temporarily disable ib test for sriov */
- if (amdgpu_sriov_vf(adev))
- return 0;
-
r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
if (r)
goto error;
@@ -527,6 +522,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
r = amdgpu_ring_alloc(ring, 16);
if (r)
return r;
@@ -656,15 +654,10 @@ err:
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
- struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence = NULL;
struct amdgpu_bo *bo = NULL;
long r;
- /* temporarily disable ib test for sriov */
- if (amdgpu_sriov_vf(adev))
- return 0;
-
r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&bo, NULL, NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index d6deb0eb1e15..6fe057329de2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -179,6 +179,7 @@ struct amdgpu_vcn_inst {
struct amdgpu_irq_src irq;
struct amdgpu_vcn_reg external;
struct amdgpu_bo *dpg_sram_bo;
+ struct dpg_pause_state pause_state;
void *dpg_sram_cpu_addr;
uint64_t dpg_sram_gpu_addr;
uint32_t *dpg_sram_curr_addr;
@@ -190,8 +191,6 @@ struct amdgpu_vcn {
const struct firmware *fw; /* VCN firmware */
unsigned num_enc_rings;
enum amd_powergating_state cur_state;
- struct dpg_pause_state pause_state;
-
bool indirect_sram;
uint8_t num_vcn_inst;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index daaf909d009a..f0128f745bd2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -270,6 +270,9 @@ struct amdgpu_virt {
#define amdgpu_sriov_runtime(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME)
+#define amdgpu_sriov_fullaccess(adev) \
+(amdgpu_sriov_vf((adev)) && !amdgpu_sriov_runtime((adev)))
+
#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index d16231d6a790..6d9252a27916 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -120,23 +120,17 @@ static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
unsigned level)
{
- unsigned shift = 0xff;
-
switch (level) {
case AMDGPU_VM_PDB2:
case AMDGPU_VM_PDB1:
case AMDGPU_VM_PDB0:
- shift = 9 * (AMDGPU_VM_PDB0 - level) +
+ return 9 * (AMDGPU_VM_PDB0 - level) +
adev->vm_manager.block_size;
- break;
case AMDGPU_VM_PTB:
- shift = 0;
- break;
+ return 0;
default:
- dev_err(adev->dev, "the level%d isn't supported.\n", level);
+ return ~0;
}
-
- return shift;
}
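
For reference, the shift returned here is the bit position of a level's index within the virtual address: with 9 bits per directory level, each level above PDB0 contributes 9 extra bits on top of the configured block size. A standalone sketch with an assumed block_size of 9; the enum values mirror the AMDGPU_VM_* ordering but are illustrative:

#include <stdio.h>

/* Illustrative level numbering matching the switch above:
 * PDB2 is the highest directory level, PTB holds the leaf PTEs. */
enum { VM_PDB2 = 0, VM_PDB1 = 1, VM_PDB0 = 2, VM_PTB = 3 };

static unsigned level_shift(unsigned level, unsigned block_size)
{
        switch (level) {
        case VM_PDB2:
        case VM_PDB1:
        case VM_PDB0:
                return 9 * (VM_PDB0 - level) + block_size;
        case VM_PTB:
                return 0;
        default:
                return ~0u;     /* invalid level, matches the ~0 above */
        }
}

int main(void)
{
        /* With block_size = 9: PTB 0, PDB0 9, PDB1 18, PDB2 27. */
        printf("%u %u %u %u\n",
               level_shift(VM_PTB, 9), level_shift(VM_PDB0, 9),
               level_shift(VM_PDB1, 9), level_shift(VM_PDB2, 9));
        return 0;
}
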
/**
@@ -235,19 +229,6 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
else
list_move_tail(&vm_bo->vm_status, &vm->evicted);
}
-
-/**
- * amdgpu_vm_bo_relocated - vm_bo is reloacted
- *
- * @vm_bo: vm_bo which is relocated
- *
- * State for PDs/PTs which needs to update their parent PD.
- */
-static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
-{
- list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
-}
-
/**
* amdgpu_vm_bo_moved - vm_bo is moved
*
@@ -291,6 +272,22 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
}
/**
+ * amdgpu_vm_bo_relocated - vm_bo is relocated
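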
+ *
+ * @vm_bo: vm_bo which is relocated
+ *
+ * State for PDs/PTs which needs to update their parent PD.
+ * For the root PD, just move to idle state.
+ */
+static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
+{
+ if (vm_bo->bo->parent)
+ list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
+ else
+ amdgpu_vm_bo_idle(vm_bo);
+}
+
+/**
* amdgpu_vm_bo_done - vm_bo is done
*
* @vm_bo: vm_bo which is now done
@@ -588,8 +585,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
{
entry->priority = 0;
entry->tv.bo = &vm->root.base.bo->tbo;
- /* One for TTM and one for the CS job */
- entry->tv.num_shared = 2;
+ /* Two for VM updates, one for TTM and one for the CS job */
+ entry->tv.num_shared = 4;
entry->user_pages = NULL;
list_add(&entry->tv.head, validated);
}
@@ -697,10 +694,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
amdgpu_vm_bo_moved(bo_base);
} else {
vm->update_funcs->map_table(bo);
- if (bo->parent)
- amdgpu_vm_bo_relocated(bo_base);
- else
- amdgpu_vm_bo_idle(bo_base);
+ amdgpu_vm_bo_relocated(bo_base);
}
}
@@ -803,7 +797,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
params.vm = vm;
params.direct = direct;
- r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
+ r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r)
return r;
@@ -1086,8 +1080,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
struct dma_fence *fence = NULL;
bool pasid_mapping_needed = false;
unsigned patch_offset = 0;
+ bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL));
int r;
+ if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
+
if (amdgpu_vmid_had_gpu_reset(adev, id)) {
gds_switch_needed = true;
vm_flush_needed = true;
@@ -1299,7 +1297,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
params.vm = vm;
params.direct = direct;
- r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
+ r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r)
return r;
@@ -1448,21 +1446,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t incr, entry_end, pe_start;
struct amdgpu_bo *pt;
- /* make sure that the page tables covering the address range are
- * actually allocated
- */
- r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor,
- params->direct);
- if (r)
- return r;
-
- pt = cursor.entry->base.bo;
-
- /* The root level can't be a huge page */
- if (cursor.level == adev->vm_manager.root_level) {
- if (!amdgpu_vm_pt_descendant(adev, &cursor))
- return -ENOENT;
- continue;
+ if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
+ /* make sure that the page tables covering the
+ * address range are actually allocated
+ */
+ r = amdgpu_vm_alloc_pts(params->adev, params->vm,
+ &cursor, params->direct);
+ if (r)
+ return r;
}
shift = amdgpu_vm_level_shift(adev, cursor.level);
@@ -1480,25 +1471,38 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* smaller than the address shift. Go to the next
* child entry and try again.
*/
- if (!amdgpu_vm_pt_descendant(adev, &cursor))
- return -ENOENT;
- continue;
- } else if (frag >= parent_shift &&
- cursor.level - 1 != adev->vm_manager.root_level) {
+ if (amdgpu_vm_pt_descendant(adev, &cursor))
+ continue;
+ } else if (frag >= parent_shift) {
/* If the fragment size is even larger than the parent
- * shift we should go up one level and check it again
- * unless one level up is the root level.
+ * shift we should go up one level and check it again.
*/
if (!amdgpu_vm_pt_ancestor(&cursor))
- return -ENOENT;
+ return -EINVAL;
continue;
}
+ pt = cursor.entry->base.bo;
+ if (!pt) {
+ /* We need all PDs and PTs for mapping something, */
+ if (flags & AMDGPU_PTE_VALID)
+ return -ENOENT;
+
+ /* but unmapping something can happen at a higher
+ * level.
+ */
+ if (!amdgpu_vm_pt_ancestor(&cursor))
+ return -EINVAL;
+
+ pt = cursor.entry->base.bo;
+ shift = parent_shift;
+ }
+
/* Looks good so far, calculate parameters for the update */
incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
mask = amdgpu_vm_entries_mask(adev, cursor.level);
pe_start = ((cursor.pfn >> shift) & mask) * 8;
- entry_end = (uint64_t)(mask + 1) << shift;
+ entry_end = ((uint64_t)mask + 1) << shift;
entry_end += cursor.pfn & ~(entry_end - 1);
entry_end = min(entry_end, end);
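
The one-character move of the cast above matters: when mask is the all-ones 32-bit value, mask + 1 wraps to zero in 32-bit arithmetic before the old cast could widen it, while casting first keeps the carry. A minimal demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t mask = 0xffffffffu;
        unsigned shift = 3;

        /* Old form: the increment wraps to 0 before widening. */
        uint64_t wrong = (uint64_t)(mask + 1) << shift;   /* 0 */
        /* New form: widen first, then increment and shift. */
        uint64_t right = ((uint64_t)mask + 1) << shift;   /* 1ull << 35 */

        printf("wrong=0x%llx right=0x%llx\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
}
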
@@ -1506,6 +1510,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t upd_end = min(entry_end, frag_end);
unsigned nptes = (upd_end - frag_start) >> shift;
+ /* This can happen when higher level PDs are set to
+ * silent to stop fault floods.
+ */
+ nptes = max(nptes, 1u);
amdgpu_vm_update_flags(params, pt, cursor.level,
pe_start, dst, nptes, incr,
flags | AMDGPU_PTE_FRAG(frag));
@@ -1550,7 +1558,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* @adev: amdgpu_device pointer
* @vm: requested vm
* @direct: direct submission in a page fault
- * @exclusive: fence we need to sync to
+ * @resv: fences we need to sync to
* @start: start of mapped range
* @last: last mapped entry
* @flags: flags for the entries
@@ -1565,14 +1573,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct amdgpu_vm *vm, bool direct,
- struct dma_fence *exclusive,
+ struct dma_resv *resv,
uint64_t start, uint64_t last,
uint64_t flags, uint64_t addr,
dma_addr_t *pages_addr,
struct dma_fence **fence)
{
struct amdgpu_vm_update_params params;
- void *owner = AMDGPU_FENCE_OWNER_VM;
+ enum amdgpu_sync_mode sync_mode;
int r;
memset(&params, 0, sizeof(params));
@@ -1581,9 +1589,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
params.direct = direct;
params.pages_addr = pages_addr;
- /* sync to everything except eviction fences on unmapping */
+ /* Implicitly sync to command submissions in the same VM before
+ * unmapping. Sync to moving fences before mapping.
+ */
if (!(flags & AMDGPU_PTE_VALID))
- owner = AMDGPU_FENCE_OWNER_KFD;
+ sync_mode = AMDGPU_SYNC_EQ_OWNER;
+ else
+ sync_mode = AMDGPU_SYNC_EXPLICIT;
amdgpu_vm_eviction_lock(vm);
if (vm->evicting) {
@@ -1591,7 +1603,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
goto error_unlock;
}
- r = vm->update_funcs->prepare(&params, owner, exclusive);
+ if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
+ struct amdgpu_bo *root = vm->root.base.bo;
+
+ if (!dma_fence_is_signaled(vm->last_direct))
+ amdgpu_bo_fence(root, vm->last_direct, true);
+ }
+
+ r = vm->update_funcs->prepare(&params, resv, sync_mode);
if (r)
goto error_unlock;
@@ -1610,7 +1629,7 @@ error_unlock:
* amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
*
* @adev: amdgpu_device pointer
- * @exclusive: fence we need to sync to
+ * @resv: fences we need to sync to
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @mapping: mapped range and flags to use for the update
@@ -1626,7 +1645,7 @@ error_unlock:
* 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
- struct dma_fence *exclusive,
+ struct dma_resv *resv,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
@@ -1696,13 +1715,13 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
AMDGPU_GPU_PAGES_IN_CPU_PAGE;
}
- } else if (flags & AMDGPU_PTE_VALID) {
+ } else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
addr += bo_adev->vm_manager.vram_base_offset;
addr += pfn << PAGE_SHIFT;
}
last = min((uint64_t)mapping->last, start + max_entries - 1);
- r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
start, last, flags, addr,
dma_addr, fence);
if (r)
@@ -1741,7 +1760,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
dma_addr_t *pages_addr = NULL;
struct ttm_mem_reg *mem;
struct drm_mm_node *nodes;
- struct dma_fence *exclusive, **last_update;
+ struct dma_fence **last_update;
+ struct dma_resv *resv;
uint64_t flags;
struct amdgpu_device *bo_adev = adev;
int r;
@@ -1749,7 +1769,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
if (clear || !bo) {
mem = NULL;
nodes = NULL;
- exclusive = NULL;
+ resv = vm->root.base.bo->tbo.base.resv;
} else {
struct ttm_dma_tt *ttm;
@@ -1759,7 +1779,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
- exclusive = bo->tbo.moving;
+ resv = bo->tbo.base.resv;
}
if (bo) {
@@ -1769,7 +1789,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
flags = 0x0;
}
- if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv))
+ if (clear || (bo && bo->tbo.base.resv ==
+ vm->root.base.bo->tbo.base.resv))
last_update = &vm->last_update;
else
last_update = &bo_va->last_pt_update;
@@ -1783,7 +1804,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
}
list_for_each_entry(mapping, &bo_va->invalids, list) {
- r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
+ r = amdgpu_vm_bo_split_mapping(adev, resv, pages_addr, vm,
mapping, flags, bo_adev, nodes,
last_update);
if (r)
@@ -1978,6 +1999,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence)
{
+ struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
struct amdgpu_bo_va_mapping *mapping;
uint64_t init_pte_value = 0;
struct dma_fence *f = NULL;
@@ -1992,7 +2014,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
mapping->start < AMDGPU_GMC_HOLE_START)
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
- r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
mapping->start, mapping->last,
init_pte_value, 0, NULL, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
@@ -2563,8 +2585,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
return false;
/* Don't evict VM page tables while they are updated */
- if (!dma_fence_is_signaled(bo_base->vm->last_direct) ||
- !dma_fence_is_signaled(bo_base->vm->last_delayed)) {
+ if (!dma_fence_is_signaled(bo_base->vm->last_direct)) {
amdgpu_vm_eviction_unlock(bo_base->vm);
return false;
}
@@ -2741,11 +2762,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
if (timeout <= 0)
return timeout;
- timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout);
- if (timeout <= 0)
- return timeout;
-
- return dma_fence_wait_timeout(vm->last_delayed, true, timeout);
+ return dma_fence_wait_timeout(vm->last_direct, true, timeout);
}
/**
@@ -2818,7 +2835,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->update_funcs = &amdgpu_vm_sdma_funcs;
vm->last_update = NULL;
vm->last_direct = dma_fence_get_stub();
- vm->last_delayed = dma_fence_get_stub();
mutex_init(&vm->eviction_lock);
vm->evicting = false;
@@ -2873,7 +2889,6 @@ error_free_root:
error_free_delayed:
dma_fence_put(vm->last_direct);
- dma_fence_put(vm->last_delayed);
drm_sched_entity_destroy(&vm->delayed);
error_free_direct:
@@ -3076,8 +3091,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dma_fence_wait(vm->last_direct, false);
dma_fence_put(vm->last_direct);
- dma_fence_wait(vm->last_delayed, false);
- dma_fence_put(vm->last_delayed);
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
@@ -3188,6 +3201,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
union drm_amdgpu_vm *args = data;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ long timeout = msecs_to_jiffies(2000);
int r;
switch (args->in.op) {
@@ -3199,6 +3213,21 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return r;
break;
case AMDGPU_VM_OP_UNRESERVE_VMID:
+ if (amdgpu_sriov_runtime(adev))
+ timeout = 8 * timeout;
+
+ /* Wait for the VM to become idle, to make sure the vmid
+ * set in SPM_VMID is not referenced anymore.
+ */
+ r = amdgpu_bo_reserve(fpriv->vm.root.base.bo, true);
+ if (r)
+ return r;
+
+ r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
+ if (r < 0)
+ return r;
+
+ amdgpu_bo_unreserve(fpriv->vm.root.base.bo);
amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index b4640ab38c95..06fe30e1492d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -227,8 +227,8 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm_update_funcs {
int (*map_table)(struct amdgpu_bo *bo);
- int (*prepare)(struct amdgpu_vm_update_params *p, void * owner,
- struct dma_fence *exclusive);
+ int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode);
int (*update)(struct amdgpu_vm_update_params *p,
struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr, uint64_t flags);
@@ -276,7 +276,6 @@ struct amdgpu_vm {
/* Last submission to the scheduler entities */
struct dma_fence *last_direct;
- struct dma_fence *last_delayed;
unsigned int pasid;
/* dedicated to vm */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 73fec7a0ced5..e38516304070 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -44,26 +44,14 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table)
* Returns:
* Negative errno, 0 for success.
*/
-static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
- struct dma_fence *exclusive)
+static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
+ struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode)
{
- int r;
-
- /* Wait for any BO move to be completed */
- if (exclusive) {
- r = dma_fence_wait(exclusive, true);
- if (unlikely(r))
- return r;
- }
-
- /* Don't wait for submissions during page fault */
- if (p->direct)
+ if (!resv)
return 0;
- /* Wait for PT BOs to be idle. PTs share the same resv. object
- * as the root PD BO
- */
- return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
+ return amdgpu_bo_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true);
}
/**
@@ -86,6 +74,13 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
{
unsigned int i;
uint64_t value;
+ int r;
+
+ if (bo->tbo.moving) {
+ r = dma_fence_wait(bo->tbo.moving, true);
+ if (r)
+ return r;
+ }
pe += (unsigned long)amdgpu_bo_kptr(bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 19b7f80758f1..cf96c335b258 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -58,9 +58,9 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
* Negative errno, 0 for success.
*/
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
- void *owner, struct dma_fence *exclusive)
+ struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode)
{
- struct amdgpu_bo *root = p->vm->root.base.bo;
unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
int r;
@@ -70,17 +70,10 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
p->num_dw_left = ndw;
- /* Wait for moves to be completed */
- r = amdgpu_sync_fence(&p->job->sync, exclusive, false);
- if (r)
- return r;
-
- /* Don't wait for any submissions during page fault handling */
- if (p->direct)
+ if (!resv)
return 0;
- return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
- owner, false);
+ return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm);
}
/**
@@ -111,12 +104,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
if (r)
goto error;
- tmp = dma_fence_get(f);
- if (p->direct)
+ if (p->direct) {
+ tmp = dma_fence_get(f);
swap(p->vm->last_direct, tmp);
- else
- swap(p->vm->last_delayed, tmp);
- dma_fence_put(tmp);
+ dma_fence_put(tmp);
+ } else {
+ dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f);
+ }
if (fence && !p->direct)
swap(*fence, f);
@@ -147,7 +141,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
src += p->num_dw_left * 4;
- pe += amdgpu_bo_gpu_offset(bo);
+ pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);
amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
@@ -174,7 +168,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
{
struct amdgpu_ib *ib = p->job->ibs;
- pe += amdgpu_bo_gpu_offset(bo);
+ pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
if (count < 3) {
amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
@@ -208,6 +202,11 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
uint64_t *pte;
int r;
+ /* Wait for PD/PT moves to be completed */
+ r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving, false);
+ if (r)
+ return r;
+
do {
ndw = p->num_dw_left;
ndw -= p->job->ibs->length_dw;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index a97af422575a..95b3327168ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -26,7 +26,12 @@
#include "amdgpu_xgmi.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
+#include "soc15.h"
#include "df/df_3_6_offset.h"
+#include "xgmi/xgmi_4_0_0_smn.h"
+#include "xgmi/xgmi_4_0_0_sh_mask.h"
+#include "wafl/wafl2_4_0_0_smn.h"
+#include "wafl/wafl2_4_0_0_sh_mask.h"
static DEFINE_MUTEX(xgmi_mutex);
@@ -36,6 +41,109 @@ static DEFINE_MUTEX(xgmi_mutex);
static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
static unsigned hive_count = 0;
+static const int xgmi_pcs_err_status_reg_vg20[] = {
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
+};
+
+static const int wafl_pcs_err_status_reg_vg20[] = {
+ smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
+ smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
+};
+
+static const int xgmi_pcs_err_status_reg_arct[] = {
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000,
+};
+
+/* same as vg20 */
+static const int wafl_pcs_err_status_reg_arct[] = {
+ smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
+ smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
+};
+
+static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
+ {"XGMI PCS DataLossErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
+ {"XGMI PCS TrainingErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)},
+ {"XGMI PCS CRCErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)},
+ {"XGMI PCS BERExceededErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)},
+ {"XGMI PCS TxMetaDataErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)},
+ {"XGMI PCS ReplayBufParityErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)},
+ {"XGMI PCS DataParityErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)},
+ {"XGMI PCS ReplayFifoOverflowErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
+ {"XGMI PCS ReplayFifoUnderflowErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
+ {"XGMI PCS ElasticFifoOverflowErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
+ {"XGMI PCS DeskewErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)},
+ {"XGMI PCS DataStartupLimitErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)},
+ {"XGMI PCS FCInitTimeoutErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
+ {"XGMI PCS RecoveryTimeoutErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
+ {"XGMI PCS ReadySerialTimeoutErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
+ {"XGMI PCS ReadySerialAttemptErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
+ {"XGMI PCS RecoveryAttemptErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
+ {"XGMI PCS RecoveryRelockAttemptErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
+};
+
+static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
+ {"WAFL PCS DataLossErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)},
+ {"WAFL PCS TrainingErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)},
+ {"WAFL PCS CRCErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)},
+ {"WAFL PCS BERExceededErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)},
+ {"WAFL PCS TxMetaDataErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)},
+ {"WAFL PCS ReplayBufParityErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)},
+ {"WAFL PCS DataParityErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)},
+ {"WAFL PCS ReplayFifoOverflowErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
+ {"WAFL PCS ReplayFifoUnderflowErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
+ {"WAFL PCS ElasticFifoOverflowErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
+ {"WAFL PCS DeskewErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)},
+ {"WAFL PCS DataStartupLimitErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)},
+ {"WAFL PCS FCInitTimeoutErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)},
+ {"WAFL PCS RecoveryTimeoutErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
+ {"WAFL PCS ReadySerialTimeoutErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
+ {"WAFL PCS ReadySerialAttemptErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
+ {"WAFL PCS RecoveryAttemptErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)},
+ {"WAFL PCS RecoveryRelockAttemptErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
+};
+
void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
{
return &hive->device_list;
@@ -365,6 +473,13 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
return 0;
if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
+ ret = psp_xgmi_initialize(&adev->psp);
+ if (ret) {
+ dev_err(adev->dev,
+ "XGMI: Failed to initialize xgmi session\n");
+ return ret;
+ }
+
ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
if (ret) {
dev_err(adev->dev,
@@ -451,16 +566,16 @@ exit:
return ret;
}
-void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
+int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
struct amdgpu_hive_info *hive;
if (!adev->gmc.xgmi.supported)
- return;
+ return -EINVAL;
hive = amdgpu_get_xgmi_hive(adev, 1);
if (!hive)
- return;
+ return -EINVAL;
if (!(hive->number_devices--)) {
amdgpu_xgmi_sysfs_destroy(adev, hive);
@@ -471,6 +586,8 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
mutex_unlock(&hive->hive_lock);
}
+
+ return psp_xgmi_terminate(&adev->psp);
}
int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
@@ -481,7 +598,6 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
};
struct ras_fs_if fs_info = {
.sysfs_name = "xgmi_wafl_err_count",
- .debugfs_name = "xgmi_wafl_err_inject",
};
if (!adev->gmc.xgmi.supported ||
@@ -521,3 +637,129 @@ void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
kfree(ras_if);
}
}
+
+uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
+ uint64_t addr)
+{
+ uint32_t df_inst_id;
+ uint64_t dram_base_addr = 0;
+ const struct amdgpu_df_funcs *df_funcs = adev->df.funcs;
+
+ if ((!df_funcs) ||
+ (!df_funcs->get_df_inst_id) ||
+ (!df_funcs->get_dram_base_addr)) {
+ dev_warn(adev->dev,
+ "XGMI: relative phy_addr algorithm is not supported\n");
+ return addr;
+ }
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) {
+ dev_warn(adev->dev,
+ "failed to disable DF-Cstate, DF register may not be accessible\n");
+ return addr;
+ }
+
+ df_inst_id = df_funcs->get_df_inst_id(adev);
+ dram_base_addr = df_funcs->get_dram_base_addr(adev, df_inst_id);
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ dev_warn(adev->dev, "failed to enable DF-Cstate\n");
+
+ return addr + dram_base_addr;
+}
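
The helper above brackets the DF register reads with a C-state disallow/allow pair, since the registers may be inaccessible while the Data Fabric is in a low-power state. A hedged sketch of that guard pattern; the names are placeholders and the calls are simulated rather than routed through the DPM layer:

#include <stdint.h>
#include <stdio.h>

/* Simulated power-state guard: a real implementation would call into
 * the DPM layer, as amdgpu_dpm_set_df_cstate() does above. */
static int cstate_disallowed;

static int set_cstate(int disallow)
{
        cstate_disallowed = disallow;
        return 0;               /* 0 on success, like the real call */
}

static uint32_t read_df_register(void)
{
        /* Only meaningful while the low-power state is disallowed. */
        return cstate_disallowed ? 0x1234 : 0;
}

static uint32_t guarded_read(void)
{
        uint32_t v;

        if (set_cstate(1))
                return 0;       /* fall back if the guard fails */
        v = read_df_register();
        if (set_cstate(0))
                fprintf(stderr, "failed to re-enable C-state\n");
        return v;
}

int main(void)
{
        printf("0x%x\n", guarded_read());
        return 0;
}
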
+
+static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
+ uint32_t value,
+ uint32_t *ue_count,
+ uint32_t *ce_count,
+ bool is_xgmi_pcs)
+{
+ int i;
+ int ue_cnt;
+
+ if (is_xgmi_pcs) {
+ /* query xgmi pcs error status, only ue is supported */
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i++) {
+ ue_cnt = (value &
+ xgmi_pcs_ras_fields[i].pcs_err_mask) >>
+ xgmi_pcs_ras_fields[i].pcs_err_shift;
+ if (ue_cnt) {
+ dev_info(adev->dev, "%s detected\n",
+ xgmi_pcs_ras_fields[i].err_name);
+ *ue_count += ue_cnt;
+ }
+ }
+ } else {
+ /* query wafl pcs error status, only ue is supported */
+ for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) {
+ ue_cnt = (value &
+ wafl_pcs_ras_fields[i].pcs_err_mask) >>
+ wafl_pcs_ras_fields[i].pcs_err_shift;
+ if (ue_cnt) {
+ dev_info(adev->dev, "%s detected\n",
+ wafl_pcs_ras_fields[i].err_name);
+ *ue_count += ue_cnt;
+ }
+ }
+ }
+
+ return 0;
+}
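
The status parser above is table-driven: each named error has a mask/shift pair, and any nonzero extracted count is logged and accumulated. A standalone sketch of the same extraction loop with a made-up two-entry field table:

#include <stdint.h>
#include <stdio.h>

/* Illustrative table-driven error extraction; the field layout is
 * invented for the example, not taken from the PCS registers. */
struct err_field {
        const char *name;
        uint32_t mask;
        uint32_t shift;
};

static const struct err_field fields[] = {
        { "DataLossErr", 0x00000001, 0 },
        { "CRCErr",      0x00000030, 4 },
};

static uint32_t count_errors(uint32_t status)
{
        uint32_t total = 0;
        unsigned i;

        for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
                uint32_t cnt = (status & fields[i].mask) >> fields[i].shift;

                if (cnt) {
                        printf("%s detected\n", fields[i].name);
                        total += cnt;
                }
        }
        return total;
}

int main(void)
{
        printf("total: %u\n", count_errors(0x31)); /* 1 + 3 = 4 */
        return 0;
}
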
+
+int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ int i;
+ uint32_t data;
+ uint32_t ue_cnt = 0, ce_cnt = 0;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
+ return -EINVAL;
+
+ err_data->ue_count = 0;
+ err_data->ce_count = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ /* check xgmi pcs error */
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
+ data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev,
+ data, &ue_cnt, &ce_cnt, true);
+ }
+ /* check wafl pcs error */
+ for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
+ data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev,
+ data, &ue_cnt, &ce_cnt, false);
+ }
+ break;
+ case CHIP_VEGA20:
+ default:
+ /* check xgmi pcs error */
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
+ data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev,
+ data, &ue_cnt, &ce_cnt, true);
+ }
+ /* check wafl pcs error */
+ for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
+ data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev,
+ data, &ue_cnt, &ce_cnt, false);
+ }
+ break;
+ }
+
+ err_data->ue_count += ue_cnt;
+ err_data->ce_count += ce_cnt;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 74011fbc2251..4a92067fe595 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -37,15 +37,25 @@ struct amdgpu_hive_info {
struct task_barrier tb;
};
+struct amdgpu_pcs_ras_field {
+ const char *err_name;
+ uint32_t pcs_err_mask;
+ uint32_t pcs_err_shift;
+};
+
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
-void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
+int amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev);
void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev);
+uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
+ uint64_t addr);
+int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status);
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
struct amdgpu_device *bo_adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index dd30f4e61a8c..cae426c7c086 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -744,8 +744,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
cjiffies = jiffies;
if (time_after(cjiffies, ctx->last_jump_jiffies)) {
cjiffies -= ctx->last_jump_jiffies;
- if ((jiffies_to_msecs(cjiffies) > 5000)) {
- DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
+ if ((jiffies_to_msecs(cjiffies) > 10000)) {
+ DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n");
ctx->abort = true;
}
} else {
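
The loop-abort logic above converts a jiffies delta to milliseconds and bails once it exceeds the (now doubled) budget. A standalone sketch of that elapsed-tick check; HZ and the tick values here are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define HZ 250u /* assumed ticks per second */

/* Return nonzero once the elapsed time since 'last_jump' exceeds
 * 'limit_ms', mirroring the jiffies_to_msecs() comparison above. */
static int loop_stuck(uint64_t now, uint64_t last_jump, unsigned limit_ms)
{
        uint64_t ticks = now - last_jump;
        uint64_t ms = ticks * 1000u / HZ;

        return ms > limit_ms;
}

int main(void)
{
        /* 3000 ticks at 250 Hz is 12 s, past a 10 s budget. */
        printf("%d\n", loop_stuck(5000, 2000, 10000)); /* prints 1 */
        return 0;
}
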
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index ea702a64f807..9b74cfdba7b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -186,16 +186,10 @@ amdgpu_atombios_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *m
void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector)
{
- int ret;
-
amdgpu_connector->ddc_bus->rec.hpd = amdgpu_connector->hpd.hpd;
- amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev;
amdgpu_connector->ddc_bus->aux.transfer = amdgpu_atombios_dp_aux_transfer;
- ret = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux);
- if (!ret)
- amdgpu_connector->ddc_bus->has_aux = true;
-
- WARN(ret, "drm_dp_aux_register_i2c_bus() failed with error %d\n", ret);
+ drm_dp_aux_init(&amdgpu_connector->ddc_bus->aux);
+ amdgpu_connector->ddc_bus->has_aux = true;
}
/***** general DP utility functions *****/
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 40d2ac723dd6..2512e7ebfedf 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2494,6 +2494,10 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v10_0_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2685,6 +2689,7 @@ static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
.prepare = dce_v10_0_crtc_prepare,
.commit = dce_v10_0_crtc_commit,
.disable = dce_v10_0_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 898ef72d423c..0dde22db9848 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2573,6 +2573,10 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v11_0_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2793,6 +2797,7 @@ static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
.prepare = dce_v11_0_crtc_prepare,
.commit = dce_v11_0_crtc_commit,
.disable = dce_v11_0_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index db15a112becc..84219534bd38 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2388,6 +2388,10 @@ static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v6_0_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2575,6 +2579,7 @@ static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
.prepare = dce_v6_0_crtc_prepare,
.commit = dce_v6_0_crtc_commit,
.disable = dce_v6_0_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index f06c9022c1fd..3a640702d7d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2395,6 +2395,10 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v8_0_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2593,6 +2597,7 @@ static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
.prepare = dce_v8_0_crtc_prepare,
.commit = dce_v8_0_crtc_commit,
.disable = dce_v8_0_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index e4f94863332c..13e12be667fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -123,6 +123,10 @@ static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_virtual_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -218,6 +222,7 @@ static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = {
.prepare = dce_virtual_crtc_prepare,
.commit = dce_virtual_crtc_commit,
.disable = dce_virtual_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
@@ -609,7 +614,6 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_connector_register(connector);
/* link them */
drm_connector_attach_encoder(connector, encoder);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 1785fdad6ecb..42bbc0070831 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -35,6 +35,8 @@
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
+#include "smuio/smuio_11_0_0_offset.h"
+#include "smuio/smuio_11_0_0_sh_mask.h"
#include "navi10_enum.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"
@@ -52,7 +54,7 @@
* 1. Primary ring
* 2. Async ring
*/
-#define GFX10_NUM_GFX_RINGS 2
+#define GFX10_NUM_GFX_RINGS_NV1X 1
#define GFX10_MEC_HPD_SIZE 2048
#define F32_CE_PROGRAM_RAM_SIZE 65536
@@ -222,6 +224,49 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
};
+static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+{
+ static void *scratch_reg0;
+ static void *scratch_reg1;
+ static void *scratch_reg2;
+ static void *scratch_reg3;
+ static void *spare_int;
+ static uint32_t grbm_cntl;
+ static uint32_t grbm_idx;
+ uint32_t i = 0;
+ uint32_t retries = 50000;
+
+ scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
+ scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
+ scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
+ scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
+ spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
+
+ grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
+ grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
+
+ if (amdgpu_sriov_runtime(adev)) {
+ pr_err("shouldn't call rlcg write register during runtime\n");
+ return;
+ }
+
+ writel(v, scratch_reg0);
+ writel(offset | 0x80000000, scratch_reg1);
+ writel(1, spare_int);
+ for (i = 0; i < retries; i++) {
+ u32 tmp;
+
+ tmp = readl(scratch_reg1);
+ if (!(tmp & 0x80000000))
+ break;
+
+ udelay(10);
+ }
+
+ if (i >= retries)
+ pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
+}
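
The new RLCG write helper parks the value and the offset (with bit 31 set as a busy flag) in scratch registers, pokes the spare interrupt as a doorbell, and polls until the firmware clears the flag. A self-contained sketch of that handshake with the firmware side simulated in software; none of these names are driver symbols:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REQ_PENDING 0x80000000u

/* Simulated registers; a real implementation would target MMIO. */
static uint32_t scratch_data, scratch_addr, spare_int;

/* Stand-in for firmware: ack the request by clearing the busy bit. */
static void fake_firmware_step(void)
{
        if (spare_int) {
                scratch_addr &= ~REQ_PENDING;
                spare_int = 0;
        }
}

static bool rlcg_style_write(uint32_t offset, uint32_t value)
{
        unsigned retries = 50000;

        scratch_data = value;
        scratch_addr = offset | REQ_PENDING;
        spare_int = 1;                          /* ring the doorbell */

        while (retries--) {
                fake_firmware_step();           /* poll for the ack */
                if (!(scratch_addr & REQ_PENDING))
                        return true;
        }
        return false;                           /* timed out */
}

int main(void)
{
        printf("ack: %d\n", rlcg_style_write(0x1234, 0xdeadbeef));
        return 0;
}
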
+
static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
{
/* Pending on emulation bring up */
@@ -500,29 +545,28 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
- uint32_t scratch;
- uint32_t tmp = 0;
+ unsigned index;
+ uint64_t gpu_addr;
+ uint32_t tmp;
long r;
- r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
return r;
- }
-
- WREG32(scratch, 0xCAFEDEAD);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ r = amdgpu_ib_get(adev, NULL, 16, &ib);
+ if (r)
goto err1;
- }
- ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
- ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
- ib.ptr[2] = 0xDEADBEEF;
- ib.length_dw = 3;
+ ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
+ ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
+ ib.ptr[2] = lower_32_bits(gpu_addr);
+ ib.ptr[3] = upper_32_bits(gpu_addr);
+ ib.ptr[4] = 0xDEADBEEF;
+ ib.length_dw = 5;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
@@ -530,15 +574,13 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
goto err2;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
}
- tmp = RREG32(scratch);
+ tmp = adev->wb.wb[index];
if (tmp == 0xDEADBEEF)
r = 0;
else
@@ -547,8 +589,7 @@ err2:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err1:
- amdgpu_gfx_scratch_free(adev, scratch);
-
+ amdgpu_device_wb_free(adev, index);
return r;
}
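
The IB test above was switched from a scratch register to writeback memory: the IB carries a WRITE_DATA packet that lands 0xDEADBEEF at a GPU address the CPU can then check. A hedged sketch of how such a 5-dword IB is laid out; the opcode and field encodings are assumptions for illustration, not the real PM4 definitions:

#include <stdint.h>
#include <stdio.h>

/* Assumed PM4-style type-3 header: count in bits 29:16, opcode in
 * bits 15:8. Treat these encodings as illustrative only. */
#define PACKET3(op, count) \
        ((3u << 30) | (((count) & 0x3fffu) << 16) | (((op) & 0xffu) << 8))
#define OP_WRITE_DATA  0x37u            /* assumed opcode value */
#define DST_SEL_MEM    (5u << 8)        /* assumed destination select */
#define WR_CONFIRM     (1u << 20)       /* assumed write-confirm bit */

static void build_test_ib(uint32_t *ib, uint64_t gpu_addr)
{
        ib[0] = PACKET3(OP_WRITE_DATA, 3);      /* 3 payload dwords */
        ib[1] = DST_SEL_MEM | WR_CONFIRM;
        ib[2] = (uint32_t)gpu_addr;             /* low 32 bits */
        ib[3] = (uint32_t)(gpu_addr >> 32);     /* high 32 bits */
        ib[4] = 0xDEADBEEF;                     /* value the CPU polls for */
}

int main(void)
{
        uint32_t ib[5];
        int i;

        build_test_ib(ib, 0x100002000ull);
        for (i = 0; i < 5; i++)
                printf("dw%d: 0x%08x\n", i, ib[i]);
        return 0;
}
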
@@ -1016,6 +1057,10 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
return r;
}
+ /* init spm vmid with 0xf */
+ if (adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
return 0;
}
@@ -1304,7 +1349,7 @@ static int gfx_v10_0_sw_init(void *handle)
case CHIP_NAVI14:
case CHIP_NAVI12:
adev->gfx.me.num_me = 1;
- adev->gfx.me.num_pipe_per_me = 2;
+ adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
@@ -1783,11 +1828,11 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
/* csib */
- WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI,
adev->gfx.rlc.clear_state_gpu_addr >> 32);
- WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO,
adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
- WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
return 0;
}
@@ -2395,7 +2440,7 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].sched.ready = false;
}
- WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
+ WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
for (i = 0; i < adev->usec_timeout; i++) {
if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
@@ -2710,18 +2755,20 @@ static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_commit(ring);
/* submit cs packet to copy state 0 to next available state */
- ring = &adev->gfx.gfx_ring[1];
- r = amdgpu_ring_alloc(ring, 2);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
- return r;
- }
-
- amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
- amdgpu_ring_write(ring, 0);
+ if (adev->gfx.num_gfx_rings > 1) {
+ /* a maximum of two gfx rings is supported */
+ ring = &adev->gfx.gfx_ring[1];
+ r = amdgpu_ring_alloc(ring, 2);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
- amdgpu_ring_commit(ring);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_commit(ring);
+ }
return 0;
}
@@ -2818,39 +2865,41 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
/* Init gfx ring 1 for pipe 1 */
- mutex_lock(&adev->srbm_mutex);
- gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
- ring = &adev->gfx.gfx_ring[1];
- rb_bufsz = order_base_2(ring->ring_size / 8);
- tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
- tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
- WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
- /* Initialize the ring buffer's write pointers */
- ring->wptr = 0;
- WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
- /* Set the wb address wether it's enabled or not */
- rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
- WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
- WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
- CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
- wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
- WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
- lower_32_bits(wptr_gpu_addr));
- WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
- upper_32_bits(wptr_gpu_addr));
-
- mdelay(1);
- WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
-
- rb_addr = ring->gpu_addr >> 8;
- WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
- WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
- WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
-
- gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
- mutex_unlock(&adev->srbm_mutex);
-
+ if (adev->gfx.num_gfx_rings > 1) {
+ mutex_lock(&adev->srbm_mutex);
+ gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
+ /* a maximum of two gfx rings is supported */
+ ring = &adev->gfx.gfx_ring[1];
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
+ WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
+ /* Initialize the ring buffer's write pointers */
+ ring->wptr = 0;
+ WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
+ /* Set the wb address whether it's enabled or not */
+ rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+ WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
+ WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
+ CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
+ upper_32_bits(wptr_gpu_addr));
+
+ mdelay(1);
+ WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
+
+ rb_addr = ring->gpu_addr >> 8;
+ WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
+ WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
+ WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
+
+ gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
+ mutex_unlock(&adev->srbm_mutex);
+ }
/* Switch to pipe 0 */
mutex_lock(&adev->srbm_mutex);
gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
@@ -3164,12 +3213,7 @@ static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev)
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r) {
- DRM_ERROR("kfq enable failed\n");
- kiq_ring->sched.ready = false;
- }
- return r;
+ return amdgpu_ring_test_helper(kiq_ring);
}
#endif
@@ -3212,6 +3256,22 @@ done:
return r;
}
+static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct v10_compute_mqd *mqd)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
+ mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ ring->has_high_prio = true;
+ mqd->cp_hqd_queue_priority =
+ AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
+ } else {
+ ring->has_high_prio = false;
+ }
+ }
+}
+
static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -3337,6 +3397,9 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
+ /* set static priority for a compute queue/ring */
+ gfx_v10_0_compute_mqd_set_priority(ring, mqd);
+
/* map_queues packet doesn't need to activate the queue,
* so only kiq need set this field.
*/
@@ -3513,6 +3576,7 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
/* reset ring buffer */
ring->wptr = 0;
+ atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
amdgpu_ring_clear_ring(ring);
} else {
amdgpu_ring_clear_ring(ring);
@@ -3785,7 +3849,7 @@ static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
PREEMPT_QUEUES, 0, 0);
- return amdgpu_ring_test_ring(kiq_ring);
+ return amdgpu_ring_test_helper(kiq_ring);
}
#endif
@@ -3923,11 +3987,12 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
+ amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
- WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
- clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
- ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
+ ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
mutex_unlock(&adev->gfx.gpu_clock_mutex);
+ amdgpu_gfx_off_ctrl(adev, true);
return clock;
}
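
The clock read above composes a 64-bit value from two 32-bit halves while GFXOFF is held off and a mutex is taken, so the halves cannot tear. Where no such lock exists, the usual alternative is a re-read loop; a standalone sketch of that variant, with the counter faked (this is not what the patch does, just the common companion pattern):

#include <stdint.h>
#include <stdio.h>

/* Fake counter halves; a real reader would hit two MMIO registers. */
static uint32_t fake_lower(void) { return 0x12345678; }
static uint32_t fake_upper(void) { return 0x9; }

static uint64_t read_counter64(void)
{
        uint32_t hi, lo;

        do {
                hi = fake_upper();
                lo = fake_lower();
        } while (hi != fake_upper());   /* retry if the upper half moved */

        return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
        printf("0x%llx\n", (unsigned long long)read_counter64());
        return 0;
}
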
@@ -3964,7 +4029,8 @@ static int gfx_v10_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS;
+ adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X;
+
adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
gfx_v10_0_set_kiq_pm4_funcs(adev);
@@ -4212,6 +4278,45 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+ u32 data;
+
+ data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+
+ data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
+ data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
+
+ WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+}
+
+static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev,
+ uint32_t offset,
+ struct soc15_reg_rlcg *entries, int arr_size)
+{
+ int i;
+ uint32_t reg;
+
+ if (!entries)
+ return false;
+
+ for (i = 0; i < arr_size; i++) {
+ const struct soc15_reg_rlcg *entry;
+
+ entry = &entries[i];
+ reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+ if (offset == reg)
+ return true;
+ }
+
+ return false;
+}
+
+static bool gfx_v10_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
+{
+ return gfx_v10_0_check_rlcg_range(adev, offset, NULL, 0);
+}
+
static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
.is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
.set_safe_mode = gfx_v10_0_set_safe_mode,
@@ -4222,7 +4327,10 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
.resume = gfx_v10_0_rlc_resume,
.stop = gfx_v10_0_rlc_stop,
.reset = gfx_v10_0_rlc_reset,
- .start = gfx_v10_0_rlc_start
+ .start = gfx_v10_0_rlc_start,
+ .update_spm_vmid = gfx_v10_0_update_spm_vmid,
+ .rlcg_wreg = gfx_v10_rlcg_wreg,
+ .is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
};
static int gfx_v10_0_set_powergating_state(void *handle,
@@ -4411,15 +4519,15 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
control |= ib->length_dw | (vmid << 24);
- if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
+ if ((amdgpu_sriov_vf(ring->adev) || amdgpu_mcbp) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
if (flags & AMDGPU_IB_PREEMPTED)
control |= INDIRECT_BUFFER_PRE_RESUME(1);
- if (!(ib->flags & AMDGPU_IB_FLAG_CE))
+ if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
gfx_v10_0_ring_emit_de_meta(ring,
- flags & AMDGPU_IB_PREEMPTED ? true : false);
+ (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
}
amdgpu_ring_write(ring, header);
@@ -4566,9 +4674,9 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flag
{
uint32_t dw2 = 0;
- if (amdgpu_mcbp)
+ if (amdgpu_mcbp || amdgpu_sriov_vf(ring->adev))
gfx_v10_0_ring_emit_ce_meta(ring,
- flags & AMDGPU_IB_PREEMPTED ? true : false);
+ (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
gfx_v10_0_ring_emit_tmz(ring, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 8f20a5dd44fe..733d398c61cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3346,6 +3346,10 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
return r;
}
+ /* init spm vmid with 0xf */
+ if (adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
return 0;
}
@@ -3570,6 +3574,18 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
return 0;
}
+static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+ u32 data;
+
+ data = RREG32(mmRLC_SPM_VMID);
+
+ data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
+ data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
+
+ WREG32(mmRLC_SPM_VMID, data);
+}
+
static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
{
u32 data, orig, tmp, tmp2;
@@ -4221,7 +4237,8 @@ static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
.resume = gfx_v7_0_rlc_resume,
.stop = gfx_v7_0_rlc_stop,
.reset = gfx_v7_0_rlc_reset,
- .start = gfx_v7_0_rlc_start
+ .start = gfx_v7_0_rlc_start,
+ .update_spm_vmid = gfx_v7_0_update_spm_vmid
};
static int gfx_v7_0_early_init(void *handle)
@@ -4338,6 +4355,11 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
+ adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFBANK);
+ adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFRANKS);
+
adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
adev->gfx.config.mem_max_burst_length_bytes = 256;
if (adev->flags & AMD_IS_APU) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index fa245973de12..fc32586ef80b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1318,6 +1318,10 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
return r;
}
+ /* init spm vmid with 0xf */
+ if (adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
return 0;
}
@@ -1820,6 +1824,11 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
+ adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFBANK);
+ adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFRANKS);
+
adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
adev->gfx.config.mem_max_burst_length_bytes = 256;
if (adev->flags & AMD_IS_APU) {
@@ -4421,6 +4430,22 @@ static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
return r;
}
+static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *mqd)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
+ mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ ring->has_high_prio = true;
+ mqd->cp_hqd_queue_priority =
+ AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
+ } else {
+ ring->has_high_prio = false;
+ }
+ }
+}
+
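The same static-priority selection in isolation, as a hedged sketch: the priority encodings and the high-priority test below are illustrative placeholders, not the driver's actual definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PIPE_PRIO_HIGH		2	/* assumed encoding */
#define QUEUE_PRIO_MAXIMUM	15	/* assumed encoding */

struct mqd_prio {
	uint32_t pipe_priority;
	uint32_t queue_priority;
};

static bool is_high_priority_queue(unsigned int queue)
{
	return queue >= 4;	/* placeholder policy, not the driver's test */
}

static void mqd_set_priority(struct mqd_prio *mqd, unsigned int queue,
			     bool *has_high_prio)
{
	if (is_high_priority_queue(queue)) {
		mqd->pipe_priority = PIPE_PRIO_HIGH;
		mqd->queue_priority = QUEUE_PRIO_MAXIMUM;
		*has_high_prio = true;
	} else {
		*has_high_prio = false;	/* keep hardware defaults otherwise */
	}
}

int main(void)
{
	struct mqd_prio mqd = { 0, 0 };
	bool high;

	mqd_set_priority(&mqd, 5, &high);
	printf("high=%d pipe=%u queue=%u\n", high,
	       (unsigned)mqd.pipe_priority, (unsigned)mqd.queue_priority);
	return 0;
}

Setting the priority once in the MQD replaces the dynamic set_priority ring callback that this patch removes further down.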
static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -4544,9 +4569,6 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
/* defaults */
mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR);
mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR);
- mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY);
- mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY);
- mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
mqd->cp_hqd_ctx_save_base_addr_lo = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO);
mqd->cp_hqd_ctx_save_base_addr_hi = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI);
mqd->cp_hqd_cntl_stack_offset = RREG32(mmCP_HQD_CNTL_STACK_OFFSET);
@@ -4558,6 +4580,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM);
mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES);
+ /* set static priority for a queue/ring */
+ gfx_v8_0_mqd_set_priority(ring, mqd);
+ mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
+
/* map_queues packet doesn't need to activate the queue,
* so only the kiq needs to set this field.
*/
@@ -5589,6 +5615,18 @@ static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
}
}
+static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+ u32 data;
+
+ data = RREG32(mmRLC_SPM_VMID);
+
+ data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
+ data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
+
+ WREG32(mmRLC_SPM_VMID, data);
+}
+
static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
.is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
.set_safe_mode = gfx_v8_0_set_safe_mode,
@@ -5600,7 +5638,8 @@ static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
.resume = gfx_v8_0_rlc_resume,
.stop = gfx_v8_0_rlc_stop,
.reset = gfx_v8_0_rlc_reset,
- .start = gfx_v8_0_rlc_start
+ .start = gfx_v8_0_rlc_start,
+ .update_spm_vmid = gfx_v8_0_update_spm_vmid
};
static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
@@ -6094,7 +6133,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
- if (!(ib->flags & AMDGPU_IB_FLAG_CE))
+ if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
gfx_v8_0_ring_emit_de_meta(ring);
}
@@ -6236,104 +6275,6 @@ static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
}
-static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
- bool acquire)
-{
- struct amdgpu_device *adev = ring->adev;
- int pipe_num, tmp, reg;
- int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
-
- pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
-
- /* first me only has 2 entries, GFX and HP3D */
- if (ring->me > 0)
- pipe_num -= 2;
-
- reg = mmSPI_WCL_PIPE_PERCENT_GFX + pipe_num;
- tmp = RREG32(reg);
- tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
- WREG32(reg, tmp);
-}
-
-static void gfx_v8_0_pipe_reserve_resources(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- bool acquire)
-{
- int i, pipe;
- bool reserve;
- struct amdgpu_ring *iring;
-
- mutex_lock(&adev->gfx.pipe_reserve_mutex);
- pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
- if (acquire)
- set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- else
- clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
-
- if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
- /* Clear all reservations - everyone reacquires all resources */
- for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
- gfx_v8_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
- true);
-
- for (i = 0; i < adev->gfx.num_compute_rings; ++i)
- gfx_v8_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
- true);
- } else {
- /* Lower all pipes without a current reservation */
- for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
- iring = &adev->gfx.gfx_ring[i];
- pipe = amdgpu_gfx_mec_queue_to_bit(adev,
- iring->me,
- iring->pipe,
- 0);
- reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- gfx_v8_0_ring_set_pipe_percent(iring, reserve);
- }
-
- for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
- iring = &adev->gfx.compute_ring[i];
- pipe = amdgpu_gfx_mec_queue_to_bit(adev,
- iring->me,
- iring->pipe,
- 0);
- reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- gfx_v8_0_ring_set_pipe_percent(iring, reserve);
- }
- }
-
- mutex_unlock(&adev->gfx.pipe_reserve_mutex);
-}
-
-static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- bool acquire)
-{
- uint32_t pipe_priority = acquire ? 0x2 : 0x0;
- uint32_t queue_priority = acquire ? 0xf : 0x0;
-
- mutex_lock(&adev->srbm_mutex);
- vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
-
- WREG32(mmCP_HQD_PIPE_PRIORITY, pipe_priority);
- WREG32(mmCP_HQD_QUEUE_PRIORITY, queue_priority);
-
- vi_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-}
-static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
- enum drm_sched_priority priority)
-{
- struct amdgpu_device *adev = ring->adev;
- bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
-
- if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
- return;
-
- gfx_v8_0_hqd_set_priority(adev, ring, acquire);
- gfx_v8_0_pipe_reserve_resources(adev, ring, acquire);
-}
-
static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
u64 addr, u64 seq,
unsigned flags)
@@ -6966,7 +6907,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .set_priority = gfx_v8_0_ring_set_priority_compute,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 90f64b8bc358..ba90a14089cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -697,6 +697,11 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
};
+static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
+ {SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
+ {SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
+};
+
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
@@ -721,6 +726,59 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};
+void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+{
+ static void *scratch_reg0;
+ static void *scratch_reg1;
+ static void *scratch_reg2;
+ static void *scratch_reg3;
+ static void *spare_int;
+ static uint32_t grbm_cntl;
+ static uint32_t grbm_idx;
+
+ scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
+ scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
+ scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
+ scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
+ spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
+
+ grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
+ grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
+
+ if (amdgpu_sriov_runtime(adev)) {
+ pr_err("shouldn't call rlcg write register during runtime\n");
+ return;
+ }
+
+ if (offset == grbm_cntl || offset == grbm_idx) {
+ if (offset == grbm_cntl)
+ writel(v, scratch_reg2);
+ else if (offset == grbm_idx)
+ writel(v, scratch_reg3);
+
+ writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+ } else {
+ uint32_t i = 0;
+ uint32_t retries = 50000;
+
+ writel(v, scratch_reg0);
+ writel(offset | 0x80000000, scratch_reg1);
+ writel(1, spare_int);
+ for (i = 0; i < retries; i++) {
+ u32 tmp;
+
+ tmp = readl(scratch_reg1);
+ if (!(tmp & 0x80000000))
+ break;
+
+ udelay(10);
+ }
+ if (i >= retries)
+ pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
+ }
+
+}
+
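A minimal user-space sketch of the handshake polling pattern used by gfx_v9_0_rlcg_wreg(): post a value, set a busy flag (bit 31) in a mailbox word, and poll with a bounded retry count. All names are illustrative, and the completion is synchronous here only for the demo:

#include <stdint.h>
#include <stdio.h>

static uint32_t mailbox;	/* stands in for scratch_reg1 */

static void agent_completes_request(void)
{
	mailbox &= ~0x80000000u;	/* the RLC would clear the busy bit */
}

static int post_and_wait(uint32_t offset, uint32_t retries)
{
	uint32_t i;

	mailbox = offset | 0x80000000u;	/* offset plus busy flag, as above */
	agent_completes_request();	/* asynchronous in the real flow */

	for (i = 0; i < retries; i++) {
		if (!(mailbox & 0x80000000u))
			return 0;	/* acknowledged */
		/* the driver udelay()s between polls */
	}
	return -1;			/* timeout */
}

int main(void)
{
	printf("ack: %d\n", post_and_wait(0x1234, 50000));
	return 0;
}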
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
@@ -738,9 +796,9 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
-static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
void *inject_if);
+static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
uint64_t queue_mask)
@@ -1106,10 +1164,11 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
adev->gfx.me_fw_write_wait = false;
adev->gfx.mec_fw_write_wait = false;
- if ((adev->gfx.mec_fw_version < 0x000001a5) ||
+ if ((adev->asic_type != CHIP_ARCTURUS) &&
+ ((adev->gfx.mec_fw_version < 0x000001a5) ||
(adev->gfx.mec_feature_version < 46) ||
(adev->gfx.pfp_fw_version < 0x000000b7) ||
- (adev->gfx.pfp_feature_version < 46))
+ (adev->gfx.pfp_feature_version < 46)))
DRM_WARN_ONCE("CP firmware version too old, please update!");
switch (adev->asic_type) {
@@ -1193,6 +1252,14 @@ static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
return false;
}
+static bool is_raven_kicker(struct amdgpu_device *adev)
+{
+ if (adev->pm.fw_version >= 0x41e2b)
+ return true;
+ else
+ return false;
+}
+
static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
{
if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
@@ -1205,9 +1272,8 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
break;
case CHIP_RAVEN:
if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
- ((adev->gfx.rlc_fw_version != 106 &&
+ ((!is_raven_kicker(adev) &&
adev->gfx.rlc_fw_version < 531) ||
- (adev->gfx.rlc_fw_version == 53815) ||
(adev->gfx.rlc_feature_version < 1) ||
!adev->gfx.rlc.is_rlc_v2_1))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
@@ -1839,6 +1905,10 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
break;
}
+ /* init spm vmid with 0xf */
+ if (adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
return 0;
}
@@ -1909,7 +1979,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
- WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
+ WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(address << SQ_IND_INDEX__INDEX__SHIFT) |
@@ -1921,7 +1991,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
uint32_t wave, uint32_t thread,
uint32_t regno, uint32_t num, uint32_t *out)
{
- WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
+ WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(regno << SQ_IND_INDEX__INDEX__SHIFT) |
@@ -1985,7 +2055,8 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
.ras_error_inject = &gfx_v9_0_ras_error_inject,
- .query_ras_error_count = &gfx_v9_0_query_ras_error_count
+ .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
+ .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
};
static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
@@ -1996,7 +2067,8 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
.ras_error_inject = &gfx_v9_4_ras_error_inject,
- .query_ras_error_count = &gfx_v9_4_query_ras_error_count
+ .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
+ .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
};
static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -3302,6 +3374,22 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
}
+static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
+ mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ ring->has_high_prio = true;
+ mqd->cp_hqd_queue_priority =
+ AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
+ } else {
+ ring->has_high_prio = false;
+ }
+ }
+}
+
static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -3438,6 +3526,10 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
+ /* set static priority for a queue/ring */
+ gfx_v9_0_mqd_set_priority(ring, mqd);
+ mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
+
/* map_queues packet doesn't need to activate the queue,
* so only the kiq needs to set this field.
*/
@@ -3656,6 +3748,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
/* reset ring buffer */
ring->wptr = 0;
+ atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
amdgpu_ring_clear_ring(ring);
} else {
amdgpu_ring_clear_ring(ring);
@@ -3955,28 +4048,78 @@ static int gfx_v9_0_soft_reset(void *handle)
return 0;
}
+static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
+{
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ BUG_ON(!ring->funcs->emit_rreg);
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ amdgpu_ring_alloc(ring, 32);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
+ amdgpu_ring_write(ring, 9 | /* src: register */
+ (5 << 8) | /* dst: memory */
+ (1 << 16) | /* count sel */
+ (1 << 20)); /* write confirm */
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
+ kiq->reg_val_offs * 4));
+ amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
+ kiq->reg_val_offs * 4));
+ amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* don't wait anymore for gpu reset case because this way may
+ * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
+ * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
+ * never return if we keep waiting in virt_kiq_rreg, which causes
+ * gpu_recover() to hang there.
+ *
+ * Also don't wait anymore for IRQ context.
+ */
+ if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+ goto failed_kiq_read;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq_read;
+
+ return (uint64_t)adev->wb.wb[kiq->reg_val_offs] |
+ (uint64_t)adev->wb.wb[kiq->reg_val_offs + 1] << 32ULL;
+
+failed_kiq_read:
+ pr_err("failed to read gpu clock\n");
+ return ~0;
+}
+
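A small sketch of the final step of gfx_v9_0_kiq_read_clock(): combining the two 32-bit writeback words into one 64-bit clock value (illustrative only):

#include <assert.h>
#include <stdint.h>

static uint64_t combine_clock(const uint32_t *wb, unsigned int offs)
{
	/* low word first, then the high word from the next slot */
	return (uint64_t)wb[offs] | ((uint64_t)wb[offs + 1] << 32);
}

int main(void)
{
	uint32_t wb[2] = { 0x89abcdefu, 0x01234567u };

	assert(combine_clock(wb, 0) == 0x0123456789abcdefULL);
	return 0;
}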
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
+ amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
- uint32_t tmp, lsb, msb, i = 0;
- do {
- if (i != 0)
- udelay(1);
- tmp = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
- lsb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_LSB);
- msb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
- i++;
- } while (unlikely(tmp != msb) && (i < adev->usec_timeout));
- clock = (uint64_t)lsb | ((uint64_t)msb << 32ULL);
+ clock = gfx_v9_0_kiq_read_clock(adev);
} else {
WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
}
mutex_unlock(&adev->gfx.gpu_clock_mutex);
+ amdgpu_gfx_off_ctrl(adev, true);
return clock;
}
@@ -4043,6 +4186,101 @@ static const u32 sgpr_init_compute_shader[] =
0xbe800080, 0xbf810000,
};
+static const u32 vgpr_init_compute_shader_arcturus[] = {
+ 0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
+ 0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
+ 0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
+ 0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
+ 0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
+ 0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
+ 0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
+ 0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
+ 0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
+ 0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
+ 0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
+ 0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
+ 0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
+ 0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
+ 0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
+ 0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
+ 0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
+ 0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
+ 0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
+ 0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
+ 0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
+ 0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
+ 0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
+ 0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
+ 0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
+ 0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
+ 0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
+ 0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
+ 0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
+ 0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
+ 0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
+ 0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
+ 0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
+ 0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
+ 0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
+ 0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
+ 0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
+ 0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
+ 0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
+ 0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
+ 0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
+ 0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
+ 0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
+ 0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
+ 0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
+ 0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
+ 0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
+ 0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
+ 0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
+ 0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
+ 0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
+ 0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
+ 0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
+ 0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
+ 0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
+ 0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
+ 0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
+ 0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
+ 0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
+ 0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
+ 0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
+ 0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
+ 0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
+ 0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
+ 0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
+ 0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
+ 0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
+ 0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
+ 0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
+ 0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
+ 0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
+ 0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
+ 0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
+ 0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
+ 0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
+ 0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
+ 0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
+ 0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
+ 0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
+ 0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
+ 0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
+ 0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
+ 0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
+ 0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
+ 0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
+ 0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
+ 0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
+ 0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
+ 0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
+ 0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
+ 0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
+ 0xbf84fff8, 0xbf810000,
+};
+
/* When the register arrays below are changed, please update gpr_reg_size
and sec_ded_counter_reg_size in function gfx_v9_0_do_edc_gpr_workarounds
to cover all gfx9 ASICs */
@@ -4063,6 +4301,23 @@ static const struct soc15_reg_entry vgpr_init_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
};
+static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x81 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
+};
+
static const struct soc15_reg_entry sgpr1_init_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
@@ -4131,7 +4386,6 @@ static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
- { SOC15_REG_ENTRY(HDP, 0, mmHDP_EDC_CNT), 0, 1, 1},
};
static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
@@ -4194,7 +4448,10 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
adev->gfx.config.max_cu_per_sh *
adev->gfx.config.max_sh_per_se;
int sgpr_work_group_size = 5;
- int gpr_reg_size = compute_dim_x / 16 + 6;
+ int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
+ int vgpr_init_shader_size;
+ const u32 *vgpr_init_shader_ptr;
+ const struct soc15_reg_entry *vgpr_init_regs_ptr;
/* only support when RAS is enabled */
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
@@ -4204,6 +4461,16 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
if (!ring->sched.ready)
return 0;
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
+ vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
+ vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
+ } else {
+ vgpr_init_shader_ptr = vgpr_init_compute_shader;
+ vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
+ vgpr_init_regs_ptr = vgpr_init_regs;
+ }
+
total_size =
(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
total_size +=
@@ -4212,7 +4479,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
total_size = ALIGN(total_size, 256);
vgpr_offset = total_size;
- total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
+ total_size += ALIGN(vgpr_init_shader_size, 256);
sgpr_offset = total_size;
total_size += sizeof(sgpr_init_compute_shader);
@@ -4225,8 +4492,8 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
}
/* load the compute shaders */
- for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
- ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
+ for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
+ ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
@@ -4238,9 +4505,9 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* write the register state for the compute dispatch */
for (i = 0; i < gpr_reg_size; i++) {
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
- ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i])
+ ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
- PACKET3_SET_SH_REG_START;
- ib.ptr[ib.length_dw++] = vgpr_init_regs[i].reg_value;
+ ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
}
/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
@@ -4252,7 +4519,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* write dispatch packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
- ib.ptr[ib.length_dw++] = compute_dim_x; /* x */
+ ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
ib.ptr[ib.length_dw++] = 1; /* y */
ib.ptr[ib.length_dw++] = 1; /* z */
ib.ptr[ib.length_dw++] =
@@ -4332,18 +4599,6 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
goto fail;
}
- switch (adev->asic_type)
- {
- case CHIP_VEGA20:
- gfx_v9_0_clear_ras_edc_counter(adev);
- break;
- case CHIP_ARCTURUS:
- gfx_v9_4_clear_ras_edc_counter(adev);
- break;
- default:
- break;
- }
-
fail:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
@@ -4374,15 +4629,27 @@ static int gfx_v9_0_ecc_late_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = gfx_v9_0_do_edc_gds_workarounds(adev);
- if (r)
- return r;
+ /*
+ * Temporary workaround for an issue where the CP firmware fails to
+ * update the read pointer when CPDMA writes a clearing operation
+ * to GDS during the suspend/resume sequence on several cards. So
+ * just limit this operation to the cold-boot sequence.
+ */
+ if (!adev->in_suspend) {
+ r = gfx_v9_0_do_edc_gds_workarounds(adev);
+ if (r)
+ return r;
+ }
/* requires IBs so do in late init after IB pool is initialized */
r = gfx_v9_0_do_edc_gpr_workarounds(adev);
if (r)
return r;
+ if (adev->gfx.funcs &&
+ adev->gfx.funcs->reset_ras_error_count)
+ adev->gfx.funcs->reset_ras_error_count(adev);
+
r = amdgpu_gfx_ras_late_init(adev);
if (r)
return r;
@@ -4687,6 +4954,47 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+ u32 data;
+
+ data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+
+ data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
+ data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
+
+ WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+}
+
+static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
+ uint32_t offset,
+ struct soc15_reg_rlcg *entries, int arr_size)
+{
+ int i;
+ uint32_t reg;
+
+ if (!entries)
+ return false;
+
+ for (i = 0; i < arr_size; i++) {
+ const struct soc15_reg_rlcg *entry;
+
+ entry = &entries[i];
+ reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+ if (offset == reg)
+ return true;
+ }
+
+ return false;
+}
+
+static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
+{
+ return gfx_v9_0_check_rlcg_range(adev, offset,
+ (void *)rlcg_access_gc_9_0,
+ ARRAY_SIZE(rlcg_access_gc_9_0));
+}
+
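A simplified sketch of the allow-list walk in gfx_v9_0_check_rlcg_range(): each table entry is resolved to an absolute register offset and compared with the requested one. The struct layout and the offset computation are stand-ins for the soc15 types:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct reg_entry {
	uint32_t base;	/* stands in for reg_offset[hwip][instance][segment] */
	uint32_t reg;
};

static bool in_access_range(uint32_t offset,
			    const struct reg_entry *entries, size_t n)
{
	size_t i;

	if (!entries)
		return false;	/* the gfx10 variant passes a NULL table */

	for (i = 0; i < n; i++)
		if (offset == entries[i].base + entries[i].reg)
			return true;
	return false;
}

int main(void)
{
	static const struct reg_entry table[] = { { 0x1000, 0x22 } };

	return in_access_range(0x1022, table, 1) ? 0 : 1;
}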
static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
.is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
.set_safe_mode = gfx_v9_0_set_safe_mode,
@@ -4698,7 +5006,10 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
.resume = gfx_v9_0_rlc_resume,
.stop = gfx_v9_0_rlc_stop,
.reset = gfx_v9_0_rlc_reset,
- .start = gfx_v9_0_rlc_start
+ .start = gfx_v9_0_rlc_start,
+ .update_spm_vmid = gfx_v9_0_update_spm_vmid,
+ .rlcg_wreg = gfx_v9_0_rlcg_wreg,
+ .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
};
static int gfx_v9_0_set_powergating_state(void *handle,
@@ -4901,7 +5212,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
- if (!(ib->flags & AMDGPU_IB_FLAG_CE))
+ if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
gfx_v9_0_ring_emit_de_meta(ring);
}
@@ -5026,105 +5337,6 @@ static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
return wptr;
}
-static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
- bool acquire)
-{
- struct amdgpu_device *adev = ring->adev;
- int pipe_num, tmp, reg;
- int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
-
- pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
-
- /* first me only has 2 entries, GFX and HP3D */
- if (ring->me > 0)
- pipe_num -= 2;
-
- reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
- tmp = RREG32(reg);
- tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
- WREG32(reg, tmp);
-}
-
-static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- bool acquire)
-{
- int i, pipe;
- bool reserve;
- struct amdgpu_ring *iring;
-
- mutex_lock(&adev->gfx.pipe_reserve_mutex);
- pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
- if (acquire)
- set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- else
- clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
-
- if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
- /* Clear all reservations - everyone reacquires all resources */
- for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
- gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
- true);
-
- for (i = 0; i < adev->gfx.num_compute_rings; ++i)
- gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
- true);
- } else {
- /* Lower all pipes without a current reservation */
- for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
- iring = &adev->gfx.gfx_ring[i];
- pipe = amdgpu_gfx_mec_queue_to_bit(adev,
- iring->me,
- iring->pipe,
- 0);
- reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- gfx_v9_0_ring_set_pipe_percent(iring, reserve);
- }
-
- for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
- iring = &adev->gfx.compute_ring[i];
- pipe = amdgpu_gfx_mec_queue_to_bit(adev,
- iring->me,
- iring->pipe,
- 0);
- reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- gfx_v9_0_ring_set_pipe_percent(iring, reserve);
- }
- }
-
- mutex_unlock(&adev->gfx.pipe_reserve_mutex);
-}
-
-static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- bool acquire)
-{
- uint32_t pipe_priority = acquire ? 0x2 : 0x0;
- uint32_t queue_priority = acquire ? 0xf : 0x0;
-
- mutex_lock(&adev->srbm_mutex);
- soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
-
- WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
- WREG32_SOC15_RLC(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
-
- soc15_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-}
-
-static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
- enum drm_sched_priority priority)
-{
- struct amdgpu_device *adev = ring->adev;
- bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
-
- if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
- return;
-
- gfx_v9_0_hqd_set_priority(adev, ring, acquire);
- gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
-}
-
static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -6304,10 +6516,13 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
return 0;
}
-static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev)
+static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
{
int i, j, k;
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return;
+
/* read back registers to clear the counters */
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
@@ -6495,7 +6710,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .set_priority = gfx_v9_0_ring_set_priority_compute,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index f099f13d7f1e..cceb46faf212 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -893,10 +893,13 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
return 0;
}
-void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev)
+void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
{
int i, j, k;
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return;
+
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) {
for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
index 2e3f6f755ad4..1ffecc5c0f0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
@@ -32,4 +32,6 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
void *inject_if);
+void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev);
+
#endif /* __GFX_V9_4_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index b70c7b483c24..cc866c367939 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -81,24 +81,31 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;
- /* Disable AGP. */
- WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
- WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0);
- WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF);
-
- /* Program the system aperture low logical page number. */
- WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->gmc.vram_start >> 18);
- WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->gmc.vram_end >> 18);
-
- /* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
- + adev->vm_manager.vram_base_offset;
- WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
- (u32)(value >> 12));
- WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
- (u32)(value >> 44));
+ if (!amdgpu_sriov_vf(adev)) {
+ /*
+ * the new L1 policy will block the SRIOV guest from writing
+ * these regs; they will be programmed on the host side,
+ * so skip programming these regs here.
+ */
+ /* Disable AGP. */
+ WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
+ WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF);
+
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ adev->gmc.vram_start >> 18);
+ WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ adev->gmc.vram_end >> 18);
+
+ /* Set default page address. */
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
+ + adev->vm_manager.vram_base_offset;
+ WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ (u32)(value >> 12));
+ WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ (u32)(value >> 44));
+ }
/* Program "protection fault". */
WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
@@ -135,6 +142,10 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
+ /* These regs are not accessible to the VF; the PF will program them under SRIOV */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* Setup L2 cache */
tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
@@ -256,18 +267,6 @@ static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev)
int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev)) {
- /*
- * GCMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
- * VF copy registers, so vbios post doesn't program them; the
- * SRIOV driver needs to program them
- */
- WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE,
- adev->gmc.vram_start >> 24);
- WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_TOP,
- adev->gmc.vram_end >> 24);
- }
-
/* GART Enable. */
gfxhub_v2_0_init_gart_aperture_regs(adev);
gfxhub_v2_0_init_system_aperture_regs(adev);
@@ -298,9 +297,11 @@ void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 0);
WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp);
- /* Setup L2 cache */
- WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
- WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0);
+ if (!amdgpu_sriov_vf(adev)) {
+ /* Setup L2 cache */
+ WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0);
+ }
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 90216abf14a4..8606f877478f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -476,13 +476,26 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
{
bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
const unsigned eng = 17;
- u32 j, inv_req, tmp;
+ u32 j, inv_req, inv_req2, tmp;
struct amdgpu_vmhub *hub;
BUG_ON(vmhub >= adev->num_vmhubs);
hub = &adev->vmhub[vmhub];
- inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+ if (adev->gmc.xgmi.num_physical_nodes &&
+ adev->asic_type == CHIP_VEGA20) {
+ /* Vega20+XGMI caches PTEs in TC and TLB. Add a
+ * heavy-weight TLB flush (type 2), which flushes
+ * both. Due to a race condition with concurrent
+ * memory accesses using the same TLB cache line, we
+ * still need a second TLB flush after this.
+ */
+ inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
+ inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+ } else {
+ inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+ inv_req2 = 0;
+ }
/* This is necessary for a HW workaround under SRIOV as well
* as GFXOFF under bare metal
@@ -521,21 +534,27 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
}
- WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
+ do {
+ WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
- /*
- * Issue a dummy read to wait for the ACK register to be cleared
- * to avoid a false ACK due to the new fast GRBM interface.
- */
- if (vmhub == AMDGPU_GFXHUB_0)
- RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);
+ /*
+ * Issue a dummy read to wait for the ACK register to
+ * be cleared to avoid a false ACK due to the new fast
+ * GRBM interface.
+ */
+ if (vmhub == AMDGPU_GFXHUB_0)
+ RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);
- for (j = 0; j < adev->usec_timeout; j++) {
- tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
- if (tmp & (1 << vmid))
- break;
- udelay(1);
- }
+ for (j = 0; j < adev->usec_timeout; j++) {
+ tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
+ if (tmp & (1 << vmid))
+ break;
+ udelay(1);
+ }
+
+ inv_req = inv_req2;
+ inv_req2 = 0;
+ } while (inv_req);
/* TODO: debugging with the semaphore for GFXHUB still needs more work */
if (use_semaphore)
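A stripped-down sketch of the two-pass loop introduced above: the staged inv_req2 makes the body run twice when the Vega20+XGMI workaround is active and once otherwise. The request values below are placeholders, not real invalidate words:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void issue(uint32_t req)
{
	/* the driver does the WREG32 plus the ack-poll loop here */
	printf("flush request 0x%08" PRIx32 "\n", req);
}

int main(void)
{
	uint32_t inv_req = 2;	/* heavy-weight flush staged first */
	uint32_t inv_req2 = 1;	/* original flush_type staged second */

	do {
		issue(inv_req);
		inv_req = inv_req2;	/* move to the staged request */
		inv_req2 = 0;		/* so the loop runs at most twice */
	} while (inv_req);
	return 0;
}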
@@ -577,9 +596,26 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
return -EIO;
if (ring->sched.ready) {
+ /* Vega20+XGMI caches PTEs in TC and TLB. Add a
+ * heavy-weight TLB flush (type 2), which flushes
+ * both. Due to a race condition with concurrent
+ * memory accesses using the same TLB cache line, we
+ * still need a second TLB flush after this.
+ */
+ bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
+ adev->asic_type == CHIP_VEGA20);
+ /* 2 dwords flush + 8 dwords fence */
+ unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;
+
+ if (vega20_xgmi_wa)
+ ndw += kiq->pmf->invalidate_tlbs_size;
+
spin_lock(&adev->gfx.kiq.ring_lock);
/* 2 dwords flush + 8 dwords fence */
- amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
+ amdgpu_ring_alloc(ring, ndw);
+ if (vega20_xgmi_wa)
+ kiq->pmf->kiq_invalidate_tlbs(ring,
+ pasid, 2, all_hub);
kiq->pmf->kiq_invalidate_tlbs(ring,
pasid, flush_type, all_hub);
amdgpu_fence_emit_polling(ring, &seq);
@@ -886,32 +922,25 @@ static int gmc_v9_0_late_init(void *handle)
if (r)
return r;
/* Check if ecc is available */
- if (!amdgpu_sriov_vf(adev)) {
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
- r = amdgpu_atomfirmware_mem_ecc_supported(adev);
- if (!r) {
- DRM_INFO("ECC is not present.\n");
- if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
- adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
- } else {
- DRM_INFO("ECC is active.\n");
- }
-
- r = amdgpu_atomfirmware_sram_ecc_supported(adev);
- if (!r) {
- DRM_INFO("SRAM ECC is not present.\n");
- } else {
- DRM_INFO("SRAM ECC is active.\n");
- }
- break;
- default:
- break;
- }
+ if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
+ r = amdgpu_atomfirmware_mem_ecc_supported(adev);
+ if (!r) {
+ DRM_INFO("ECC is not present.\n");
+ if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
+ adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
+ } else
+ DRM_INFO("ECC is active.\n");
+
+ r = amdgpu_atomfirmware_sram_ecc_supported(adev);
+ if (!r)
+ DRM_INFO("SRAM ECC is not present.\n");
+ else
+ DRM_INFO("SRAM ECC is active.\n");
}
+ if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
+ adev->mmhub.funcs->reset_ras_error_count(adev);
+
r = amdgpu_gmc_ras_late_init(adev);
if (r)
return r;
@@ -1272,6 +1301,19 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
}
/**
+ * gmc_v9_0_restore_registers - restores regs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This restores register values that were saved at suspend.
+ */
+static void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
+{
+ if (adev->asic_type == CHIP_RAVEN)
+ WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
+}
+
+/**
* gmc_v9_0_gart_enable - gart enable
*
* @adev: amdgpu_device pointer
@@ -1377,6 +1419,20 @@ static int gmc_v9_0_hw_init(void *handle)
}
/**
+ * gmc_v9_0_save_registers - saves regs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This saves register values that may need to be
+ * restored upon resume.
+ */
+static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
+{
+ if (adev->asic_type == CHIP_RAVEN)
+ adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
+}
+
+/**
* gmc_v9_0_gart_disable - gart disable
*
* @adev: amdgpu_device pointer
@@ -1412,9 +1468,16 @@ static int gmc_v9_0_hw_fini(void *handle)
static int gmc_v9_0_suspend(void *handle)
{
+ int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return gmc_v9_0_hw_fini(adev);
+ r = gmc_v9_0_hw_fini(adev);
+ if (r)
+ return r;
+
+ gmc_v9_0_save_registers(adev);
+
+ return 0;
}
static int gmc_v9_0_resume(void *handle)
@@ -1422,6 +1485,7 @@ static int gmc_v9_0_resume(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ gmc_v9_0_restore_registers(adev);
r = gmc_v9_0_hw_init(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index ff2e6e1ccde7..6173951db7b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -693,7 +693,7 @@ static int jpeg_v2_0_set_clockgating_state(void *handle,
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
- if (jpeg_v2_0_is_idle(handle))
+ if (!jpeg_v2_0_is_idle(handle))
return -EBUSY;
jpeg_v2_0_enable_clock_gating(adev);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index c6d046df4b70..c04c2078a7c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -477,7 +477,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle,
continue;
if (enable) {
- if (jpeg_v2_5_is_idle(handle))
+ if (!jpeg_v2_5_is_idle(handle))
return -EBUSY;
jpeg_v2_5_enable_clock_gating(adev, i);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 49a3a56ec017..396c2a624de0 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -747,7 +747,19 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
err_data->ue_count += ded_count;
}
+static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ uint32_t i;
+
+ /* read back edc counter registers to reset the counters to 0 */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++)
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
+ }
+}
+
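A hedged sketch of the read-to-clear idiom these reset functions rely on: a "reset" is just a loop of reads whose results are intentionally discarded. The array here is a stand-in for the MMIO counter registers:

#include <stdint.h>

static volatile uint32_t edc_cnt_regs[4];	/* stand-in for the EDC counters */

static void reset_error_counters(void)
{
	unsigned int i;

	for (i = 0; i < 4; i++)
		(void)edc_cnt_regs[i];	/* read back; the hardware clears on read */
}

int main(void)
{
	reset_error_counters();
	return 0;
}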
const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
.ras_late_init = amdgpu_mmhub_ras_late_init,
.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
+ .reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index bde189680521..fb3f228458e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -72,11 +72,18 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, 0);
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, 0x00FFFFFF);
- /* Program the system aperture low logical page number. */
- WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->gmc.vram_start >> 18);
- WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->gmc.vram_end >> 18);
+ if (!amdgpu_sriov_vf(adev)) {
+ /*
+ * the new L1 policy will block the SRIOV guest from writing
+ * these regs; they will be programmed on the host side,
+ * so skip programming these regs here.
+ */
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ adev->gmc.vram_start >> 18);
+ WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ adev->gmc.vram_end >> 18);
+ }
/* Set default page address. */
value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
@@ -247,18 +254,6 @@ static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev)
int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev)) {
- /*
- * MMMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
- * VF copy registers, so vbios post doesn't program them; the
- * SRIOV driver needs to program them
- */
- WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_BASE,
- adev->gmc.vram_start >> 24);
- WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_TOP,
- adev->gmc.vram_end >> 24);
- }
-
/* GART Enable. */
mmhub_v2_0_init_gart_aperture_regs(adev);
mmhub_v2_0_init_system_aperture_regs(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index a5281df8d84f..0d413fabd015 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -1596,7 +1596,19 @@ static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev,
err_data->ue_count += ded_count;
}
+static void mmhub_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ uint32_t i;
+
+ /* read back edc counter registers to reset the counters to 0 */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
+ for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++)
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
+ }
+}
+
const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
.ras_late_init = amdgpu_mmhub_ras_late_init,
.query_ras_error_count = mmhub_v9_4_query_ras_error_count,
+ .reset_ras_error_count = mmhub_v9_4_reset_ras_error_count,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h
new file mode 100644
index 000000000000..1b5086c7d4e6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MMSCH_V2_0_H__
+#define __MMSCH_V2_0_H__
+
+// addressBlock: uvd0_mmsch_dec
+// base address: 0x1e000
+#define mmMMSCH_UCODE_ADDR 0x0000
+#define mmMMSCH_UCODE_ADDR_BASE_IDX 0
+#define mmMMSCH_UCODE_DATA 0x0001
+#define mmMMSCH_UCODE_DATA_BASE_IDX 0
+#define mmMMSCH_SRAM_ADDR 0x0002
+#define mmMMSCH_SRAM_ADDR_BASE_IDX 0
+#define mmMMSCH_SRAM_DATA 0x0003
+#define mmMMSCH_SRAM_DATA_BASE_IDX 0
+#define mmMMSCH_VF_SRAM_OFFSET 0x0004
+#define mmMMSCH_VF_SRAM_OFFSET_BASE_IDX 0
+#define mmMMSCH_DB_SRAM_OFFSET 0x0005
+#define mmMMSCH_DB_SRAM_OFFSET_BASE_IDX 0
+#define mmMMSCH_CTX_SRAM_OFFSET 0x0006
+#define mmMMSCH_CTX_SRAM_OFFSET_BASE_IDX 0
+#define mmMMSCH_CTL 0x0007
+#define mmMMSCH_CTL_BASE_IDX 0
+#define mmMMSCH_INTR 0x0008
+#define mmMMSCH_INTR_BASE_IDX 0
+#define mmMMSCH_INTR_ACK 0x0009
+#define mmMMSCH_INTR_ACK_BASE_IDX 0
+#define mmMMSCH_INTR_STATUS 0x000a
+#define mmMMSCH_INTR_STATUS_BASE_IDX 0
+#define mmMMSCH_VF_VMID 0x000b
+#define mmMMSCH_VF_VMID_BASE_IDX 0
+#define mmMMSCH_VF_CTX_ADDR_LO 0x000c
+#define mmMMSCH_VF_CTX_ADDR_LO_BASE_IDX 0
+#define mmMMSCH_VF_CTX_ADDR_HI 0x000d
+#define mmMMSCH_VF_CTX_ADDR_HI_BASE_IDX 0
+#define mmMMSCH_VF_CTX_SIZE 0x000e
+#define mmMMSCH_VF_CTX_SIZE_BASE_IDX 0
+#define mmMMSCH_VF_GPCOM_ADDR_LO 0x000f
+#define mmMMSCH_VF_GPCOM_ADDR_LO_BASE_IDX 0
+#define mmMMSCH_VF_GPCOM_ADDR_HI 0x0010
+#define mmMMSCH_VF_GPCOM_ADDR_HI_BASE_IDX 0
+#define mmMMSCH_VF_GPCOM_SIZE 0x0011
+#define mmMMSCH_VF_GPCOM_SIZE_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_HOST 0x0012
+#define mmMMSCH_VF_MAILBOX_HOST_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_RESP 0x0013
+#define mmMMSCH_VF_MAILBOX_RESP_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_0 0x0014
+#define mmMMSCH_VF_MAILBOX_0_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_0_RESP 0x0015
+#define mmMMSCH_VF_MAILBOX_0_RESP_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_1 0x0016
+#define mmMMSCH_VF_MAILBOX_1_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_1_RESP 0x0017
+#define mmMMSCH_VF_MAILBOX_1_RESP_BASE_IDX 0
+#define mmMMSCH_CNTL 0x001c
+#define mmMMSCH_CNTL_BASE_IDX 0
+#define mmMMSCH_NONCACHE_OFFSET0 0x001d
+#define mmMMSCH_NONCACHE_OFFSET0_BASE_IDX 0
+#define mmMMSCH_NONCACHE_SIZE0 0x001e
+#define mmMMSCH_NONCACHE_SIZE0_BASE_IDX 0
+#define mmMMSCH_NONCACHE_OFFSET1 0x001f
+#define mmMMSCH_NONCACHE_OFFSET1_BASE_IDX 0
+#define mmMMSCH_NONCACHE_SIZE1 0x0020
+#define mmMMSCH_NONCACHE_SIZE1_BASE_IDX 0
+#define mmMMSCH_PDEBUG_STATUS 0x0021
+#define mmMMSCH_PDEBUG_STATUS_BASE_IDX 0
+#define mmMMSCH_PDEBUG_DATA_32UPPERBITS 0x0022
+#define mmMMSCH_PDEBUG_DATA_32UPPERBITS_BASE_IDX 0
+#define mmMMSCH_PDEBUG_DATA_32LOWERBITS 0x0023
+#define mmMMSCH_PDEBUG_DATA_32LOWERBITS_BASE_IDX 0
+#define mmMMSCH_PDEBUG_EPC 0x0024
+#define mmMMSCH_PDEBUG_EPC_BASE_IDX 0
+#define mmMMSCH_PDEBUG_EXCCAUSE 0x0025
+#define mmMMSCH_PDEBUG_EXCCAUSE_BASE_IDX 0
+#define mmMMSCH_PROC_STATE1 0x0026
+#define mmMMSCH_PROC_STATE1_BASE_IDX 0
+#define mmMMSCH_LAST_MC_ADDR 0x0027
+#define mmMMSCH_LAST_MC_ADDR_BASE_IDX 0
+#define mmMMSCH_LAST_MEM_ACCESS_HI 0x0028
+#define mmMMSCH_LAST_MEM_ACCESS_HI_BASE_IDX 0
+#define mmMMSCH_LAST_MEM_ACCESS_LO 0x0029
+#define mmMMSCH_LAST_MEM_ACCESS_LO_BASE_IDX 0
+#define mmMMSCH_IOV_ACTIVE_FCN_ID 0x002a
+#define mmMMSCH_IOV_ACTIVE_FCN_ID_BASE_IDX 0
+#define mmMMSCH_SCRATCH_0 0x002b
+#define mmMMSCH_SCRATCH_0_BASE_IDX 0
+#define mmMMSCH_SCRATCH_1 0x002c
+#define mmMMSCH_SCRATCH_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_0 0x002d
+#define mmMMSCH_GPUIOV_SCH_BLOCK_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_CONTROL_0 0x002e
+#define mmMMSCH_GPUIOV_CMD_CONTROL_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_0 0x002f
+#define mmMMSCH_GPUIOV_CMD_STATUS_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_0 0x0030
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_0 0x0031
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_0 0x0032
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW6_0 0x0033
+#define mmMMSCH_GPUIOV_DW6_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW7_0 0x0034
+#define mmMMSCH_GPUIOV_DW7_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW8_0 0x0035
+#define mmMMSCH_GPUIOV_DW8_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_1 0x0036
+#define mmMMSCH_GPUIOV_SCH_BLOCK_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_CONTROL_1 0x0037
+#define mmMMSCH_GPUIOV_CMD_CONTROL_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_1 0x0038
+#define mmMMSCH_GPUIOV_CMD_STATUS_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_1 0x0039
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_1 0x003a
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_1 0x003b
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW6_1 0x003c
+#define mmMMSCH_GPUIOV_DW6_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW7_1 0x003d
+#define mmMMSCH_GPUIOV_DW7_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW8_1 0x003e
+#define mmMMSCH_GPUIOV_DW8_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CNTXT 0x003f
+#define mmMMSCH_GPUIOV_CNTXT_BASE_IDX 0
+#define mmMMSCH_SCRATCH_2 0x0040
+#define mmMMSCH_SCRATCH_2_BASE_IDX 0
+#define mmMMSCH_SCRATCH_3 0x0041
+#define mmMMSCH_SCRATCH_3_BASE_IDX 0
+#define mmMMSCH_SCRATCH_4 0x0042
+#define mmMMSCH_SCRATCH_4_BASE_IDX 0
+#define mmMMSCH_SCRATCH_5 0x0043
+#define mmMMSCH_SCRATCH_5_BASE_IDX 0
+#define mmMMSCH_SCRATCH_6 0x0044
+#define mmMMSCH_SCRATCH_6_BASE_IDX 0
+#define mmMMSCH_SCRATCH_7 0x0045
+#define mmMMSCH_SCRATCH_7_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_HEAD_0 0x0046
+#define mmMMSCH_VFID_FIFO_HEAD_0_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_TAIL_0 0x0047
+#define mmMMSCH_VFID_FIFO_TAIL_0_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_HEAD_1 0x0048
+#define mmMMSCH_VFID_FIFO_HEAD_1_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_TAIL_1 0x0049
+#define mmMMSCH_VFID_FIFO_TAIL_1_BASE_IDX 0
+#define mmMMSCH_NACK_STATUS 0x004a
+#define mmMMSCH_NACK_STATUS_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX0_DATA 0x004b
+#define mmMMSCH_VF_MAILBOX0_DATA_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX1_DATA 0x004c
+#define mmMMSCH_VF_MAILBOX1_DATA_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_0 0x004d
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_0 0x004e
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0 0x004f
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_1 0x0050
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_1 0x0051
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1 0x0052
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CNTXT_IP 0x0053
+#define mmMMSCH_GPUIOV_CNTXT_IP_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_2 0x0054
+#define mmMMSCH_GPUIOV_SCH_BLOCK_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_CONTROL_2 0x0055
+#define mmMMSCH_GPUIOV_CMD_CONTROL_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_2 0x0056
+#define mmMMSCH_GPUIOV_CMD_STATUS_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_2 0x0057
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_2 0x0058
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_2 0x0059
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW6_2 0x005a
+#define mmMMSCH_GPUIOV_DW6_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW7_2 0x005b
+#define mmMMSCH_GPUIOV_DW7_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW8_2 0x005c
+#define mmMMSCH_GPUIOV_DW8_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_2 0x005d
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_2 0x005e
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2 0x005f
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_HEAD_2 0x0060
+#define mmMMSCH_VFID_FIFO_HEAD_2_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_TAIL_2 0x0061
+#define mmMMSCH_VFID_FIFO_TAIL_2_BASE_IDX 0
+#define mmMMSCH_VM_BUSY_STATUS_0 0x0062
+#define mmMMSCH_VM_BUSY_STATUS_0_BASE_IDX 0
+#define mmMMSCH_VM_BUSY_STATUS_1 0x0063
+#define mmMMSCH_VM_BUSY_STATUS_1_BASE_IDX 0
+#define mmMMSCH_VM_BUSY_STATUS_2 0x0064
+#define mmMMSCH_VM_BUSY_STATUS_2_BASE_IDX 0
+
+#define MMSCH_VERSION_MAJOR 2
+#define MMSCH_VERSION_MINOR 0
+#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)
+
+enum mmsch_v2_0_command_type {
+ MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
+ MMSCH_COMMAND__DIRECT_REG_POLLING = 2,
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3,
+ MMSCH_COMMAND__INDIRECT_REG_WRITE = 8,
+ MMSCH_COMMAND__END = 0xf
+};
+
+struct mmsch_v2_0_init_header {
+ uint32_t version;
+ uint32_t header_size;
+ uint32_t vcn_init_status;
+ uint32_t vcn_table_offset;
+ uint32_t vcn_table_size;
+};
+
+struct mmsch_v2_0_cmd_direct_reg_header {
+ uint32_t reg_offset : 28;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v2_0_cmd_indirect_reg_header {
+ uint32_t reg_offset : 20;
+ uint32_t reg_idx_space : 8;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v2_0_cmd_direct_write {
+ struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+struct mmsch_v2_0_cmd_direct_read_modify_write {
+ struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
+ uint32_t write_data;
+ uint32_t mask_value;
+};
+
+struct mmsch_v2_0_cmd_direct_polling {
+ struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
+ uint32_t mask_value;
+ uint32_t wait_value;
+};
+
+struct mmsch_v2_0_cmd_end {
+ struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
+};
+
+struct mmsch_v2_0_cmd_indirect_write {
+ struct mmsch_v2_0_cmd_indirect_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+static inline void mmsch_v2_0_insert_direct_wt(struct mmsch_v2_0_cmd_direct_write *direct_wt,
+ uint32_t *init_table,
+ uint32_t reg_offset,
+ uint32_t value)
+{
+ direct_wt->cmd_header.reg_offset = reg_offset;
+ direct_wt->reg_value = value;
+ memcpy((void *)init_table, direct_wt, sizeof(struct mmsch_v2_0_cmd_direct_write));
+}
+
+static inline void mmsch_v2_0_insert_direct_rd_mod_wt(struct mmsch_v2_0_cmd_direct_read_modify_write *direct_rd_mod_wt,
+ uint32_t *init_table,
+ uint32_t reg_offset,
+ uint32_t mask, uint32_t data)
+{
+ direct_rd_mod_wt->cmd_header.reg_offset = reg_offset;
+ direct_rd_mod_wt->mask_value = mask;
+ direct_rd_mod_wt->write_data = data;
+ memcpy((void *)init_table, direct_rd_mod_wt,
+ sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write));
+}
+
+static inline void mmsch_v2_0_insert_direct_poll(struct mmsch_v2_0_cmd_direct_polling *direct_poll,
+ uint32_t *init_table,
+ uint32_t reg_offset,
+ uint32_t mask, uint32_t wait)
+{
+ direct_poll->cmd_header.reg_offset = reg_offset;
+ direct_poll->mask_value = mask;
+ direct_poll->wait_value = wait;
+ memcpy((void *)init_table, direct_poll, sizeof(struct mmsch_v2_0_cmd_direct_polling));
+}
+
+#define MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \
+ mmsch_v2_0_insert_direct_rd_mod_wt(&direct_rd_mod_wt, \
+ init_table, (reg), \
+ (mask), (data)); \
+ init_table += sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)/4; \
+ table_size += sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)/4; \
+}
+
+#define MMSCH_V2_0_INSERT_DIRECT_WT(reg, value) { \
+ mmsch_v2_0_insert_direct_wt(&direct_wt, \
+ init_table, (reg), \
+ (value)); \
+ init_table += sizeof(struct mmsch_v2_0_cmd_direct_write)/4; \
+ table_size += sizeof(struct mmsch_v2_0_cmd_direct_write)/4; \
+}
+
+#define MMSCH_V2_0_INSERT_DIRECT_POLL(reg, mask, wait) { \
+ mmsch_v2_0_insert_direct_poll(&direct_poll, \
+ init_table, (reg), \
+ (mask), (wait)); \
+ init_table += sizeof(struct mmsch_v2_0_cmd_direct_polling)/4; \
+ table_size += sizeof(struct mmsch_v2_0_cmd_direct_polling)/4; \
+}
+
+#endif
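The MMSCH_V2_0_INSERT_* helpers above deliberately capture init_table, table_size, and the command structs from the caller's scope, appending fixed-size command packets to a DWORD-indexed table. A minimal sketch of the expected calling pattern, with a placeholder register offset (the real consumer is the SR-IOV start path added to vcn_v2_0.c later in this patch):

static uint32_t example_build_mmsch_table(uint32_t *init_table)
{
	uint32_t table_size = 0;
	struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v2_0_cmd_direct_polling direct_poll = { {0} };

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;

	/* Write 0 to a placeholder register, then poll bit 0 until set. */
	MMSCH_V2_0_INSERT_DIRECT_WT(0x100, 0);
	MMSCH_V2_0_INSERT_DIRECT_POLL(0x100, 0x1, 0x1);

	/* Size in DWORDs; the caller stores this in the init header. */
	return table_size;
}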
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index cf557a428298..e08245a446fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -32,6 +32,7 @@
#include "soc15_common.h"
#include "navi10_ih.h"
+#define MAX_REARM_RETRY 10
static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
@@ -284,6 +285,38 @@ static void navi10_ih_decode_iv(struct amdgpu_device *adev,
}
/**
+ * navi10_ih_irq_rearm - rearm the IRQ if the doorbell write was lost
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: IH ring whose doorbell write to retry
+ */
+static void navi10_ih_irq_rearm(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ uint32_t reg_rptr = 0;
+ uint32_t v = 0;
+ uint32_t i = 0;
+
+ if (ih == &adev->irq.ih)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+ else if (ih == &adev->irq.ih1)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+ else if (ih == &adev->irq.ih2)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+ else
+ return;
+
+ /* Rearm IRQ / re-write doorbell if doorbell write is lost */
+ for (i = 0; i < MAX_REARM_RETRY; i++) {
+ v = RREG32_NO_KIQ(reg_rptr);
+ if ((v < ih->ring_size) && (v != ih->rptr))
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+ else
+ break;
+ }
+}
+
+/**
* navi10_ih_set_rptr - set the IH ring buffer rptr
*
* @adev: amdgpu_device pointer
@@ -297,6 +330,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+ if (amdgpu_sriov_vf(adev))
+ navi10_ih_irq_rearm(adev, ih);
} else
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 65eb378fa035..149d386590df 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -318,6 +318,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
{
uint32_t bif_doorbell_intr_cntl;
struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
+ struct ras_err_data err_data = {0, 0, 0, NULL};
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
if (REG_GET_FIELD(bif_doorbell_intr_cntl,
@@ -332,7 +333,19 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
* clear error status after ras_controller_intr according to
* hw team and count ue number for query
*/
- nbio_v7_4_query_ras_error_count(adev, &obj->err_data);
+ nbio_v7_4_query_ras_error_count(adev, &err_data);
+
+ /* log the error counts and print a notice for awareness */
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
+
+ if (err_data.ce_count)
+ DRM_INFO("%ld correctable errors detected in %s block\n",
+ obj->err_data.ce_count, adev->nbio.ras_if->name);
+
+ if (err_data.ue_count)
+ DRM_INFO("%ld uncorrectable errors detected in %s block\n",
+ obj->err_data.ue_count, adev->nbio.ras_if->name);
DRM_WARN("RAS controller interrupt triggered by NBIF error\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 2d1bebdf1603..033cbbca2072 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -516,7 +516,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 36b65797434e..6ff9a9544110 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -31,6 +31,9 @@
#define GFX_CMD_RESERVED_MASK 0x7FF00000
#define GFX_CMD_RESPONSE_MASK 0x80000000
+/* USBC PD FW version retrieval command */
+#define C2PMSG_CMD_GFX_USB_PD_FW_VER 0x2000000
+
/* TEE Gfx Command IDs for the register interface.
* Command ID must be between 0x00010000 and 0x000F0000.
*/
@@ -243,6 +246,7 @@ enum psp_gfx_fw_type {
GFX_FW_TYPE_SDMA6 = 56, /* SDMA6 MI */
GFX_FW_TYPE_SDMA7 = 57, /* SDMA7 MI */
GFX_FW_TYPE_VCN1 = 58, /* VCN1 MI */
+ GFX_FW_TYPE_CAP = 62, /* CAP_FW VG */
GFX_FW_TYPE_MAX
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 0829188c1a5c..0afd610a1263 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_psp.h"
+#include "amdgpu_ras.h"
#include "amdgpu_ucode.h"
#include "soc15_common.h"
#include "psp_v11_0.h"
@@ -65,6 +66,9 @@ MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
/* memory training timeout define */
#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
+/* For large FW files the time to complete can be very long */
+#define USBC_PD_POLLING_LIMIT_S 240
+
static int psp_v11_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -420,7 +424,8 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
- psp_v11_0_reroute_ih(psp);
+ if (!amdgpu_sriov_vf(adev))
+ psp_v11_0_reroute_ih(psp);
ring = &psp->km_ring;
@@ -864,6 +869,11 @@ static int psp_v11_0_ras_trigger_error(struct psp_context *psp,
if (ret)
return -EINVAL;
+ /* If err_event_athub occurs, the error injection was successful;
+    however, the return status from the TA is no longer reliable */
+ if (amdgpu_ras_intr_triggered())
+ return 0;
+
return ras_cmd->ras_status;
}
@@ -1108,6 +1118,82 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
}
+static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, dma_addr_t dma_addr)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t reg_status;
+ int ret, i = 0;
+
+ /* Write lower 32-bit address of the PD Controller FW */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, lower_32_bits(dma_addr));
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret)
+ return ret;
+
+ /* Fire up the interrupt so the PSP can pick up the lower address */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x800000);
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret)
+ return ret;
+
+ reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35);
+
+ if ((reg_status & 0xFFFF) != 0) {
+ DRM_ERROR("Lower address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = %02x...\n",
+ reg_status & 0xFFFF);
+ return -EIO;
+ }
+
+ /* Write upper 32-bit address of the PD Controller FW */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, upper_32_bits(dma_addr));
+
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret)
+ return ret;
+
+ /* Fire up the interrupt so the PSP can pick up the upper address */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x4000000);
+
+ /* FW load can take a very long time */
+ do {
+ msleep(1000);
+ reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35);
+
+ if (reg_status & 0x80000000)
+ goto done;
+
+ } while (++i < USBC_PD_POLLING_LIMIT_S);
+
+ return -ETIME;
+done:
+
+ if ((reg_status & 0xFFFF) != 0) {
+ DRM_ERROR("Upper address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = x%04x\n",
+ reg_status & 0xFFFF);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int psp_v11_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret;
+
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
+
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (!ret)
+ *fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36);
+
+ return ret;
+}
+
static const struct psp_funcs psp_v11_0_funcs = {
.init_microcode = psp_v11_0_init_microcode,
.bootloader_load_kdb = psp_v11_0_bootloader_load_kdb,
@@ -1132,6 +1218,8 @@ static const struct psp_funcs psp_v11_0_funcs = {
.mem_training = psp_v11_0_memory_training,
.ring_get_wptr = psp_v11_0_ring_get_wptr,
.ring_set_wptr = psp_v11_0_ring_set_wptr,
+ .load_usbc_pd_fw = psp_v11_0_load_usbc_pd_fw,
+ .read_usbc_pd_fw = psp_v11_0_read_usbc_pd_fw
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
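Both USB-C PD hooks are published through struct psp_funcs, so callers can stay ASIC-agnostic. A hedged sketch of a version query through the funcs table — the wrapper is illustrative only, not the actual plumbing this series adds in amdgpu_psp.c:

static int example_usbc_pd_fw_ver(struct psp_context *psp, uint32_t *ver)
{
	/* ASICs without USB-C PD support simply leave the hook NULL. */
	if (!psp->funcs || !psp->funcs->read_usbc_pd_fw)
		return -EOPNOTSUPP;

	return psp->funcs->read_usbc_pd_fw(psp, ver);
}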
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 735c43c7daab..43896f4779b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -44,6 +44,7 @@
MODULE_FIRMWARE("amdgpu/vega10_sos.bin");
MODULE_FIRMWARE("amdgpu/vega10_asd.bin");
+MODULE_FIRMWARE("amdgpu/vega10_cap.bin");
MODULE_FIRMWARE("amdgpu/vega12_sos.bin");
MODULE_FIRMWARE("amdgpu/vega12_asd.bin");
@@ -63,6 +64,7 @@ static int psp_v3_1_init_microcode(struct psp_context *psp)
char fw_name[30];
int err = 0;
const struct psp_firmware_header_v1_0 *hdr;
+ struct amdgpu_firmware_info *info = NULL;
DRM_DEBUG("\n");
@@ -112,6 +114,26 @@ static int psp_v3_1_init_microcode(struct psp_context *psp)
adev->psp.asd_start_addr = (uint8_t *)hdr +
le32_to_cpu(hdr->header.ucode_array_offset_bytes);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_VEGA10) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin",
+ chip_name);
+ err = request_firmware(&adev->psp.cap_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+
+ err = amdgpu_ucode_validate(adev->psp.cap_fw);
+ if (err)
+ goto out;
+
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
+ info->ucode_id = AMDGPU_UCODE_ID_CAP;
+ info->fw = adev->psp.cap_fw;
+ hdr = (const struct psp_firmware_header_v1_0 *)
+ adev->psp.cap_fw->data;
+ adev->firmware.fw_size += ALIGN(
+ le32_to_cpu(hdr->header.ucode_size_bytes), PAGE_SIZE);
+ }
+
return 0;
out:
if (err) {
@@ -122,6 +144,8 @@ out:
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
adev->psp.asd_fw = NULL;
+ release_firmware(adev->psp.cap_fw);
+ adev->psp.cap_fw = NULL;
}
return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index e55884d204bd..9159bd46482b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1801,13 +1801,9 @@ static int sdma_v4_0_late_init(void *handle)
struct ras_ih_if ih_info = {
.cb = sdma_v4_0_process_ras_data_cb,
};
- int i;
- /* read back edc counter registers to clear the counters */
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
- for (i = 0; i < adev->sdma.num_instances; i++)
- RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
- }
+ if (adev->sdma.funcs && adev->sdma.funcs->reset_ras_error_count)
+ adev->sdma.funcs->reset_ras_error_count(adev);
if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init)
return adev->sdma.funcs->ras_late_init(adev, &ih_info);
@@ -2572,10 +2568,22 @@ static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev,
return 0;
};
+static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ int i;
+
+ /* read back edc counter registers to clear the counters */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
+ }
+}
+
static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = {
.ras_late_init = amdgpu_sdma_ras_late_init,
.ras_fini = amdgpu_sdma_ras_fini,
.query_ras_error_count = sdma_v4_0_query_ras_error_count,
+ .reset_ras_error_count = sdma_v4_0_reset_ras_error_count,
};
static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 67b9830b7c7e..ebfd2cdf4e65 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -746,11 +746,9 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
sdma_v5_0_enable(adev, true);
}
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
index c902f26cf50d..9bffbab35041 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
@@ -46,8 +46,7 @@
#define I2C_NO_STOP 1
#define I2C_RESTART 2
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev
-#define to_eeprom_control(x) container_of(x, struct amdgpu_ras_eeprom_control, eeprom_accessor)
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
static void smu_v11_0_i2c_set_clock_gating(struct i2c_adapter *control, bool en)
{
@@ -592,7 +591,8 @@ static uint32_t smu_v11_0_i2c_eeprom_write_data(struct i2c_adapter *control,
static void lock_bus(struct i2c_adapter *i2c, unsigned int flags)
{
- struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c);
+ struct amdgpu_device *adev = to_amdgpu_device(i2c);
+ struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
if (!smu_v11_0_i2c_bus_lock(i2c)) {
DRM_ERROR("Failed to lock the bus from SMU");
@@ -610,7 +610,8 @@ static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
{
- struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c);
+ struct amdgpu_device *adev = to_amdgpu_device(i2c);
+ struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
if (!smu_v11_0_i2c_bus_unlock(i2c)) {
DRM_ERROR("Failed to unlock the bus from SMU");
@@ -630,7 +631,8 @@ static int smu_v11_0_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num)
{
int i, ret;
- struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c_adap);
+ struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
+ struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
if (!control->bus_locked) {
DRM_ERROR("I2C bus unlocked, stopping transaction!");
@@ -679,7 +681,7 @@ int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control)
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &smu_v11_0_i2c_eeprom_i2c_algo;
- snprintf(control->name, sizeof(control->name), "RAS EEPROM");
+ snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM");
control->lock_ops = &smu_v11_0_i2c_i2c_lock_ops;
res = i2c_add_adapter(control);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 15f3424a1ff7..a40499d51c93 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -89,6 +89,13 @@
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
+
+/* Vega20/Arcturus register offset change */
+#define mmROM_INDEX_VG20 0x00e4
+#define mmROM_INDEX_VG20_BASE_IDX 0
+#define mmROM_DATA_VG20 0x00e5
+#define mmROM_DATA_VG20_BASE_IDX 0
+
/*
* Indirect registers accessor
*/
@@ -272,7 +279,12 @@ static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
- return adev->clock.spll.reference_freq;
+ u32 reference_clock = adev->clock.spll.reference_freq;
+
+ if (adev->asic_type == CHIP_RAVEN)
+ return reference_clock / 4;
+
+ return reference_clock;
}
@@ -304,6 +316,8 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
{
u32 *dw_ptr;
u32 i, length_dw;
+ uint32_t rom_index_offset;
+ uint32_t rom_data_offset;
if (bios == NULL)
return false;
@@ -316,11 +330,23 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
dw_ptr = (u32 *)bios;
length_dw = ALIGN(length_bytes, 4) / 4;
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
+ rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20);
+ rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20);
+ break;
+ default:
+ rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
+ rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
+ break;
+ }
+
/* set rom index to 0 */
- WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
+ WREG32(rom_index_offset, 0);
/* read out the rom data */
for (i = 0; i < length_dw; i++)
- dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
+ dw_ptr[i] = RREG32(rom_data_offset);
return true;
}
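The Vega20/Arcturus change above only re-points the classic index/data register pair; the access pattern itself is unchanged. A minimal sketch of that pattern, assuming (as the loop above implies) that the ROM data port auto-increments the index on each read:

static void example_read_rom(struct amdgpu_device *adev,
			     u32 index_offset, u32 data_offset,
			     u32 *dst, u32 count_dw)
{
	u32 i;

	/* Select the start of the ROM window. */
	WREG32(index_offset, 0);
	/* Stream the contents; the data port advances the index. */
	for (i = 0; i < count_dw; i++)
		dst[i] = RREG32(data_offset);
}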
@@ -826,6 +852,15 @@ static bool soc15_need_full_reset(struct amdgpu_device *adev)
/* change this when we implement soft reset */
return true;
}
+
+static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev)
+{
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
+ return;
+ /* read back the HDP RAS counter to reset it to 0 */
+ RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
+}
+
static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
uint64_t *count1)
{
@@ -993,6 +1028,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.get_config_memsize = &soc15_get_config_memsize,
.flush_hdp = &soc15_flush_hdp,
.invalidate_hdp = &soc15_invalidate_hdp,
+ .reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count,
.need_full_reset = &soc15_need_full_reset,
.init_doorbell_index = &vega20_doorbell_index_init,
.get_pcie_usage = &vega20_get_pcie_usage,
@@ -1238,6 +1274,10 @@ static int soc15_common_late_init(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_get_irq(adev);
+ if (adev->asic_funcs &&
+ adev->asic_funcs->reset_hdp_ras_error_count)
+ adev->asic_funcs->reset_hdp_ras_error_count(adev);
+
if (adev->nbio.funcs->ras_late_init)
r = adev->nbio.funcs->ras_late_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index d0fb7a67c1a3..b03f950c486c 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -42,6 +42,13 @@ struct soc15_reg_golden {
u32 or_mask;
};
+struct soc15_reg_rlcg {
+ u32 hwip;
+ u32 instance;
+ u32 segment;
+ u32 reg;
+};
+
struct soc15_reg_entry {
uint32_t hwip;
uint32_t inst;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 19e870c79896..c893c645a4b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -70,10 +70,9 @@
} \
} while (0)
-#define AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(a) (amdgpu_sriov_vf((a)) && !amdgpu_sriov_runtime((a)))
#define WREG32_RLC(reg, value) \
do { \
- if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \
+ if (amdgpu_sriov_fullaccess(adev)) { \
uint32_t i = 0; \
uint32_t retries = 50000; \
uint32_t r0 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0; \
@@ -98,7 +97,7 @@
#define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
do { \
uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
- if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \
+ if (amdgpu_sriov_fullaccess(adev)) { \
uint32_t r2 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2; \
uint32_t r3 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3; \
uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; \
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 793bf70e64b1..14d346321a5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -186,6 +186,10 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
if (rsmu_umc_index_state)
umc_v6_1_disable_umc_index_mode(adev);
+ if ((adev->asic_type == CHIP_ARCTURUS) &&
+ amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ DRM_WARN("Fail to disable DF-Cstate.\n");
+
LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
umc_reg_offset = get_umc_6_reg_offset(adev,
umc_inst,
@@ -199,6 +203,10 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
&(err_data->ue_count));
}
+ if ((adev->asic_type == CHIP_ARCTURUS) &&
+ amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ DRM_WARN("Fail to enable DF-Cstate\n");
+
if (rsmu_umc_index_state)
umc_v6_1_enable_umc_index_mode(adev);
}
@@ -228,7 +236,11 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
}
- /* skip error address process if -ENOMEM */
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+
+ if (mc_umc_status == 0)
+ return;
+
if (!err_data->err_addr) {
/* clear umc status */
WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
@@ -236,7 +248,6 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
}
err_rec = &err_data->err_addr[err_data->err_addr_cnt];
- mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
/* calculate error address if ue/ce error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
@@ -288,6 +299,10 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
if (rsmu_umc_index_state)
umc_v6_1_disable_umc_index_mode(adev);
+ if ((adev->asic_type == CHIP_ARCTURUS) &&
+ amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ DRM_WARN("Fail to disable DF-Cstate.\n");
+
LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
umc_reg_offset = get_umc_6_reg_offset(adev,
umc_inst,
@@ -300,6 +315,10 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
umc_inst);
}
+ if ((adev->asic_type == CHIP_ARCTURUS) &&
+ amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ DRM_WARN("Fail to enable DF-Cstate\n");
+
if (rsmu_umc_index_state)
umc_v6_1_enable_umc_index_mode(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 1a24fadd30e2..09b0572b838d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1207,9 +1207,10 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
/* pause/unpause if state is changed */
- if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+ if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
- adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
+ adev->vcn.inst[inst_idx].pause_state.fw_based,
+ adev->vcn.inst[inst_idx].pause_state.jpeg,
new_state->fw_based, new_state->jpeg);
reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
@@ -1258,13 +1259,14 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
}
- adev->vcn.pause_state.fw_based = new_state->fw_based;
+ adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
/* pause/unpause if state is changed */
- if (adev->vcn.pause_state.jpeg != new_state->jpeg) {
+ if (adev->vcn.inst[inst_idx].pause_state.jpeg != new_state->jpeg) {
DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
- adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
+ adev->vcn.inst[inst_idx].pause_state.fw_based,
+ adev->vcn.inst[inst_idx].pause_state.jpeg,
new_state->fw_based, new_state->jpeg);
reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
@@ -1318,7 +1320,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
}
- adev->vcn.pause_state.jpeg = new_state->jpeg;
+ adev->vcn.inst[inst_idx].pause_state.jpeg = new_state->jpeg;
}
return 0;
@@ -1350,7 +1352,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle,
if (enable) {
/* wait for STATUS to clear */
- if (vcn_v1_0_is_idle(handle))
+ if (!vcn_v1_0_is_idle(handle))
return -EBUSY;
vcn_v1_0_enable_clock_gating(adev);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 4f7216788f11..ec8091a661df 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -29,6 +29,7 @@
#include "soc15d.h"
#include "amdgpu_pm.h"
#include "amdgpu_psp.h"
+#include "mmsch_v2_0.h"
#include "vcn/vcn_2_0_0_offset.h"
#include "vcn/vcn_2_0_0_sh_mask.h"
@@ -54,7 +55,7 @@ static int vcn_v2_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
-
+static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
/**
* vcn_v2_0_early_init - set function pointers
*
@@ -67,7 +68,10 @@ static int vcn_v2_0_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->vcn.num_vcn_inst = 1;
- adev->vcn.num_enc_rings = 2;
+ if (amdgpu_sriov_vf(adev))
+ adev->vcn.num_enc_rings = 1;
+ else
+ adev->vcn.num_enc_rings = 2;
vcn_v2_0_set_dec_ring_funcs(adev);
vcn_v2_0_set_enc_ring_funcs(adev);
@@ -154,7 +158,10 @@ static int vcn_v2_0_sw_init(void *handle)
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
ring->use_doorbell = true;
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
+ if (!amdgpu_sriov_vf(adev))
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
+ else
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
sprintf(ring->name, "vcn_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
if (r)
@@ -163,6 +170,10 @@ static int vcn_v2_0_sw_init(void *handle)
adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
+ r = amdgpu_virt_alloc_mm_table(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -178,6 +189,8 @@ static int vcn_v2_0_sw_fini(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_virt_free_mm_table(adev);
+
r = amdgpu_vcn_suspend(adev);
if (r)
return r;
@@ -203,6 +216,9 @@ static int vcn_v2_0_hw_init(void *handle)
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
ring->doorbell_index, 0);
+ if (amdgpu_sriov_vf(adev))
+ vcn_v2_0_start_sriov(adev);
+
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
@@ -304,6 +320,9 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
uint32_t offset;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
@@ -448,6 +467,9 @@ static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* UVD disable CGC */
data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -606,6 +628,9 @@ static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data = 0;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* enable UVD CGC */
data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -658,6 +683,9 @@ static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
uint32_t data = 0;
int ret;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
@@ -705,6 +733,9 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
uint32_t data = 0;
int ret;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
/* Before power off, this indicator has to be turned on */
data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
@@ -1137,9 +1168,9 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
int ret_code;
/* pause/unpause if state is changed */
- if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+ if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d -> %d",
- adev->vcn.pause_state.fw_based, new_state->fw_based);
+ adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
@@ -1185,7 +1216,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
}
- adev->vcn.pause_state.fw_based = new_state->fw_based;
+ adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
return 0;
@@ -1215,9 +1246,12 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE);
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
if (enable) {
/* wait for STATUS to clear */
- if (vcn_v2_0_is_idle(handle))
+ if (!vcn_v2_0_is_idle(handle))
return -EBUSY;
vcn_v2_0_enable_clock_gating(adev);
} else {
@@ -1631,6 +1665,9 @@ int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 4);
if (r)
@@ -1667,6 +1704,11 @@ static int vcn_v2_0_set_powergating_state(void *handle,
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_sriov_vf(adev)) {
+ adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ return 0;
+ }
+
if (state == adev->vcn.cur_state)
return 0;
@@ -1680,6 +1722,215 @@ static int vcn_v2_0_set_powergating_state(void *handle,
return ret;
}
+static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev,
+ struct amdgpu_mm_table *table)
+{
+ uint32_t data = 0, loop;
+ uint64_t addr = table->gpu_addr;
+ struct mmsch_v2_0_init_header *header;
+ uint32_t size;
+ int i;
+
+ header = (struct mmsch_v2_0_init_header *)table->cpu_addr;
+ size = header->header_size + header->vcn_table_size;
+
+ /* 1, write the GPU mc address of the memory descriptor location
+ * to the mmMMSCH_VF_CTX_ADDR_LO/HI registers
+ */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
+
+ /* 2, update vmid of descriptor */
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
+ data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ /* use domain0 for MM scheduler */
+ data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
+
+ /* 3, notify mmsch about the size of this descriptor */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
+
+ /* 4, set resp to zero */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
+
+ adev->vcn.inst->ring_dec.wptr = 0;
+ adev->vcn.inst->ring_dec.wptr_old = 0;
+ vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec);
+
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ adev->vcn.inst->ring_enc[i].wptr = 0;
+ adev->vcn.inst->ring_enc[i].wptr_old = 0;
+ vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]);
+ }
+
+ /* 5, kick off the initialization and wait until
+ * mmMMSCH_VF_MAILBOX_RESP becomes non-zero
+ */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
+
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+ loop = 1000;
+ while ((data & 0x10000002) != 0x10000002) {
+ udelay(10);
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+ loop--;
+ if (!loop)
+ break;
+ }
+
+ if (!loop) {
+ DRM_ERROR("failed to init MMSCH, " \
+ "mmMMSCH_VF_MAILBOX_RESP = 0x%08x\n", data);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
+{
+ int r;
+ uint32_t tmp;
+ struct amdgpu_ring *ring;
+ uint32_t offset, size;
+ uint32_t table_size = 0;
+ struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
+ struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
+ struct mmsch_v2_0_cmd_direct_polling direct_poll = { {0} };
+ struct mmsch_v2_0_cmd_end end = { {0} };
+ struct mmsch_v2_0_init_header *header;
+ uint32_t *init_table = adev->virt.mm_table.cpu_addr;
+ uint8_t i = 0;
+
+ header = (struct mmsch_v2_0_init_header *)init_table;
+ direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
+ direct_rd_mod_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+ direct_poll.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_POLLING;
+ end.cmd_header.command_type = MMSCH_COMMAND__END;
+
+ if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {
+ header->version = MMSCH_VERSION;
+ header->header_size = sizeof(struct mmsch_v2_0_init_header) >> 2;
+
+ header->vcn_table_offset = header->header_size;
+
+ init_table += header->vcn_table_offset;
+
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+
+ MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
+ 0xFFFFFFFF, 0x00000004);
+
+ /* mc resume */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ tmp = AMDGPU_UCODE_ID_VCN;
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ adev->firmware.ucode[tmp].tmr_mc_addr_lo);
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ adev->firmware.ucode[tmp].tmr_mc_addr_hi);
+ offset = 0;
+ } else {
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst->gpu_addr));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst->gpu_addr));
+ offset = size;
+ }
+
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
+ 0);
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
+ size);
+
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst->gpu_addr + offset));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst->gpu_addr + offset));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
+ 0);
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
+ AMDGPU_VCN_STACK_SIZE);
+
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst->gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst->gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
+ 0);
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
+ AMDGPU_VCN_CONTEXT_SIZE);
+
+ for (r = 0; r < adev->vcn.num_enc_rings; ++r) {
+ ring = &adev->vcn.inst->ring_enc[r];
+ ring->wptr = 0;
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
+ lower_32_bits(ring->gpu_addr));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
+ upper_32_bits(ring->gpu_addr));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
+ ring->ring_size / 4);
+ }
+
+ ring = &adev->vcn.inst->ring_dec;
+ ring->wptr = 0;
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
+ lower_32_bits(ring->gpu_addr));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
+ upper_32_bits(ring->gpu_addr));
+ /* force RBC into idle state */
+ tmp = order_base_2(ring->ring_size);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
+
+ /* add end packet */
+ tmp = sizeof(struct mmsch_v2_0_cmd_end);
+ memcpy((void *)init_table, &end, tmp);
+ table_size += (tmp / 4);
+ header->vcn_table_size = table_size;
+
+ }
+ return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
+}
+
static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.name = "vcn_v2_0",
.early_init = vcn_v2_0_early_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 70fae7977f8f..c6363f5ad564 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -74,29 +74,30 @@ static int amdgpu_ih_clientid_vcns[] = {
static int vcn_v2_5_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_ARCTURUS) {
- u32 harvest;
- int i;
-
- adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
- if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
- adev->vcn.harvest_config |= 1 << i;
- }
-
- if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
- AMDGPU_VCN_HARVEST_VCN1))
- /* both instances are harvested, disable the block */
- return -ENOENT;
- } else
- adev->vcn.num_vcn_inst = 1;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.num_vcn_inst = 2;
adev->vcn.harvest_config = 0;
adev->vcn.num_enc_rings = 1;
} else {
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ u32 harvest;
+ int i;
+
+ adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
+ if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
+ adev->vcn.harvest_config |= 1 << i;
+ }
+
+ if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
+ AMDGPU_VCN_HARVEST_VCN1))
+ /* both instances are harvested, disable the block */
+ return -ENOENT;
+ } else
+ adev->vcn.num_vcn_inst = 1;
+
adev->vcn.num_enc_rings = 2;
}
@@ -1367,9 +1368,9 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
int ret_code;
/* pause/unpause if state is changed */
- if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+ if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d -> %d",
- adev->vcn.pause_state.fw_based, new_state->fw_based);
+ adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) &
(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
@@ -1407,14 +1408,14 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS,
- 0x0, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
}
} else {
/* unpause dpg, no need to wait */
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
}
- adev->vcn.pause_state.fw_based = new_state->fw_based;
+ adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
return 0;
@@ -1672,7 +1673,7 @@ static int vcn_v2_5_set_clockgating_state(void *handle,
return 0;
if (enable) {
- if (vcn_v2_5_is_idle(handle))
+ if (!vcn_v2_5_is_idle(handle))
return -EBUSY;
vcn_v2_5_enable_clock_gating(adev);
} else {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 3f0300e53727..0ec5f25adf56 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -127,6 +127,8 @@ static int kfd_open(struct inode *inode, struct file *filep)
return PTR_ERR(process);
if (kfd_is_locked()) {
+ dev_dbg(kfd_device, "kfd is locked!\n"
+ "process %d unreferenced", process->pasid);
kfd_unref_process(process);
return -EAGAIN;
}
@@ -1167,7 +1169,7 @@ static int kfd_ioctl_get_tile_config(struct file *filep,
if (!dev)
return -EINVAL;
- dev->kfd2kgd->get_tile_config(dev->kgd, &config);
+ amdgpu_amdkfd_get_tile_config(dev->kgd, &config);
args->gb_addr_config = config.gb_addr_config;
args->num_banks = config.num_banks;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 2a9e40131735..d5386f15c4a5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -648,6 +648,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
if (kfd->kfd2kgd->get_hive_id)
kfd->hive_id = kfd->kfd2kgd->get_hive_id(kfd->kgd);
+ if (kfd->kfd2kgd->get_unique_id)
+ kfd->unique_id = kfd->kfd2kgd->get_unique_id(kfd->kgd);
+
if (kfd_interrupt_init(kfd)) {
dev_err(kfd_device, "Error initializing interrupts\n");
goto kfd_interrupt_error;
@@ -710,7 +713,7 @@ out:
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
if (kfd->init_complete) {
- kgd2kfd_suspend(kfd);
+ kgd2kfd_suspend(kfd, false);
device_queue_manager_uninit(kfd->dqm);
kfd_interrupt_exit(kfd);
kfd_topology_remove_device(kfd);
@@ -731,7 +734,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
kfd->dqm->ops.pre_reset(kfd->dqm);
- kgd2kfd_suspend(kfd);
+ kgd2kfd_suspend(kfd, false);
kfd_signal_reset_event(kfd);
return 0;
@@ -765,21 +768,23 @@ bool kfd_is_locked(void)
return (atomic_read(&kfd_locked) > 0);
}
-void kgd2kfd_suspend(struct kfd_dev *kfd)
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
if (!kfd->init_complete)
return;
- /* For first KFD device suspend all the KFD processes */
- if (atomic_inc_return(&kfd_locked) == 1)
- kfd_suspend_all_processes();
+ /* for runtime suspend, skip locking kfd */
+ if (!run_pm) {
+ /* For first KFD device suspend all the KFD processes */
+ if (atomic_inc_return(&kfd_locked) == 1)
+ kfd_suspend_all_processes();
+ }
kfd->dqm->ops.stop(kfd->dqm);
-
kfd_iommu_suspend(kfd);
}
-int kgd2kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
int ret, count;
@@ -790,10 +795,13 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
if (ret)
return ret;
- count = atomic_dec_return(&kfd_locked);
- WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
- if (count == 0)
- ret = kfd_resume_all_processes();
+ /* for runtime resume, skip unlocking kfd */
+ if (!run_pm) {
+ count = atomic_dec_return(&kfd_locked);
+ WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
+ if (count == 0)
+ ret = kfd_resume_all_processes();
+ }
return ret;
}
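The run_pm flag separates the two suspend flavors: system suspend still bumps the global kfd_locked count and evicts every KFD process, while runtime PM skips the process eviction and only quiesces the hardware (the dqm stop and IOMMU suspend happen in both paths). A sketch of the two call sites as the amdgpu side might issue them (context around the calls is elided):

/* System (S3) suspend/resume: lock KFD and evict user processes. */
kgd2kfd_suspend(kfd, false);
kgd2kfd_resume(kfd, false);

/* Runtime PM: leave processes running; just quiesce the queues. */
kgd2kfd_suspend(kfd, true);
kgd2kfd_resume(kfd, true);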
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 80d22bf702e8..77ea0f0cb163 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -78,14 +78,14 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
/* queue is available for KFD usage if bit is 1 */
for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
if (test_bit(pipe_offset + i,
- dqm->dev->shared_resources.queue_bitmap))
+ dqm->dev->shared_resources.cp_queue_bitmap))
return true;
return false;
}
-unsigned int get_queues_num(struct device_queue_manager *dqm)
+unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
{
- return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
+ return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
KGD_MAX_QUEUES);
}
@@ -109,6 +109,11 @@ static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
return dqm->dev->device_info->num_xgmi_sdma_engines;
}
+static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
+{
+ return get_num_sdma_engines(dqm) + get_num_xgmi_sdma_engines(dqm);
+}
+
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
return dqm->dev->device_info->num_sdma_engines
@@ -132,6 +137,22 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
qpd->sh_mem_bases);
}
+void increment_queue_count(struct device_queue_manager *dqm,
+ enum kfd_queue_type type)
+{
+ dqm->active_queue_count++;
+ if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+ dqm->active_cp_queue_count++;
+}
+
+void decrement_queue_count(struct device_queue_manager *dqm,
+ enum kfd_queue_type type)
+{
+ dqm->active_queue_count--;
+ if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+ dqm->active_cp_queue_count--;
+}
+
static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
{
struct kfd_dev *dev = qpd->dqm->dev;
@@ -281,8 +302,6 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
struct mqd_manager *mqd_mgr;
int retval;
- print_queue(q);
-
dqm_lock(dqm);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
@@ -359,12 +378,7 @@ add_queue_to_list:
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
if (q->properties.is_active)
- dqm->queue_count++;
-
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
- dqm->sdma_queue_count++;
- else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
- dqm->xgmi_sdma_queue_count++;
+ increment_queue_count(dqm, q->properties.type);
/*
* Unconditionally increment this counter, regardless of the queue's
@@ -446,15 +460,13 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
- if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
deallocate_hqd(dqm, q);
- } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- dqm->sdma_queue_count--;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q);
- } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- dqm->xgmi_sdma_queue_count--;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
- } else {
+ else {
pr_debug("q->properties.type %d is invalid\n",
q->properties.type);
return -EINVAL;
@@ -494,7 +506,7 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
}
qpd->queue_count--;
if (q->properties.is_active)
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
return retval;
}
@@ -563,13 +575,13 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
/*
* check active state vs. the previous state and modify
* counter accordingly. map_queues_cpsch uses the
- * dqm->queue_count to determine whether a new runlist must be
+ * dqm->active_queue_count to determine whether a new runlist must be
* uploaded.
*/
if (q->properties.is_active && !prev_active)
- dqm->queue_count++;
+ increment_queue_count(dqm, q->properties.type);
else if (!q->properties.is_active && prev_active)
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
retval = map_queues_cpsch(dqm);
@@ -618,7 +630,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = false;
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
continue;
@@ -662,7 +674,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
continue;
q->properties.is_active = false;
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
}
retval = execute_queues_cpsch(dqm,
qpd->is_debug ?
@@ -731,7 +743,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = true;
- dqm->queue_count++;
+ increment_queue_count(dqm, q->properties.type);
if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
continue;
@@ -786,7 +798,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
continue;
q->properties.is_active = true;
- dqm->queue_count++;
+ increment_queue_count(dqm, q->properties.type);
}
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -899,16 +911,15 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues);
- dqm->queue_count = dqm->next_pipe_to_allocate = 0;
- dqm->sdma_queue_count = 0;
- dqm->xgmi_sdma_queue_count = 0;
+ dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
+ dqm->active_cp_queue_count = 0;
for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
int pipe_offset = pipe * get_queues_per_pipe(dqm);
for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
if (test_bit(pipe_offset + queue,
- dqm->dev->shared_resources.queue_bitmap))
+ dqm->dev->shared_resources.cp_queue_bitmap))
dqm->allocated_queues[pipe] |= 1 << queue;
}
@@ -924,7 +935,7 @@ static void uninitialize(struct device_queue_manager *dqm)
{
int i;
- WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
+ WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
kfree(dqm->allocated_queues);
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
@@ -966,8 +977,11 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
int bit;
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- if (dqm->sdma_bitmap == 0)
+ if (dqm->sdma_bitmap == 0) {
+ pr_err("No more SDMA queue to allocate\n");
return -ENOMEM;
+ }
+
bit = __ffs64(dqm->sdma_bitmap);
dqm->sdma_bitmap &= ~(1ULL << bit);
q->sdma_id = bit;
@@ -976,8 +990,10 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
q->properties.sdma_queue_id = q->sdma_id /
get_num_sdma_engines(dqm);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- if (dqm->xgmi_sdma_bitmap == 0)
+ if (dqm->xgmi_sdma_bitmap == 0) {
+ pr_err("No more XGMI SDMA queue to allocate\n");
return -ENOMEM;
+ }
bit = __ffs64(dqm->xgmi_sdma_bitmap);
dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
q->sdma_id = bit;
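
allocate_sdma_queue() treats each bitmap as a 64-bit free mask: __ffs64() picks the lowest set bit, the bit is cleared to claim the slot, and the engine/queue ids fall out of a modulo/divide by the engine count. A self-contained sketch of the same discipline, using __builtin_ctzll() in place of the kernel's __ffs64() and an invented free_slot() helper for symmetry:

#include <stdint.h>
#include <stdio.h>

static uint64_t sdma_bitmap = ~0ULL >> (64 - 8);	/* say, 8 queues free */

static int alloc_slot(void)
{
	int bit;

	if (sdma_bitmap == 0)
		return -1;			/* -ENOMEM in the driver */
	bit = __builtin_ctzll(sdma_bitmap);	/* lowest free slot */
	sdma_bitmap &= ~(1ULL << bit);
	return bit;
}

static void free_slot(int bit)
{
	sdma_bitmap |= 1ULL << bit;
}

int main(void)
{
	int a = alloc_slot();
	int b = alloc_slot();

	printf("got slots %d and %d\n", a, b);	/* 0 and 1 */
	free_slot(a);
	printf("reuse: %d\n", alloc_slot());	/* 0 again */
	return 0;
}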
@@ -1029,7 +1045,7 @@ static int set_sched_resources(struct device_queue_manager *dqm)
mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
/ dqm->dev->shared_resources.num_pipe_per_mec;
- if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
+ if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
continue;
/* only acquire queues from the first MEC */
@@ -1064,9 +1080,9 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues);
- dqm->queue_count = dqm->processes_count = 0;
- dqm->sdma_queue_count = 0;
- dqm->xgmi_sdma_queue_count = 0;
+ dqm->active_queue_count = dqm->processes_count = 0;
+ dqm->active_cp_queue_count = 0;
+
dqm->active_runlist = false;
dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
@@ -1158,7 +1174,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->total_queue_count);
list_add(&kq->list, &qpd->priv_queue_list);
- dqm->queue_count++;
+ increment_queue_count(dqm, kq->queue->properties.type);
qpd->is_debug = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
dqm_unlock(dqm);
@@ -1172,7 +1188,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
{
dqm_lock(dqm);
list_del(&kq->list);
- dqm->queue_count--;
+ decrement_queue_count(dqm, kq->queue->properties.type);
qpd->is_debug = false;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
/*
@@ -1238,13 +1254,9 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
- dqm->sdma_queue_count++;
- else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
- dqm->xgmi_sdma_queue_count++;
-
if (q->properties.is_active) {
- dqm->queue_count++;
+ increment_queue_count(dqm, q->properties.type);
+
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
}
@@ -1298,20 +1310,6 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
return 0;
}
-static int unmap_sdma_queues(struct device_queue_manager *dqm)
-{
- int i, retval = 0;
-
- for (i = 0; i < dqm->dev->device_info->num_sdma_engines +
- dqm->dev->device_info->num_xgmi_sdma_engines; i++) {
- retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i);
- if (retval)
- return retval;
- }
- return retval;
-}
-
/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
@@ -1319,7 +1317,7 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
if (!dqm->sched_running)
return 0;
- if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
+ if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
return 0;
if (dqm->active_runlist)
return 0;
@@ -1349,12 +1347,6 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
if (!dqm->active_runlist)
return retval;
- pr_debug("Before destroying queues, sdma queue count is : %u, xgmi sdma queue count is : %u\n",
- dqm->sdma_queue_count, dqm->xgmi_sdma_queue_count);
-
- if (dqm->sdma_queue_count > 0 || dqm->xgmi_sdma_queue_count)
- unmap_sdma_queues(dqm);
-
retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
filter, filter_param, false, 0);
if (retval)
@@ -1427,18 +1419,15 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
deallocate_doorbell(qpd, q);
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- dqm->sdma_queue_count--;
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q);
- } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- dqm->xgmi_sdma_queue_count--;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
- }
list_del(&q->list);
qpd->queue_count--;
if (q->properties.is_active) {
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
if (retval == -ETIME)
@@ -1648,7 +1637,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
/* Clean all kernel queues */
list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
list_del(&kq->list);
- dqm->queue_count--;
+ decrement_queue_count(dqm, kq->queue->properties.type);
qpd->is_debug = false;
dqm->total_queue_count--;
filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
@@ -1656,16 +1645,13 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
/* Clear all user mode queues */
list_for_each_entry(q, &qpd->queues_list, list) {
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- dqm->sdma_queue_count--;
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q);
- } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- dqm->xgmi_sdma_queue_count--;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
- }
if (q->properties.is_active)
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
dqm->total_queue_count--;
}
@@ -1742,14 +1728,13 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
struct kfd_dev *dev = dqm->dev;
struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
- (dev->device_info->num_sdma_engines +
- dev->device_info->num_xgmi_sdma_engines) *
+ get_num_all_sdma_engines(dqm) *
dev->device_info->num_sdma_queues_per_engine +
dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
&(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
- (void *)&(mem_obj->cpu_ptr), true);
+ (void *)&(mem_obj->cpu_ptr), false);
return retval;
}
@@ -1979,7 +1964,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
if (!test_bit(pipe_offset + queue,
- dqm->dev->shared_resources.queue_bitmap))
+ dqm->dev->shared_resources.cp_queue_bitmap))
continue;
r = dqm->dev->kfd2kgd->hqd_dump(
@@ -1995,8 +1980,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
}
}
- for (pipe = 0; pipe < get_num_sdma_engines(dqm) +
- get_num_xgmi_sdma_engines(dqm); pipe++) {
+ for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
for (queue = 0;
queue < dqm->dev->device_info->num_sdma_queues_per_engine;
queue++) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 871d3b628d2d..50d919f814e9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -180,9 +180,8 @@ struct device_queue_manager {
struct list_head queues;
unsigned int saved_flags;
unsigned int processes_count;
- unsigned int queue_count;
- unsigned int sdma_queue_count;
- unsigned int xgmi_sdma_queue_count;
+ unsigned int active_queue_count;
+ unsigned int active_cp_queue_count;
unsigned int total_queue_count;
unsigned int next_pipe_to_allocate;
unsigned int *allocated_queues;
@@ -219,7 +218,7 @@ void device_queue_manager_init_v10_navi10(
struct device_queue_manager_asic_ops *asic_ops);
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
-unsigned int get_queues_num(struct device_queue_manager *dqm);
+unsigned int get_cp_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 1f8365575b12..15476fca8fa6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -187,7 +187,7 @@ static int create_signal_event(struct file *devkfd,
if (p->signal_mapped_size &&
p->signal_event_count == p->signal_mapped_size / 8) {
if (!p->signal_event_limit_reached) {
- pr_warn("Signal event wasn't created because limit was reached\n");
+ pr_debug("Signal event wasn't created because limit was reached\n");
p->signal_event_limit_reached = true;
}
return -ENOSPC;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index bb77b8890e77..78714f9a8b11 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -316,7 +316,7 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
{
/*
* node id couldn't be 0 - the three MSB bits of
- * aperture shoudn't be 0
+ * aperture shouldn't be 0
*/
pdd->lds_base = MAKE_LDS_APP_BASE_VI();
pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 436b7f518979..48cda3073b70 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -87,9 +87,21 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
int retval;
struct kfd_mem_obj *mqd_mem_obj = NULL;
- /* From V9, for CWSR, the control stack is located on the next page
- * boundary after the mqd, we will use the gtt allocation function
- * instead of sub-allocation function.
+ /* For V9 only, due to a HW bug, the control stack of a user mode
+ * compute queue needs to be allocated just behind the page boundary
+ * of its regular MQD buffer. So we allocate an enlarged MQD buffer:
+ * the first page of the buffer serves as the regular MQD buffer
+ * purpose and the remaining is for control stack. Although the two
+ * parts are in the same buffer object, they need different memory
+ * types: MQD part needs UC (uncached) as usual, while control stack
+ * needs NC (non coherent), which is different from the UC type which
+ * is used when control stack is allocated in user space.
+ *
+ * Because of all those, we use the gtt allocation function instead
+ * of sub-allocation function for this enlarged MQD buffer. Moreover,
+ * in order to achieve two memory types in a single buffer object, we
+ * pass a special bo flag AMDGPU_GEM_CREATE_CP_MQD_GFX9 to instruct
+ * amdgpu memory functions to do so.
*/
if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index dc406e6dee23..efdb75e7677b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -47,9 +47,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
struct kfd_dev *dev = pm->dqm->dev;
process_count = pm->dqm->processes_count;
- queue_count = pm->dqm->queue_count;
- compute_queue_count = queue_count - pm->dqm->sdma_queue_count -
- pm->dqm->xgmi_sdma_queue_count;
+ queue_count = pm->dqm->active_queue_count;
+ compute_queue_count = pm->dqm->active_cp_queue_count;
/* check if there is over subscription
* Note: the arbitration between the number of VMIDs and
@@ -62,7 +61,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
max_proc_per_quantum = dev->max_proc_per_quantum;
if ((process_count > max_proc_per_quantum) ||
- compute_queue_count > get_queues_num(pm->dqm)) {
+ compute_queue_count > get_cp_queues_num(pm->dqm)) {
*over_subscription = true;
pr_debug("Over subscribed runlist\n");
}
@@ -141,7 +140,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
pm->ib_size_bytes = alloc_size_bytes;
pr_debug("Building runlist ib process count: %d queues count %d\n",
- pm->dqm->processes_count, pm->dqm->queue_count);
+ pm->dqm->processes_count, pm->dqm->active_queue_count);
/* build the run list ib packet */
list_for_each_entry(cur, queues, list) {
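
With the new counters, the oversubscription test in pm_calc_rlib_size() reduces to two comparisons against fixed capacities. A minimal standalone version of that predicate (all inputs here are made-up sample values):

#include <stdbool.h>
#include <stdio.h>

static bool over_subscribed(unsigned procs, unsigned active_cp_queues,
			    unsigned max_proc_per_quantum,
			    unsigned hw_cp_queues)
{
	return procs > max_proc_per_quantum ||
	       active_cp_queues > hw_cp_queues;
}

int main(void)
{
	/* 3 processes, 70 active CP queues against a 64-queue budget */
	printf("%d\n", over_subscribed(3, 70, 32, 64));	/* 1: chain runlist */
	return 0;
}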
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 6af1b5881f43..4a3049841086 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -41,6 +41,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_device.h>
#include <kgd_kfd_interface.h>
+#include <linux/swap.h>
#include "amd_shared.h"
@@ -293,6 +294,9 @@ struct kfd_dev {
/* xGMI */
uint64_t hive_id;
+
+ /* UUID */
+ uint64_t unique_id;
bool pci_atomic_requested;
@@ -502,6 +506,9 @@ struct queue {
struct kfd_process *process;
struct kfd_dev *device;
void *gws;
+
+ /* procfs */
+ struct kobject kobj;
};
/*
@@ -646,6 +653,7 @@ struct kfd_process_device {
* function.
*/
bool already_dequeued;
+ bool runtime_inuse;
/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
enum kfd_pdd_bound bound;
@@ -729,6 +737,7 @@ struct kfd_process {
/* Kobj for our procfs */
struct kobject *kobj;
+ struct kobject *kobj_queues;
struct attribute attr_pasid;
};
@@ -835,6 +844,8 @@ extern struct device *kfd_device;
/* KFD's procfs */
void kfd_procfs_init(void);
void kfd_procfs_shutdown(void);
+int kfd_procfs_add_queue(struct queue *q);
+void kfd_procfs_del_queue(struct queue *q);
/* Topology */
int kfd_topology_init(void);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 25b90f70aecd..fe0cd49d4ea7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -31,6 +31,7 @@
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
+#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu.h"
@@ -132,6 +133,88 @@ void kfd_procfs_shutdown(void)
}
}
+static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct queue *q = container_of(kobj, struct queue, kobj);
+
+ if (!strcmp(attr->name, "size"))
+ return snprintf(buffer, PAGE_SIZE, "%llu",
+ q->properties.queue_size);
+ else if (!strcmp(attr->name, "type"))
+ return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
+ else if (!strcmp(attr->name, "gpuid"))
+ return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
+ else
+ pr_err("Invalid attribute");
+
+ return 0;
+}
+
+static struct attribute attr_queue_size = {
+ .name = "size",
+ .mode = KFD_SYSFS_FILE_MODE
+};
+
+static struct attribute attr_queue_type = {
+ .name = "type",
+ .mode = KFD_SYSFS_FILE_MODE
+};
+
+static struct attribute attr_queue_gpuid = {
+ .name = "gpuid",
+ .mode = KFD_SYSFS_FILE_MODE
+};
+
+static struct attribute *procfs_queue_attrs[] = {
+ &attr_queue_size,
+ &attr_queue_type,
+ &attr_queue_gpuid,
+ NULL
+};
+
+static const struct sysfs_ops procfs_queue_ops = {
+ .show = kfd_procfs_queue_show,
+};
+
+static struct kobj_type procfs_queue_type = {
+ .sysfs_ops = &procfs_queue_ops,
+ .default_attrs = procfs_queue_attrs,
+};
+
+int kfd_procfs_add_queue(struct queue *q)
+{
+ struct kfd_process *proc;
+ int ret;
+
+ if (!q || !q->process)
+ return -EINVAL;
+ proc = q->process;
+
+ /* Create proc/<pid>/queues/<queue id> folder */
+ if (!proc->kobj_queues)
+ return -EFAULT;
+ ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
+ proc->kobj_queues, "%u", q->properties.queue_id);
+ if (ret < 0) {
+ pr_warn("Creating proc/<pid>/queues/%u failed",
+ q->properties.queue_id);
+ kobject_put(&q->kobj);
+ return ret;
+ }
+
+ return 0;
+}
+
+void kfd_procfs_del_queue(struct queue *q)
+{
+ if (!q)
+ return;
+
+ kobject_del(&q->kobj);
+ kobject_put(&q->kobj);
+}
+
int kfd_process_create_wq(void)
{
if (!kfd_process_wq)
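
kfd_procfs_add_queue() exposes size, type and gpuid per queue through the kobject added above. Assuming the KFD procfs root sits at /sys/class/kfd/kfd/proc (the path is an assumption here, as are the pid and queue id), the attributes read like any other sysfs file:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* assumed layout: <root>/<pid>/queues/<queue id>/<attr> */
	const char *path = "/sys/class/kfd/kfd/proc/1234/queues/0/size";
	char buf[64];
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("queue size: %s\n", buf);
	}
	close(fd);
	return 0;
}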
@@ -244,10 +327,10 @@ err_alloc_mem:
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
struct qcm_process_device *qpd = &pdd->qpd;
- uint32_t flags = ALLOC_MEM_FLAGS_GTT |
- ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
- ALLOC_MEM_FLAGS_WRITABLE |
- ALLOC_MEM_FLAGS_EXECUTABLE;
+ uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
+ KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
+ KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
+ KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
void *kaddr;
int ret;
@@ -323,6 +406,11 @@ struct kfd_process *kfd_create_process(struct file *filep)
if (ret)
pr_warn("Creating pasid for pid %d failed",
(int)process->lead_thread->pid);
+
+ process->kobj_queues = kobject_create_and_add("queues",
+ process->kobj);
+ if (!process->kobj_queues)
+ pr_warn("Creating KFD proc/queues folder failed");
}
out:
if (!IS_ERR(process))
@@ -440,6 +528,16 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
kfree(pdd->qpd.doorbell_bitmap);
idr_destroy(&pdd->alloc_idr);
+ /*
+ * before destroying pdd, make sure to report availability
+ * for auto suspend
+ */
+ if (pdd->runtime_inuse) {
+ pm_runtime_mark_last_busy(pdd->dev->ddev->dev);
+ pm_runtime_put_autosuspend(pdd->dev->ddev->dev);
+ pdd->runtime_inuse = false;
+ }
+
kfree(pdd);
}
}
@@ -457,6 +555,9 @@ static void kfd_process_wq_release(struct work_struct *work)
/* Remove the procfs files */
if (p->kobj) {
sysfs_remove_file(p->kobj, &p->attr_pasid);
+ kobject_del(p->kobj_queues);
+ kobject_put(p->kobj_queues);
+ p->kobj_queues = NULL;
kobject_del(p->kobj);
kobject_put(p->kobj);
p->kobj = NULL;
@@ -540,6 +641,11 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
/* Indicate to other users that MM is no longer valid */
p->mm = NULL;
+ /* Signal the eviction fence after user mode queues are
+ * destroyed. This allows any BOs to be freed without
+ * triggering pointless evictions or waiting for fences.
+ */
+ dma_fence_signal(p->ef);
mutex_unlock(&p->mutex);
@@ -591,8 +697,9 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
struct kfd_dev *dev = pdd->dev;
struct qcm_process_device *qpd = &pdd->qpd;
- uint32_t flags = ALLOC_MEM_FLAGS_GTT |
- ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
+ uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
+ | KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
+ | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
void *kaddr;
int ret;
@@ -754,6 +861,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
pdd->process = p;
pdd->bound = PDD_UNBOUND;
pdd->already_dequeued = false;
+ pdd->runtime_inuse = false;
list_add(&pdd->per_device_list, &p->per_device_data);
/* Init idr used for memory handle translation */
@@ -843,15 +951,41 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
return ERR_PTR(-ENOMEM);
}
+ /*
+ * signal the runtime-PM system to auto-resume, and prevent
+ * further runtime suspends from the time the device pdd is
+ * created until it is destroyed.
+ */
+ if (!pdd->runtime_inuse) {
+ err = pm_runtime_get_sync(dev->ddev->dev);
+ if (err < 0)
+ return ERR_PTR(err);
+ }
+
err = kfd_iommu_bind_process_to_device(pdd);
if (err)
- return ERR_PTR(err);
+ goto out;
err = kfd_process_device_init_vm(pdd, NULL);
if (err)
- return ERR_PTR(err);
+ goto out;
+
+ /*
+ * make sure the runtime-PM usage counter is incremented just
+ * once per pdd
+ */
+ pdd->runtime_inuse = true;
return pdd;
+
+out:
+ /* balance runpm reference count and exit with error */
+ if (!pdd->runtime_inuse) {
+ pm_runtime_mark_last_busy(dev->ddev->dev);
+ pm_runtime_put_autosuspend(dev->ddev->dev);
+ }
+
+ return ERR_PTR(err);
}
struct kfd_process_device *kfd_get_first_process_device_data(
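
The error path above must drop the runtime-PM reference only when this call took it, which is exactly what the runtime_inuse flag encodes. A compile-and-run skeleton of that invariant, with runtime_get()/runtime_put() as stand-ins for pm_runtime_get_sync()/pm_runtime_put_autosuspend():

#include <stdbool.h>
#include <stdio.h>

static int usage_count;

static int runtime_get(void)  { usage_count++; return 0; }
static void runtime_put(void) { usage_count--; }

struct pdd { bool runtime_inuse; };

static int bind_pdd(struct pdd *pdd, bool fail_init)
{
	int err;

	/* take one reference, but only the first time for this pdd */
	if (!pdd->runtime_inuse) {
		err = runtime_get();
		if (err < 0)
			return err;
	}

	if (fail_init) {		/* e.g. IOMMU bind or VM init failed */
		err = -1;
		goto out;
	}

	pdd->runtime_inuse = true;	/* counted exactly once per pdd */
	return 0;

out:
	if (!pdd->runtime_inuse)	/* balance the reference we took */
		runtime_put();
	return err;
}

int main(void)
{
	struct pdd p = { false };

	bind_pdd(&p, true);
	printf("usage after failed bind: %d\n", usage_count);	/* 0 */
	bind_pdd(&p, false);
	printf("usage after good bind:   %d\n", usage_count);	/* 1 */
	return 0;
}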
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 31fcd1b51f00..084c35f55d59 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -241,23 +241,18 @@ int pqm_create_queue(struct process_queue_manager *pqm,
switch (type) {
case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_SDMA_XGMI:
- if ((type == KFD_QUEUE_TYPE_SDMA && dev->dqm->sdma_queue_count
- >= get_num_sdma_queues(dev->dqm)) ||
- (type == KFD_QUEUE_TYPE_SDMA_XGMI &&
- dev->dqm->xgmi_sdma_queue_count
- >= get_num_xgmi_sdma_queues(dev->dqm))) {
- pr_debug("Over-subscription is not allowed for SDMA.\n");
- retval = -EPERM;
- goto err_create_queue;
- }
-
+ /* SDMA queues are always allocated statically no matter
+ * which scheduler mode is used. We also do not need to
+ * check whether an SDMA queue can be allocated here, because
+ * allocate_sdma_queue() in create_queue() has the
+ * corresponding check logic.
+ */
retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
pqn->kq = NULL;
retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
- pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
@@ -266,7 +261,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
if ((dev->dqm->sched_policy ==
KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
- (dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
+ (dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
retval = -EPERM;
goto err_create_queue;
@@ -278,7 +273,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
pqn->q = q;
pqn->kq = NULL;
retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
- pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
case KFD_QUEUE_TYPE_DIQ:
@@ -299,7 +293,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("Pasid 0x%x DQM create queue %d failed. ret %d\n",
+ pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
pqm->process->pasid, type, retval);
goto err_create_queue;
}
@@ -322,12 +316,16 @@ int pqm_create_queue(struct process_queue_manager *pqm,
if (q) {
pr_debug("PQM done creating queue\n");
+ kfd_procfs_add_queue(q);
print_queue_properties(&q->properties);
}
return retval;
err_create_queue:
+ uninit_queue(q);
+ if (kq)
+ kernel_queue_uninit(kq, false);
kfree(pqn);
err_allocate_pqn:
/* check if queues list is empty unregister process from device */
@@ -378,6 +376,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
}
if (pqn->q) {
+ kfd_procfs_del_queue(pqn->q);
dqm = pqn->q->device->dqm;
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
if (retval) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 203c823d65f1..aa0bfa78a667 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -490,6 +490,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.num_sdma_queues_per_engine);
sysfs_show_32bit_prop(buffer, "num_cp_queues",
dev->node_props.num_cp_queues);
+ sysfs_show_64bit_prop(buffer, "unique_id",
+ dev->node_props.unique_id);
if (dev->gpu) {
log_max_watch_addr =
@@ -1318,7 +1320,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.num_gws = (hws_gws_support &&
dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
- dev->node_props.num_cp_queues = get_queues_num(dev->gpu->dqm);
+ dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
+ dev->node_props.unique_id = gpu->unique_id;
kfd_fill_mem_clk_max_info(dev);
kfd_fill_iolink_non_crat_info(dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 74e9b1682af8..46eeecaf1b68 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -54,6 +54,7 @@
struct kfd_node_properties {
uint64_t hive_id;
+ uint64_t unique_id;
uint32_t cpu_cores_count;
uint32_t simd_count;
uint32_t mem_banks_count;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 279541517a99..a4256780e70e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -98,6 +98,9 @@ MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
+#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
+MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
+
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100
@@ -129,9 +132,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
-static void
-amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
-
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
unsigned long possible_crtcs,
@@ -383,8 +383,8 @@ static void dm_pflip_high_irq(void *interrupt_params)
* of pageflip completion, so last_flip_vblank is the forbidden count
* for queueing new pageflips if vsync + VRR is enabled.
*/
- amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
- amdgpu_crtc->crtc_id);
+ amdgpu_crtc->last_flip_vblank =
+ amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
@@ -407,8 +407,9 @@ static void dm_vupdate_high_irq(void *interrupt_params)
if (acrtc) {
acrtc_state = to_dm_crtc_state(acrtc->base.state);
- DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
- amdgpu_dm_vrr_active(acrtc_state));
+ DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
+ acrtc->crtc_id,
+ amdgpu_dm_vrr_active(acrtc_state));
/* Core vblank handling is done here after end of front-porch in
* vrr mode, as vblank timestamping will give valid results
@@ -458,8 +459,9 @@ static void dm_crtc_high_irq(void *interrupt_params)
if (acrtc) {
acrtc_state = to_dm_crtc_state(acrtc->base.state);
- DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
- amdgpu_dm_vrr_active(acrtc_state));
+ DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
+ acrtc->crtc_id,
+ amdgpu_dm_vrr_active(acrtc_state));
/* Core vblank handling at start of front-porch is only possible
* in non-vrr mode, as only there vblank timestamping will give
@@ -522,8 +524,8 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
acrtc_state = to_dm_crtc_state(acrtc->base.state);
- DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
- amdgpu_dm_vrr_active(acrtc_state));
+ DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
+ amdgpu_dm_vrr_active(acrtc_state));
amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
drm_crtc_handle_vblank(&acrtc->base);
@@ -801,10 +803,20 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
- memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
- fw_inst_const_size);
+ /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
+ * amdgpu_ucode_init_single_fw will load dmub firmware
+ * fw_inst_const part to cw0; otherwise, the firmware back door load
+ * will be done by dm_dmub_hw_init
+ */
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
+ fw_inst_const_size);
+ }
+
memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
fw_bss_data_size);
+
+ /* Copy firmware bios info into FB memory. */
memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
adev->bios_size);
@@ -823,6 +835,10 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
hw_params.fb_base = adev->gmc.fb_start;
hw_params.fb_offset = adev->gmc.aper_base;
+ /* backdoor load firmware and trigger dmub running */
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ hw_params.load_inst_const = true;
+
if (dmcu)
hw_params.psp_version = dmcu->psp_version;
@@ -885,7 +901,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.asic_id.chip_family = adev->family;
- init_data.asic_id.pci_revision_id = adev->rev_id;
+ init_data.asic_id.pci_revision_id = adev->pdev->revision;
init_data.asic_id.hw_internal_rev = adev->external_rev_id;
init_data.asic_id.vram_width = adev->gmc.vram_width;
@@ -960,7 +976,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->asic_type >= CHIP_RAVEN) {
- adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc);
+ adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
if (!adev->dm.hdcp_workqueue)
DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
@@ -991,11 +1007,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
-#if defined(CONFIG_DEBUG_FS)
- if (dtn_debugfs_init(adev))
- DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
-#endif
-
DRM_DEBUG_DRIVER("KMS initialized.\n");
return 0;
@@ -1079,9 +1090,11 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case CHIP_VEGA20:
case CHIP_NAVI10:
case CHIP_NAVI14:
- case CHIP_NAVI12:
case CHIP_RENOIR:
return 0;
+ case CHIP_NAVI12:
+ fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
+ break;
case CHIP_RAVEN:
if (ASICREV_IS_PICASSO(adev->external_rev_id))
fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
@@ -1192,22 +1205,21 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
return 0;
}
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- DRM_WARN("Only PSP firmware loading is supported for DMUB\n");
- return 0;
- }
-
hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
- adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
- AMDGPU_UCODE_ID_DMCUB;
- adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
- adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
+ AMDGPU_UCODE_ID_DMCUB;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
+ adev->dm.dmub_fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
- DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
- adev->dm.dmcub_fw_version);
+ DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
+ adev->dm.dmcub_fw_version);
+ }
+
adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
dmub_srv = adev->dm.dmub_srv;
@@ -1422,6 +1434,73 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
drm_kms_helper_hotplug_event(dev);
}
+static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = &adev->smu;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return 0;
+
+ /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
+ * depends on the Windows driver dc implementation.
+ * For Navi1x, clock settings of dcn watermarks are fixed. The settings
+ * should be passed to smu during boot up and resume from s3.
+ * boot up: dc calculate dcn watermark clock settings within dc_create,
+ * dcn20_resource_construct
+ * then call pplib functions below to pass the settings to smu:
+ * smu_set_watermarks_for_clock_ranges
+ * smu_set_watermarks_table
+ * navi10_set_watermarks_table
+ * smu_write_watermarks_table
+ *
+ * For Renoir, clock settings of dcn watermarks are also fixed values.
+ * dc has implemented different flow for window driver:
+ * dc_hardware_init / dc_set_power_state
+ * dcn10_init_hw
+ * notify_wm_ranges
+ * set_wm_ranges
+ * -- Linux
+ * smu_set_watermarks_for_clock_ranges
+ * renoir_set_watermarks_table
+ * smu_write_watermarks_table
+ *
+ * For Linux,
+ * dc_hardware_init -> amdgpu_dm_init
+ * dc_set_power_state --> dm_resume
+ *
+ * Therefore, this function applies to Navi10/12/14 but not Renoir.
+ */
+ switch (adev->asic_type) {
+ case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
+ break;
+ default:
+ return 0;
+ }
+
+ mutex_lock(&smu->mutex);
+
+ /* pass data to smu controller */
+ if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+ !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+ ret = smu_write_watermarks_table(smu);
+
+ if (ret) {
+ mutex_unlock(&smu->mutex);
+ DRM_ERROR("Failed to update WMTABLE!\n");
+ return ret;
+ }
+ smu->watermarks_bitmap |= WATERMARKS_LOADED;
+ }
+
+ mutex_unlock(&smu->mutex);
+
+ return 0;
+}
+
/**
* dm_hw_init() - Initialize DC device
* @handle: The base driver device containing the amdgpu_dm device.
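
The watermark write above is latched by two bits: WATERMARKS_EXIST says dc has produced settings, WATERMARKS_LOADED says they already reached the SMU, so repeated resumes do not rewrite the table. A minimal model of that latch (the flag values are invented for the sketch):

#include <stdio.h>

#define WATERMARKS_EXIST	(1 << 0)	/* values assumed for the sketch */
#define WATERMARKS_LOADED	(1 << 1)

static int write_watermarks(unsigned *bitmap)
{
	if ((*bitmap & WATERMARKS_EXIST) && !(*bitmap & WATERMARKS_LOADED)) {
		/* smu_write_watermarks_table() would run here */
		*bitmap |= WATERMARKS_LOADED;
		return 1;	/* table written */
	}
	return 0;		/* nothing to do */
}

int main(void)
{
	unsigned bitmap = WATERMARKS_EXIST;
	int first = write_watermarks(&bitmap);
	int second = write_watermarks(&bitmap);

	printf("%d %d\n", first, second);	/* 1 0: resume is a no-op */
	return 0;
}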
@@ -1700,6 +1779,8 @@ static int dm_resume(void *handle)
amdgpu_dm_irq_resume_late(adev);
+ amdgpu_dm_smu_write_watermarks_table(adev);
+
return 0;
}
@@ -1758,8 +1839,63 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
-static void
-amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
+static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
+{
+ u32 max_cll, min_cll, max, min, q, r;
+ struct amdgpu_dm_backlight_caps *caps;
+ struct amdgpu_display_manager *dm;
+ struct drm_connector *conn_base;
+ struct amdgpu_device *adev;
+ static const u8 pre_computed_values[] = {
+ 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
+ 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
+
+ if (!aconnector || !aconnector->dc_link)
+ return;
+
+ conn_base = &aconnector->base;
+ adev = conn_base->dev->dev_private;
+ dm = &adev->dm;
+ caps = &dm->backlight_caps;
+ caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
+ caps->aux_support = false;
+ max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
+ min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
+
+ if (caps->ext_caps->bits.oled == 1 ||
+ caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
+ caps->ext_caps->bits.hdr_aux_backlight_control == 1)
+ caps->aux_support = true;
+
+ /* From the specification (CTA-861-G), for calculating the maximum
+ * luminance we need to use:
+ * Luminance = 50*2**(CV/32)
+ * Where CV is a one-byte value.
+ * Computing this expression directly would need floating-point
+ * precision; to avoid that complexity we exploit the fact that CV is
+ * divided by a constant. From Euclid's division algorithm, CV can be
+ * written as CV = 32*q + r. Substituting into the luminance expression
+ * gives 50*(2**q)*(2**(r/32)), so we only need to pre-compute the 32
+ * possible values of 50*2**(r/32). For pre-computing them we used the
+ * following Ruby line:
+ * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
+ * The results of the above expressions can be verified at
+ * pre_computed_values.
+ */
+ q = max_cll >> 5;
+ r = max_cll % 32;
+ max = (1 << q) * pre_computed_values[r];
+
+ // min luminance: maxLum * (CV/255)^2 / 100
+ q = DIV_ROUND_CLOSEST(min_cll, 255);
+ min = max * DIV_ROUND_CLOSEST((q * q), 100);
+
+ caps->aux_max_input_signal = max;
+ caps->aux_min_input_signal = min;
+}
+
+void amdgpu_dm_update_connector_after_detect(
+ struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
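
Working the table lookup through for a sink reporting max_cll = 100: q = 100 >> 5 = 3 and r = 100 % 32 = 4, so max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440 nits, close to the exact 50*2**(100/32) ≈ 436. Note that the min-luminance path rounds min_cll/255 before squaring, so for byte-sized min_cll it evaluates to 0. The max computation in isolation:

#include <stdio.h>

static const unsigned char pre_computed_values[] = {
	50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
	71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

int main(void)
{
	unsigned max_cll = 100;		/* sample CTA-861-G value */
	unsigned q = max_cll >> 5;
	unsigned r = max_cll % 32;
	unsigned max = (1u << q) * pre_computed_values[r];

	printf("max luminance ~= %u nits\n", max);	/* 440 */
	return 0;
}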
@@ -1872,7 +2008,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
aconnector->edid);
}
amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
-
+ update_connector_ext_caps(aconnector);
} else {
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
amdgpu_dm_update_freesync_caps(connector, NULL);
@@ -1911,7 +2047,7 @@ static void handle_hpd_irq(void *param)
mutex_lock(&aconnector->hpd_lock);
#ifdef CONFIG_DRM_AMD_DC_HDCP
- if (adev->asic_type >= CHIP_RAVEN)
+ if (adev->dm.hdcp_workqueue)
hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif
if (aconnector->fake_enable)
@@ -2088,8 +2224,10 @@ static void handle_hpd_rx_irq(void *param)
}
}
#ifdef CONFIG_DRM_AMD_DC_HDCP
- if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ)
- hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
+ if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
+ if (adev->dm.hdcp_workqueue)
+ hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
+ }
#endif
if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
(dc_link->type == dc_connection_mst_branch))
@@ -2484,6 +2622,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
+#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
@@ -2498,9 +2637,11 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
if (caps.caps_valid) {
+ dm->backlight_caps.caps_valid = true;
+ if (caps.aux_support)
+ return;
dm->backlight_caps.min_input_signal = caps.min_input_signal;
dm->backlight_caps.max_input_signal = caps.max_input_signal;
- dm->backlight_caps.caps_valid = true;
} else {
dm->backlight_caps.min_input_signal =
AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
@@ -2508,40 +2649,95 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
}
#else
+ if (dm->backlight_caps.aux_support)
+ return;
+
dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}
+static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
+{
+ bool rc;
+
+ if (!link)
+ return 1;
+
+ rc = dc_link_set_backlight_level_nits(link, true, brightness,
+ AUX_BL_DEFAULT_TRANSITION_TIME_MS);
+
+ return rc ? 0 : 1;
+}
+
+static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
+ const uint32_t user_brightness)
+{
+ u32 min, max, conversion_pace;
+ u32 brightness = user_brightness;
+
+ if (!caps)
+ goto out;
+
+ if (!caps->aux_support) {
+ max = caps->max_input_signal;
+ min = caps->min_input_signal;
+ /*
+ * The brightness input is in the range 0-255.
+ * It needs to be rescaled to be between the
+ * requested min and max input signal.
+ * It also needs to be scaled up by 0x101 to
+ * match the DC interface, which has a range of
+ * 0 to 0xffff.
+ */
+ conversion_pace = 0x101;
+ brightness =
+ user_brightness
+ * conversion_pace
+ * (max - min)
+ / AMDGPU_MAX_BL_LEVEL
+ + min * conversion_pace;
+ } else {
+ /* TODO
+ * We are doing a linear interpolation here, which is OK but
+ * does not provide the optimal result. We probably want
+ * something close to the Perceptual Quantizer (PQ) curve.
+ */
+ max = caps->aux_max_input_signal;
+ min = caps->aux_min_input_signal;
+
+ brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
+ + user_brightness * max;
+ // Multiply the value by 1000 since we use millinits
+ brightness *= 1000;
+ brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
+ }
+
+out:
+ return brightness;
+}
+
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
struct amdgpu_dm_backlight_caps caps;
- uint32_t brightness = bd->props.brightness;
+ struct dc_link *link = NULL;
+ u32 brightness;
+ bool rc;
amdgpu_dm_update_backlight_caps(dm);
caps = dm->backlight_caps;
- /*
- * The brightness input is in the range 0-255
- * It needs to be rescaled to be between the
- * requested min and max input signal
- *
- * It also needs to be scaled up by 0x101 to
- * match the DC interface which has a range of
- * 0 to 0xffff
- */
- brightness =
- brightness
- * 0x101
- * (caps.max_input_signal - caps.min_input_signal)
- / AMDGPU_MAX_BL_LEVEL
- + caps.min_input_signal * 0x101;
-
- if (dc_link_set_backlight_level(dm->backlight_link,
- brightness, 0))
- return 0;
- else
- return 1;
+
+ link = (struct dc_link *)dm->backlight_link;
+
+ brightness = convert_brightness(&caps, bd->props.brightness);
+ // Change brightness based on AUX property
+ if (caps.aux_support)
+ return set_backlight_via_aux(link, brightness);
+
+ rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
+
+ return rc ? 0 : 1;
}
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
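
For the non-AUX path, a user brightness of 128 against the default ACPI range 12..255 maps to 128 * 0x101 * (255 - 12) / 255 + 12 * 0x101 = 31348 + 3084 = 34432 on the DC 0..0xffff scale. The same arithmetic as a standalone check (AMDGPU_MAX_BL_LEVEL is assumed to be 255 here):

#include <stdio.h>

#define MAX_BL_LEVEL 255	/* AMDGPU_MAX_BL_LEVEL, assumed 0xff */

int main(void)
{
	unsigned user = 128, min = 12, max = 255, pace = 0x101;
	unsigned out = user * pace * (max - min) / MAX_BL_LEVEL
		     + min * pace;

	printf("dc brightness = %u (0x%x)\n", out, out);	/* 34432 */
	return 0;
}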
@@ -2826,6 +3022,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+ /* No userspace support. */
+ dm->dc->debug.disable_tri_buf = true;
+
return 0;
fail:
kfree(aencoder);
@@ -4117,9 +4316,22 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
struct dmcu *dmcu = core_dc->res_pool->dmcu;
stream->psr_version = dmcu->dmcu_version.psr_version;
- mod_build_vsc_infopacket(stream,
- &stream->vsc_infopacket,
- &stream->use_vsc_sdp_for_colorimetry);
+
+ //
+ // Decide whether the stream supports the VSC SDP colorimetry
+ // capability before building the VSC info packet.
+ //
+ stream->use_vsc_sdp_for_colorimetry = false;
+ if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ stream->use_vsc_sdp_for_colorimetry =
+ aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
+ } else {
+ if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
+ stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
+ stream->use_vsc_sdp_for_colorimetry = true;
+ }
+ }
+ mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
}
}
finish:
@@ -4269,8 +4481,10 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.set_crc_source = amdgpu_dm_crtc_set_crc_source,
.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
.enable_vblank = dm_enable_vblank,
.disable_vblank = dm_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static enum drm_connector_status
@@ -4491,6 +4705,19 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
return &new_state->base;
}
+static int
+amdgpu_dm_connector_late_register(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+#if defined(CONFIG_DEBUG_FS)
+ connector_debugfs_init(amdgpu_dm_connector);
+#endif
+
+ return 0;
+}
+
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
.reset = amdgpu_dm_connector_funcs_reset,
.detect = amdgpu_dm_connector_detect,
@@ -4500,6 +4727,7 @@ static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
+ .late_register = amdgpu_dm_connector_late_register,
.early_unregister = amdgpu_dm_connector_unregister
};
@@ -4876,7 +5104,8 @@ static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
.disable = dm_crtc_helper_disable,
.atomic_check = dm_crtc_helper_atomic_check,
- .mode_fixup = dm_crtc_helper_mode_fixup
+ .mode_fixup = dm_crtc_helper_mode_fixup,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
@@ -5702,7 +5931,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
drm_connector_attach_vrr_capable_property(
&aconnector->base);
#ifdef CONFIG_DRM_AMD_DC_HDCP
- if (adev->asic_type >= CHIP_RAVEN)
+ if (adev->dm.hdcp_workqueue)
drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
}
@@ -5839,13 +6068,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
drm_connector_attach_encoder(
&aconnector->base, &aencoder->base);
- drm_connector_register(&aconnector->base);
-#if defined(CONFIG_DEBUG_FS)
- connector_debugfs_init(aconnector);
- aconnector->debugfs_dpcd_address = 0;
- aconnector->debugfs_dpcd_size = 0;
-#endif
-
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
|| connector_type == DRM_MODE_CONNECTOR_eDP)
amdgpu_dm_initialize_dp_connector(dm, aconnector);
@@ -6331,7 +6553,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
uint32_t target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool pflip_present = false;
- bool swizzle = true;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_plane_info plane_infos[MAX_SURFACES];
@@ -6377,9 +6598,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dc_plane = dm_new_plane_state->dc_state;
- if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
- swizzle = false;
-
bundle->surface_updates[planes_count].surface = dc_plane;
if (new_pcrtc_state->color_mgmt_changed) {
bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
@@ -6480,7 +6698,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* clients using the GLX_OML_sync_control extension or
* DRI3/Present extension with defined target_msc.
*/
- last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
+ last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
}
else {
/* For variable refresh rate mode only:
@@ -6509,7 +6727,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(int)(target_vblank -
- amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) {
+ amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
usleep_range(1000, 1100);
}
@@ -6588,8 +6806,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
amdgpu_dm_link_setup_psr(acrtc_state->stream);
else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
acrtc_state->stream->link->psr_feature_enabled &&
- !acrtc_state->stream->link->psr_allow_active &&
- swizzle) {
+ !acrtc_state->stream->link->psr_allow_active) {
amdgpu_dm_psr_enable(acrtc_state->stream);
}
@@ -8408,7 +8625,6 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
/* Calculate number of static frames before generating interrupt to
* enter PSR.
*/
- unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
// Init fail safe of 2 frames static
unsigned int num_frames_static = 2;
@@ -8423,8 +8639,10 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
* Calculate number of frames such that at least 30 ms of time has
* passed.
*/
- if (vsync_rate_hz != 0)
+ if (vsync_rate_hz != 0) {
+ unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
num_frames_static = (30000 / frame_time_microsec) + 1;
+ }
params.triggers.cursor_update = true;
params.triggers.overlay_update = true;
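
At 60 Hz the frame time is 1000000 / 60 = 16666 us, so num_frames_static = 30000 / 16666 + 1 = 2; at 120 Hz it becomes 4, keeping at least 30 ms of static content before PSR entry. The computation in isolation:

#include <stdio.h>

static unsigned frames_static(unsigned vsync_rate_hz)
{
	unsigned num_frames_static = 2;	/* fail-safe default */

	if (vsync_rate_hz != 0) {
		unsigned frame_time_us = 1000000 / vsync_rate_hz;

		num_frames_static = 30000 / frame_time_us + 1;
	}
	return num_frames_static;
}

int main(void)
{
	printf("%u %u %u\n", frames_static(60), frames_static(120),
	       frames_static(0));	/* 2 4 2 */
	return 0;
}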
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 7ea9acb0358d..5cab3e65d992 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -90,15 +90,41 @@ struct dm_comressor_info {
};
/**
- * struct amdgpu_dm_backlight_caps - Usable range of backlight values from ACPI
- * @min_input_signal: minimum possible input in range 0-255
- * @max_input_signal: maximum possible input in range 0-255
- * @caps_valid: true if these values are from the ACPI interface
+ * struct amdgpu_dm_backlight_caps - Information about backlight
+ *
+ * Describe the backlight support for ACPI or eDP AUX.
*/
struct amdgpu_dm_backlight_caps {
+ /**
+ * @ext_caps: Keep the data struct with all the information about the
+ * display support for HDR.
+ */
+ union dpcd_sink_ext_caps *ext_caps;
+ /**
+ * @aux_min_input_signal: Min brightness value supported by the display
+ */
+ u32 aux_min_input_signal;
+ /**
+ * @aux_max_input_signal: Max brightness value supported by the display
+ * in nits.
+ */
+ u32 aux_max_input_signal;
+ /**
+ * @min_input_signal: minimum possible input in range 0-255.
+ */
int min_input_signal;
+ /**
+ * @max_input_signal: maximum possible input in range 0-255.
+ */
int max_input_signal;
+ /**
+ * @caps_valid: true if these values are from the ACPI interface.
+ */
bool caps_valid;
+ /**
+ * @aux_support: Describes if the display supports AUX backlight.
+ */
+ bool aux_support;
};
/**
@@ -457,6 +483,9 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct dc_plane_state *dc_plane_state);
+void amdgpu_dm_update_connector_after_detect(
+ struct amdgpu_dm_connector *aconnector);
+
extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index f81d3439ee8c..0461fecd68db 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -32,6 +32,19 @@
#include "amdgpu_dm.h"
#include "amdgpu_dm_debugfs.h"
#include "dm_helpers.h"
+#include "dmub/inc/dmub_srv.h"
+
+struct dmub_debugfs_trace_header {
+ uint32_t entry_count;
+ uint32_t reserved[3];
+};
+
+struct dmub_debugfs_trace_entry {
+ uint32_t trace_code;
+ uint32_t tick_count;
+ uint32_t param0;
+ uint32_t param1;
+};
/* function description
* get/ set DP configuration: lane_count, link_rate, spread_spectrum
@@ -675,6 +688,73 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
return bytes_from_user;
}
+/**
+ * Returns the DMCUB tracebuffer contents.
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_tracebuffer
+ */
+static int dmub_tracebuffer_show(struct seq_file *m, void *data)
+{
+ struct amdgpu_device *adev = m->private;
+ struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+ struct dmub_debugfs_trace_entry *entries;
+ uint8_t *tbuf_base;
+ uint32_t tbuf_size, max_entries, num_entries, i;
+
+ if (!fb_info)
+ return 0;
+
+ tbuf_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr;
+ if (!tbuf_base)
+ return 0;
+
+ tbuf_size = fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size;
+ max_entries = (tbuf_size - sizeof(struct dmub_debugfs_trace_header)) /
+ sizeof(struct dmub_debugfs_trace_entry);
+
+ num_entries =
+ ((struct dmub_debugfs_trace_header *)tbuf_base)->entry_count;
+
+ num_entries = min(num_entries, max_entries);
+
+ entries = (struct dmub_debugfs_trace_entry
+ *)(tbuf_base +
+ sizeof(struct dmub_debugfs_trace_header));
+
+ for (i = 0; i < num_entries; ++i) {
+ struct dmub_debugfs_trace_entry *entry = &entries[i];
+
+ seq_printf(m,
+ "trace_code=%u tick_count=%u param0=%u param1=%u\n",
+ entry->trace_code, entry->tick_count, entry->param0,
+ entry->param1);
+ }
+
+ return 0;
+}
+
+/**
+ * Returns the DMCUB firmware state contents.
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_fw_state
+ */
+static int dmub_fw_state_show(struct seq_file *m, void *data)
+{
+ struct amdgpu_device *adev = m->private;
+ struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+ uint8_t *state_base;
+ uint32_t state_size;
+
+ if (!fb_info)
+ return 0;
+
+ state_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr;
+ if (!state_base)
+ return 0;
+
+ state_size = fb_info->fb[DMUB_WINDOW_6_FW_STATE].size;
+
+ return seq_write(m, state_base, state_size);
+}
+
/*
* Returns the current and maximum output bpc for the connector.
* Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc
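
dmub_tracebuffer_show() above trusts the firmware's entry_count only after clamping it against what the window can physically hold, so a corrupt header cannot walk past the buffer. The same bounds check in miniature (the layout mirrors the trace header/entry structs at the top of this file):

#include <stdint.h>
#include <stdio.h>

struct trace_header { uint32_t entry_count; uint32_t reserved[3]; };
struct trace_entry  { uint32_t code, tick, param0, param1; };

static uint32_t usable_entries(const struct trace_header *hdr,
			       uint32_t buf_size)
{
	uint32_t max = (buf_size - sizeof(*hdr)) / sizeof(struct trace_entry);

	return hdr->entry_count < max ? hdr->entry_count : max;
}

int main(void)
{
	struct {
		struct trace_header hdr;
		struct trace_entry entries[2];
	} buf = { .hdr = { .entry_count = 1000 } };	/* lying firmware */

	printf("%u entries read\n", usable_entries(&buf.hdr, sizeof(buf)));
	return 0;	/* prints 2, not 1000 */
}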
@@ -880,6 +960,8 @@ static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
return read_size - r;
}
+DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
+DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
DEFINE_SHOW_ATTRIBUTE(output_bpc);
DEFINE_SHOW_ATTRIBUTE(vrr_range);
@@ -1008,6 +1090,9 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
&force_yuv420_output_fops);
+ connector->debugfs_dpcd_address = 0;
+ connector->debugfs_dpcd_size = 0;
}
/*
@@ -1188,5 +1273,11 @@ int dtn_debugfs_init(struct amdgpu_device *adev)
debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev,
&visual_confirm_fops);
+ debugfs_create_file_unsafe("amdgpu_dm_dmub_tracebuffer", 0644, root,
+ adev, &dmub_tracebuffer_fops);
+
+ debugfs_create_file_unsafe("amdgpu_dm_dmub_fw_state", 0644, root,
+ adev, &dmub_fw_state_fops);
+
return 0;
}
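
For readers who want to consume the new trace interface, here is a minimal userspace sketch that parses the text produced by dmub_tracebuffer_show() above. The debugfs path matches the file registered in dtn_debugfs_init(); everything else (mount point, DRM minor 0, the program itself) is an illustrative assumption, not part of the driver:

/* Userspace sketch: parse DMCUB trace entries from the new debugfs file.
 * Assumes debugfs is mounted at /sys/kernel/debug and the device is DRM
 * minor 0. Build outside the kernel tree. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/dri/0/amdgpu_dm_dmub_tracebuffer";
	unsigned int code, tick, p0, p1;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}

	/* Each line is printed by the kernel as:
	 * trace_code=%u tick_count=%u param0=%u param1=%u
	 * (leading space in the format skips the newline between lines) */
	while (fscanf(f, " trace_code=%u tick_count=%u param0=%u param1=%u",
		      &code, &tick, &p0, &p1) == 4)
		printf("code %u @ tick %u (%u, %u)\n", code, tick, p0, p1);

	fclose(f);
	return 0;
}
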
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 0acd3409dd6c..5b70ed3cdb88 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -28,6 +28,13 @@
#include "amdgpu_dm.h"
#include "dm_helpers.h"
#include <drm/drm_hdcp.h>
+#include "hdcp_psp.h"
+
+/*
+ * If the SRM version being loaded is less than or equal to the
+ * currently loaded SRM, psp will return 0xFFFF as the version
+ */
+#define PSP_SRM_VERSION_MAX 0xFFFF
static bool
lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
@@ -67,6 +74,59 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
}
+static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size)
+{
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ if (!psp->hdcp_context.hdcp_initialized) {
+ DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized.");
+ return NULL;
+ }
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_GET_SRM;
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return NULL;
+
+ *srm_version = hdcp_cmd->out_msg.hdcp_get_srm.srm_version;
+ *srm_size = hdcp_cmd->out_msg.hdcp_get_srm.srm_buf_size;
+
+ return hdcp_cmd->out_msg.hdcp_get_srm.srm_buf;
+}
+
+static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, uint32_t *srm_version)
+{
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ if (!psp->hdcp_context.hdcp_initialized) {
+ DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized.");
+ return -EINVAL;
+ }
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ memcpy(hdcp_cmd->in_msg.hdcp_set_srm.srm_buf, srm, srm_size);
+ hdcp_cmd->in_msg.hdcp_set_srm.srm_buf_size = srm_size;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_SET_SRM;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 ||
+ hdcp_cmd->out_msg.hdcp_set_srm.srm_version == PSP_SRM_VERSION_MAX)
+ return -EINVAL;
+
+ *srm_version = hdcp_cmd->out_msg.hdcp_set_srm.srm_version;
+ return 0;
+}
+
static void process_output(struct hdcp_workqueue *hdcp_work)
{
struct mod_hdcp_output output = hdcp_work->output;
@@ -88,6 +148,18 @@ static void process_output(struct hdcp_workqueue *hdcp_work)
schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(0));
}
+static void link_lock(struct hdcp_workqueue *work, bool lock)
+{
+ int i = 0;
+
+ for (i = 0; i < work->max_link; i++) {
+ if (lock)
+ mutex_lock(&work[i].mutex);
+ else
+ mutex_unlock(&work[i].mutex);
+ }
+}
void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
unsigned int link_index,
struct amdgpu_dm_connector *aconnector,
@@ -112,6 +184,13 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
if (enable_encryption) {
+ /* Explicitly apply the saved SRM here, since on S3 resume HDCP is
+ * re-enabled before usermode gets a chance to write the SRM via sysfs
+ */
+ if (hdcp_work->srm_size > 0)
+ psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, hdcp_work->srm_size,
+ &hdcp_work->srm_version);
+
display->adjust.disable = 0;
if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0)
hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
@@ -301,8 +380,9 @@ void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
}
+ kfree(hdcp_work->srm);
+ kfree(hdcp_work->srm_temp);
kfree(hdcp_work);
-
}
static void update_config(void *handle, struct cp_psp_stream_config *config)
@@ -332,26 +412,170 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
link->dig_be = config->link_enc_inst;
link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
+ link->dp.mst_supported = config->mst_supported;
display->adjust.disable = 1;
link->adjust.auth_delay = 2;
hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false);
}
-struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc)
+
+/* NOTE: From usermode's perspective you only need to call write *ONCE*; the kernel
+ * will automatically call it once or twice depending on the size
+ *
+ * call: "cat file > /sys/class/drm/card0/device/hdcp_srm" from usermode no matter what the size is
+ *
+ * The kernel can only send PAGE_SIZE at once and since MAX_SRM_FILE(5120) > PAGE_SIZE(4096),
+ * srm_data_write can be called multiple times.
+ *
+ * The sysfs interface doesn't tell us the size we will get, so we send partial SRMs to psp and on
+ * the last call we send the full SRM. PSP will fail on every call before the last.
+ *
+ * This means we don't know if the SRM is good until the last call, and because of this limitation we
+ * cannot throw errors early, as that would stop the kernel from writing to sysfs
+ *
+ * Example 1:
+ * Good SRM size = 5096
+ * first call to write 4096 -> PSP fails
+ * Second call to write 1000 -> PSP passes -> SRM is set
+ *
+ * Example 2:
+ * Bad SRM size = 4096
+ * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this
+ * is the last call)
+ *
+ * Solution?:
+ * 1: Parse the SRM? -> It is signed, so we don't know the EOF
+ * 2: We can have another sysfs file that passes the size before calling set. -> the simpler
+ * solution is below
+ *
+ * Easy Solution:
+ * Always call get after set to verify whether set was successful.
+ * +----------------------+
+ * | Why it works:        |
+ * +----------------------+
+ * PSP will only update its SRM if the one being loaded is newer than the one it already holds.
+ * Always do set first, then get.
+ * -if we try to "1. SET" an older version, PSP will reject it and we can "2. GET" the newer
+ *  version and save it
+ *
+ * -if we try to "1. SET" a newer version, PSP will accept it and we can "2. GET" the
+ *  same (newer) version back and save it
+ *
+ * -if we try to "1. SET" a newer version and PSP rejects it, that means the format is
+ *  incorrect/corrupted and we should correct our SRM by getting it from PSP
+ */
+static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
+ loff_t pos, size_t count)
+{
+ struct hdcp_workqueue *work;
+ uint32_t srm_version = 0;
+
+ work = container_of(bin_attr, struct hdcp_workqueue, attr);
+ link_lock(work, true);
+
+ memcpy(work->srm_temp + pos, buffer, count);
+
+ if (!psp_set_srm(work->hdcp.config.psp.handle, work->srm_temp, pos + count, &srm_version)) {
+ DRM_DEBUG_DRIVER("HDCP SRM SET version 0x%X", srm_version);
+ memcpy(work->srm, work->srm_temp, pos + count);
+ work->srm_size = pos + count;
+ work->srm_version = srm_version;
+ }
+
+ link_lock(work, false);
+
+ return count;
+}
+
+static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
+ loff_t pos, size_t count)
+{
+ struct hdcp_workqueue *work;
+ uint8_t *srm = NULL;
+ uint32_t srm_version;
+ uint32_t srm_size;
+ ssize_t ret = count;
+
+ work = container_of(bin_attr, struct hdcp_workqueue, attr);
+
+ link_lock(work, true);
+
+ srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size);
+
+ if (!srm) {
+ ret = -EINVAL;
+ goto ret;
+ }
+
+ if (pos >= srm_size) {
+ ret = 0;
+ goto ret;
+ }
+ if (srm_size - pos < count) {
+ memcpy(buffer, srm + pos, srm_size - pos);
+ ret = srm_size - pos;
+ goto ret;
+ }
+
+ memcpy(buffer, srm + pos, count);
+
+ret:
+ link_lock(work, false);
+ return ret;
+}
+
+/* From the HDCP spec (5. Renewability) the SRM needs to be stored in non-volatile memory.
+ *
+ * For example,
+ * if Application "A" sets the SRM (ver 2) and we reboot/suspend, and later Application "B"
+ * needs to use HDCP, the version in PSP should be SRM(ver 2). So the SRM should be persistent
+ * across boots/reboots/suspend/resume/shutdown.
+ *
+ * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP we need
+ * to make the SRM persistent.
+ *
+ * -PSP owns the checking of the SRM but doesn't have the ability to store it in non-volatile memory.
+ * -The kernel cannot write to the file system.
+ * -So we need usermode to do this for us, which is why an interface for usermode is needed.
+ *
+ * Usermode can read/write to/from PSP using the sysfs interface.
+ * For example:
+ * to save the SRM from PSP to storage: cat /sys/class/drm/card0/device/hdcp_srm > srmfile
+ * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm
+ */
+static const struct bin_attribute data_attr = {
+ .attr = {.name = "hdcp_srm", .mode = 0664},
+ .size = PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, /* Limit SRM size */
+ .write = srm_data_write,
+ .read = srm_data_read,
+};
+
+struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc)
{
int max_caps = dc->caps.max_links;
- struct hdcp_workqueue *hdcp_work = kzalloc(max_caps*sizeof(*hdcp_work), GFP_KERNEL);
+ struct hdcp_workqueue *hdcp_work;
int i = 0;
+ hdcp_work = kcalloc(max_caps, sizeof(*hdcp_work), GFP_KERNEL);
if (hdcp_work == NULL)
+ return NULL;
+
+ hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL);
+
+ if (hdcp_work->srm == NULL)
+ goto fail_alloc_context;
+
+ hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm_temp), GFP_KERNEL);
+
+ if (hdcp_work->srm_temp == NULL)
goto fail_alloc_context;
hdcp_work->max_link = max_caps;
for (i = 0; i < max_caps; i++) {
-
mutex_init(&hdcp_work[i].mutex);
INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq);
@@ -360,7 +584,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *c
INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer);
INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);
- hdcp_work[i].hdcp.config.psp.handle = psp_context;
+ hdcp_work[i].hdcp.config.psp.handle = &adev->psp;
hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
@@ -371,9 +595,17 @@ struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *c
cp_psp->funcs.update_stream_config = update_config;
cp_psp->handle = hdcp_work;
+ /* File created at /sys/class/drm/card0/device/hdcp_srm */
+ hdcp_work[0].attr = data_attr;
+
+ if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
+ DRM_WARN("Failed to create device file hdcp_srm");
+
return hdcp_work;
fail_alloc_context:
+ kfree(hdcp_work->srm);
+ kfree(hdcp_work->srm_temp);
kfree(hdcp_work);
return NULL;
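
The "set then get" strategy described in the srm_data_write() comment can be exercised entirely from usermode. A minimal sketch follows, assuming the sysfs path registered above and the 5120-byte first-generation SRM limit; error handling is abbreviated and the program is illustrative, not a supported tool:

/* Userspace sketch: load an SRM into PSP via the hdcp_srm sysfs file,
 * then read back whatever PSP actually kept (PSP silently keeps a newer
 * SRM if the one we wrote is older or invalid). */
#include <stdio.h>

#define SRM_PATH "/sys/class/drm/card0/device/hdcp_srm"
#define SRM_MAX  5120	/* PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE */

int main(int argc, char **argv)
{
	static unsigned char buf[SRM_MAX];
	size_t n;
	FILE *f;

	if (argc != 2)
		return 1;

	/* 1. SET: stream the candidate SRM into sysfs */
	f = fopen(argv[1], "rb");
	if (!f)
		return 1;
	n = fread(buf, 1, sizeof(buf), f);
	fclose(f);

	f = fopen(SRM_PATH, "wb");
	if (!f)
		return 1;
	fwrite(buf, 1, n, f);
	fclose(f);

	/* 2. GET: read back what PSP now holds and emit it on stdout */
	f = fopen(SRM_PATH, "rb");
	if (!f)
		return 1;
	n = fread(buf, 1, sizeof(buf), f);
	fclose(f);

	return fwrite(buf, 1, n, stdout) == n ? 0 : 1;
}

Whatever the read-back returns is what PSP actually holds, so persisting that output rather than the input file keeps the stored SRM consistent with PSP even after a rejected or out-of-date load.
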
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index 6abde86bce4a..5159b3a5e5b0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -30,6 +30,7 @@
#include "hdcp.h"
#include "dc.h"
#include "dm_cp_psp.h"
+#include "amdgpu.h"
struct mod_hdcp;
struct mod_hdcp_link;
@@ -52,6 +53,12 @@ struct hdcp_workqueue {
enum mod_hdcp_encryption_status encryption_status;
uint8_t max_link;
+
+ uint8_t *srm;
+ uint8_t *srm_temp;
+ uint32_t srm_version;
+ uint32_t srm_size;
+ struct bin_attribute attr;
};
void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
@@ -64,6 +71,6 @@ void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
void hdcp_destroy(struct hdcp_workqueue *work);
-struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc);
+struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc);
#endif /* AMDGPU_DM_AMDGPU_DM_HDCP_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 318b474ff20e..c20fb08c450b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -400,8 +400,8 @@ bool dm_helpers_dp_mst_start_top_mgr(
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- DRM_ERROR("Failed to found connector for link!");
- return false;
+ DRM_ERROR("Failed to find connector for link!");
+ return false;
}
if (boot) {
@@ -423,8 +423,8 @@ void dm_helpers_dp_mst_stop_top_mgr(
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- DRM_ERROR("Failed to found connector for link!");
- return;
+ DRM_ERROR("Failed to find connector for link!");
+ return;
}
DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
@@ -445,7 +445,7 @@ bool dm_helpers_dp_read_dpcd(
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- DRM_ERROR("Failed to found connector for link!");
+ DRM_ERROR("Failed to find connector for link!");
return false;
}
@@ -463,7 +463,7 @@ bool dm_helpers_dp_write_dpcd(
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- DRM_ERROR("Failed to found connector for link!");
+ DRM_ERROR("Failed to find connector for link!");
return false;
}
@@ -483,7 +483,7 @@ bool dm_helpers_submit_i2c(
bool result;
if (!aconnector) {
- DRM_ERROR("Failed to found connector for link!");
+ DRM_ERROR("Failed to find connector for link!");
return false;
}
@@ -538,7 +538,7 @@ bool dm_helpers_is_dp_sink_present(struct dc_link *link)
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- BUG_ON("Failed to found connector for link!");
+ BUG_ON("Failed to find connector for link!");
return true;
}
@@ -580,6 +580,20 @@ enum dc_edid_status dm_helpers_read_local_edid(
/* We don't need the original edid anymore */
kfree(edid);
+ /* drm_connector->display_info is parsed from the EDID and populated
+ * by the call stack below:
+ * drm_parse_ycbcr420_deep_color_info
+ * drm_parse_hdmi_forum_vsdb
+ * drm_parse_cea_ext
+ * drm_add_display_info
+ * drm_connector_update_edid_property
+ *
+ * drm_connector->display_info is then used by amdgpu_dm functions
+ * such as fill_stream_properties_from_drm_display_mode
+ */
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
edid_status = dm_helpers_parse_edid_caps(
ctx,
&sink->dc_edid,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 5672f7765919..e8208df420d9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -154,15 +154,18 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
- struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
+ int r;
+
+ amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
+ r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
+ if (r)
+ return r;
#if defined(CONFIG_DEBUG_FS)
connector_debugfs_init(amdgpu_dm_connector);
- amdgpu_dm_connector->debugfs_dpcd_address = 0;
- amdgpu_dm_connector->debugfs_dpcd_size = 0;
#endif
- return drm_dp_mst_connector_late_register(connector, port);
+ return r;
}
static void
@@ -204,7 +207,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
dsc_caps, NULL,
- &dc_sink->sink_dsc_caps.dsc_dec_caps))
+ &dc_sink->dsc_caps.dsc_dec_caps))
return false;
return true;
@@ -259,8 +262,8 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (!validate_dsc_caps_on_connector(aconnector))
- memset(&aconnector->dc_sink->sink_dsc_caps,
- 0, sizeof(aconnector->dc_sink->sink_dsc_caps));
+ memset(&aconnector->dc_sink->dsc_caps,
+ 0, sizeof(aconnector->dc_sink->dsc_caps));
#endif
}
}
@@ -437,9 +440,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector)
{
- struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
- struct drm_device *dev = master->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
@@ -451,42 +451,26 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
aconnector->dc_sink);
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
+ aconnector->dc_link->cur_link_settings.lane_count = 0;
}
drm_connector_unregister(connector);
- if (adev->mode_info.rfbdev)
- drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector);
drm_connector_put(connector);
}
-static void dm_dp_mst_register_connector(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
-
- if (adev->mode_info.rfbdev)
- drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
- else
- DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
-
- drm_connector_register(connector);
-}
-
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
.add_connector = dm_dp_add_mst_connector,
.destroy_connector = dm_dp_destroy_mst_connector,
- .register_connector = dm_dp_mst_register_connector
};
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector)
{
aconnector->dm_dp_aux.aux.name = "dmdc";
- aconnector->dm_dp_aux.aux.dev = aconnector->base.kdev;
aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
- drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
+ drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
&aconnector->base);
@@ -547,7 +531,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
if (vars[i].dsc_enabled && dc_dsc_compute_config(
params[i].sink->ctx->dc->res_pool->dscs[0],
- &params[i].sink->sink_dsc_caps.dsc_dec_caps,
+ &params[i].sink->dsc_caps.dsc_dec_caps,
params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
0,
params[i].timing,
@@ -568,7 +552,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
dc_dsc_compute_config(
param.sink->ctx->dc->res_pool->dscs[0],
- &param.sink->sink_dsc_caps.dsc_dec_caps,
+ &param.sink->dsc_caps.dsc_dec_caps,
param.sink->ctx->dc->debug.dsc_min_slice_height_override,
(int) kbps, param.timing, &dsc_config);
@@ -765,14 +749,14 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[count].sink = stream->sink;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
params[count].port = aconnector->port;
- params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported;
+ params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
if (!dc_dsc_compute_bandwidth_range(
stream->sink->ctx->dc->res_pool->dscs[0],
stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
dsc_policy.min_target_bpp,
dsc_policy.max_target_bpp,
- &stream->sink->sink_dsc_caps.dsc_dec_caps,
+ &stream->sink->dsc_caps.dsc_dec_caps,
&stream->timing, &params[count].bw_range))
params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
@@ -854,7 +838,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (!aconnector || !aconnector->dc_sink)
continue;
- if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported)
+ if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
continue;
if (computed_streams[i])
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 2f1c9584ac32..37fa7b48250e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -267,7 +267,7 @@ static struct atom_display_object_path_v2 *get_bios_object(
&& id.enum_id == obj_id.enum_id)
return &bp->object_info_tbl.v1_4->display_path[i];
}
- /* fall through */
+ fallthrough;
case OBJECT_TYPE_CONNECTOR:
case OBJECT_TYPE_GENERIC:
/* Both Generic and Connector Object ID
@@ -280,7 +280,7 @@ static struct atom_display_object_path_v2 *get_bios_object(
&& id.enum_id == obj_id.enum_id)
return &bp->object_info_tbl.v1_4->display_path[i];
}
- /* fall through */
+ fallthrough;
default:
return NULL;
}
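
The `fallthrough;` statements substituted here are the kernel's pseudo-keyword from <linux/compiler_attributes.h>: on compilers that support `__attribute__((__fallthrough__))` it lets -Wimplicit-fallthrough verify that the fall-through is intentional, which the old comment convention could not guarantee. A self-contained sketch of the idiom outside the kernel tree; the portability macro below is an assumption for illustration, not the kernel's exact definition:

/* Portable stand-in for the kernel's fallthrough pseudo-keyword. */
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

#if __has_attribute(__fallthrough__)
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough do {} while (0)	/* fallthrough */
#endif

/* Deliberate fall-through: type-1 objects also get the type-2 handling. */
static int classify(int obj_type)
{
	switch (obj_type) {
	case 1:
		/* type-1-specific work would go here */
		fallthrough;
	case 2:
		return 0;
	default:
		return -1;
	}
}
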
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 629a07a2719b..8edc2506d49e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -221,8 +221,8 @@ static void init_transmitter_control(struct bios_parser *bp)
uint8_t frev;
uint8_t crev;
- if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) == false)
- BREAK_TO_DEBUGGER();
+ BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev);
+
switch (crev) {
case 6:
bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
@@ -711,10 +711,6 @@ static void enable_disp_power_gating_dmcub(
power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING;
power_gating.power_gating.pwr = *pwr;
- /* ATOM_ENABLE is old API in DMUB */
- if (power_gating.power_gating.pwr.enable == ATOM_ENABLE)
- power_gating.power_gating.pwr.enable = ATOM_INIT;
-
dc_dmub_srv_cmd_queue(dmcub, &power_gating.header);
dc_dmub_srv_cmd_execute(dmcub);
dc_dmub_srv_wait_idle(dmcub);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 7388c987c595..204d7942a6e5 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -53,25 +53,18 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCE_VERSION_11_2:
case DCE_VERSION_11_22:
+ case DCE_VERSION_12_0:
+ case DCE_VERSION_12_1:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
case DCN_VERSION_1_0:
case DCN_VERSION_1_01:
- *h = dal_cmd_tbl_helper_dce112_get_table2();
- return true;
-#endif
-
case DCN_VERSION_2_0:
- *h = dal_cmd_tbl_helper_dce112_get_table2();
- return true;
case DCN_VERSION_2_1:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
- case DCE_VERSION_12_0:
- case DCE_VERSION_12_1:
- *h = dal_cmd_tbl_helper_dce112_get_table2();
- return true;
+#endif
default:
/* Unsupported DCE */
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 5d081c42e81b..2c6db379afae 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -3265,33 +3265,33 @@ bool bw_calcs(struct dc_context *ctx,
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[9], bw_int_to_fixed(1000)));
- calcs_output->stutter_entry_wm_ns[0].b_mark =
- bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[4], bw_int_to_fixed(1000)));
- calcs_output->stutter_entry_wm_ns[1].b_mark =
- bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[5], bw_int_to_fixed(1000)));
- calcs_output->stutter_entry_wm_ns[2].b_mark =
- bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[6], bw_int_to_fixed(1000)));
- if (ctx->dc->caps.max_slave_planes) {
- calcs_output->stutter_entry_wm_ns[3].b_mark =
+ calcs_output->stutter_entry_wm_ns[0].b_mark =
bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[0], bw_int_to_fixed(1000)));
- calcs_output->stutter_entry_wm_ns[4].b_mark =
+ stutter_entry_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[1].b_mark =
bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[1], bw_int_to_fixed(1000)));
- } else {
- calcs_output->stutter_entry_wm_ns[3].b_mark =
+ stutter_entry_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[2].b_mark =
bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[7], bw_int_to_fixed(1000)));
- calcs_output->stutter_entry_wm_ns[4].b_mark =
+ stutter_entry_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->stutter_entry_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->stutter_entry_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->stutter_entry_wm_ns[5].b_mark =
bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[8], bw_int_to_fixed(1000)));
- }
- calcs_output->stutter_entry_wm_ns[5].b_mark =
- bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[9], bw_int_to_fixed(1000)));
+ stutter_entry_watermark[9], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[0].b_mark =
bw_fixed_to_int(bw_mul(data->
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 1a37550731de..3960a8db94cb 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -703,11 +703,24 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
}
-unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev)
+unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev, uint32_t pci_revision_id)
{
- /* for dali & pollock, the highest voltage level we want is 0 */
- if (ASICREV_IS_POLLOCK(hw_internal_rev) || ASICREV_IS_DALI(hw_internal_rev))
- return 0;
+ /* for low power RV2 variants, the highest voltage level we want is 0 */
+ if (ASICREV_IS_RAVEN2(hw_internal_rev))
+ switch (pci_revision_id) {
+ case PRID_DALI_DE:
+ case PRID_DALI_DF:
+ case PRID_DALI_E3:
+ case PRID_DALI_E4:
+ case PRID_POLLOCK_94:
+ case PRID_POLLOCK_95:
+ case PRID_POLLOCK_E9:
+ case PRID_POLLOCK_EA:
+ case PRID_POLLOCK_EB:
+ return 0;
+ default:
+ break;
+ }
/* we are ok with all levels */
return 4;
@@ -1277,7 +1290,9 @@ bool dcn_validate_bandwidth(
PERFORMANCE_TRACE_END();
BW_VAL_TRACE_FINISH();
- if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(dc->ctx->asic_id.hw_internal_rev))
+ if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(
+ dc->ctx->asic_id.hw_internal_rev,
+ dc->ctx->asic_id.pci_revision_id))
return true;
else
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index 3cd283195091..c0f6a8c7de7d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -87,6 +87,12 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN20)
###############################################################################
CLK_MGR_DCN21 = rn_clk_mgr.o rn_clk_mgr_vbios_smu.o
+# prevent build errors regarding soft-float vs hard-float FP ABI tags
+# this code is currently unused on ppc64, as it applies to Renoir APUs only
+ifdef CONFIG_PPC64
+CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
+endif
+
AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index a78e5c74c79c..8ec2dfe45d40 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -63,6 +63,25 @@ int clk_mgr_helper_get_active_display_cnt(
return display_count;
}
+int clk_mgr_helper_get_active_plane_cnt(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i, total_plane_count;
+
+ total_plane_count = 0;
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_stream_status stream_status = context->stream_status[i];
+
+ /*
+ * Sum up plane_count for all streams (active and virtual).
+ */
+ total_plane_count += stream_status.plane_count;
+ }
+
+ return total_plane_count;
+}
+
void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
{
struct dc_link *edp_link = get_edp_link(dc);
@@ -134,13 +153,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
#if defined(CONFIG_DRM_AMD_DC_DCN)
case FAMILY_RV:
- if (ASICREV_IS_DALI(asic_id.hw_internal_rev) ||
- ASICREV_IS_POLLOCK(asic_id.hw_internal_rev)) {
- /* TEMP: this check has to come before ASICREV_IS_RENOIR */
- /* which also incorrectly returns true for Dali/Pollock*/
- rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
- break;
- }
if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) {
rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 495f01e9f2ca..55d09adbf0d9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -115,12 +115,11 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
dpp_inst = i;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
- prev_dppclk_khz = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+ prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i];
- if (safe_to_lower || prev_dppclk_khz < dppclk_khz) {
+ if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
clk_mgr->dccg->funcs->update_dpp_dto(
clk_mgr->dccg, dpp_inst, dppclk_khz);
- }
}
}
@@ -158,6 +157,8 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
bool dpp_clock_lowered = false;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
bool force_reset = false;
+ bool p_state_change_support;
+ int total_plane_count;
if (dc->work_arounds.skip_clock_update)
return;
@@ -213,9 +214,11 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
pp_smu->set_hard_min_socclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.socclk_khz / 1000);
}
- if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+ total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
+ p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
+ if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
- clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
+ clk_mgr_base->clks.p_state_change_support = p_state_change_support;
if (pp_smu && pp_smu->set_pstate_handshake_support)
pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support);
}
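
The change above reduces to a small predicate: P-state switching is reported as supported either when the validated clocks allow it or when no planes are active at all, since with nothing being fetched from memory a memory-clock switch cannot underflow any surface. A standalone sketch of that decision, using illustrative names rather than the driver's types:

#include <stdbool.h>

/* Illustrative only: mirrors the gating added to dcn2_update_clocks().
 * With zero active planes no display surface is being fetched from
 * DRAM, so a memory P-state switch is always safe to allow. */
static bool allow_p_state_change(bool clocks_allow_p_state,
				 int total_plane_count)
{
	return clocks_allow_p_state || total_plane_count == 0;
}
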
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 7ae4c06232dd..ab267ddd4abe 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -46,6 +46,7 @@
/* Constants */
#define LPDDR_MEM_RETRAIN_LATENCY 4.977 /* Number obtained from LPDDR4 Training Counter Requirement doc */
+#define SMU_VER_55_51_0 0x373300 /* SMU Version that is able to set DISPCLK below 100MHz */
/* Macros */
@@ -151,6 +152,12 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
rn_vbios_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
}
+ // Workaround: limit dppclk to no less than 100 MHz to avoid underflow
+ // when switching from a lower-resolution eDP panel to eDP plus a 4K monitor.
+ if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+ if (new_clocks->dppclk_khz < 100000)
+ new_clocks->dppclk_khz = 100000;
+ }
+
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
dpp_clock_lowered = true;
@@ -399,7 +406,7 @@ void rn_init_clocks(struct clk_mgr *clk_mgr)
clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
}
-void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
+static void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
{
int i, num_valid_sets;
@@ -412,19 +419,19 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra
ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst;
ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;
- /* We will not select WM based on dcfclk, so leave it as unconstrained */
- ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- /* fclk wil be used to select WM*/
+ /* We will not select WM based on fclk, so leave it as unconstrained */
+ ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ /* dcfclk will be used to select the WM */
if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) {
if (i == 0)
- ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 0;
+ ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = 0;
else {
/* add 1 to make it non-overlapping with next lvl */
- ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = bw_params->clk_table.entries[i - 1].fclk_mhz + 1;
+ ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;
}
- ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
+ ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
} else {
/* unconstrained for memory retraining */
@@ -459,16 +466,15 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra
static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
struct dc_debug_options *debug = &clk_mgr_base->ctx->dc->debug;
- struct pp_smu_wm_range_sets ranges = {0};
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct pp_smu_funcs *pp_smu = clk_mgr->pp_smu;
if (!debug->disable_pplib_wm_range) {
- build_watermark_ranges(clk_mgr_base->bw_params, &ranges);
+ build_watermark_ranges(clk_mgr_base->bw_params, &clk_mgr_base->ranges);
/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
if (pp_smu && pp_smu->rn_funcs.set_wm_ranges)
- pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges);
+ pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &clk_mgr_base->ranges);
}
}
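
The `+ 1` on the lower bound in build_watermark_ranges() is what keeps adjacent watermark sets disjoint: set i drains from one tick above set i-1's ceiling up to its own dcfclk entry. A compact sketch of that construction under assumed names (this is the shape of the computation, not the DC API):

#include <stdint.h>

struct wm_drain_range {
	uint32_t min_drain_clk_mhz;
	uint32_t max_drain_clk_mhz;
};

/* Illustrative: build non-overlapping dcfclk ranges from a sorted clock
 * table, matching the "+ 1" trick used above. ranges[i] covers
 * (dcfclk[i-1] + 1) .. dcfclk[i]; the first range starts at 0. */
static void build_drain_ranges(const uint32_t *dcfclk_mhz, int n,
			       struct wm_drain_range *ranges)
{
	int i;

	for (i = 0; i < n; i++) {
		ranges[i].min_drain_clk_mhz =
			i ? dcfclk_mhz[i - 1] + 1 : 0;
		ranges[i].max_drain_clk_mhz = dcfclk_mhz[i];
	}
}
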
@@ -498,7 +504,7 @@ static struct clk_mgr_funcs dcn21_funcs = {
.notify_wm_ranges = rn_notify_wm_ranges
};
-struct clk_bw_params rn_bw_params = {
+static struct clk_bw_params rn_bw_params = {
.vram_type = Ddr4MemType,
.num_channels = 1,
.clk_table = {
@@ -538,7 +544,7 @@ struct clk_bw_params rn_bw_params = {
};
-struct wm_table ddr4_wm_table = {
+static struct wm_table ddr4_wm_table = {
.entries = {
{
.wm_inst = WM_A,
@@ -575,7 +581,7 @@ struct wm_table ddr4_wm_table = {
}
};
-struct wm_table lpddr4_wm_table = {
+static struct wm_table lpddr4_wm_table = {
.entries = {
{
.wm_inst = WM_A,
@@ -715,6 +721,13 @@ void rn_clk_mgr_construct(
} else {
struct clk_log_info log_info = {0};
+ clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr);
+
+ /* SMU version 55.51.0 and newer no longer have the issue
+ * that required limiting the minimum dispclk */
+ if (clk_mgr->smu_ver >= SMU_VER_55_51_0)
+ debug->min_disp_clk_khz = 0;
+
/* TODO: Check we get what we expect during bringup */
clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 04441dbcba76..2ffb22177df9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -701,7 +701,7 @@ static bool dc_construct(struct dc *dc,
dc_ctx->created_bios = true;
}
-
+ dc->vendor_signature = init_params->vendor_signature;
/* Create GPIO service */
dc_ctx->gpio_service = dal_gpio_service_create(
@@ -761,6 +761,28 @@ static bool disable_all_writeback_pipes_for_stream(
return true;
}
+void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, bool lock)
+{
+ int i = 0;
+
+ /* Use the interdependent update lock if the hook exists; otherwise fall back to per-pipe locking (handles the DCE110 case) */
+ if (dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, lock);
+ else {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ // Copied conditions that were previously in dce110_apply_ctx_for_surface
+ if (stream == pipe_ctx->stream) {
+ if (!pipe_ctx->top_pipe &&
+ (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
+ }
+ }
+ }
+}
+
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
@@ -786,11 +808,20 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
if (should_disable && old_stream) {
dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
- if (dc->hwss.apply_ctx_for_surface)
+
+ if (dc->hwss.apply_ctx_for_surface) {
+ apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
+ apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
+ dc->hwss.post_unlock_program_front_end(dc, dangling_context);
+ }
+ if (dc->hwss.program_front_end_for_ctx) {
+ dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
+ dc->hwss.program_front_end_for_ctx(dc, dangling_context);
+ dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
+ dc->hwss.post_unlock_program_front_end(dc, dangling_context);
+ }
}
- if (dc->hwss.program_front_end_for_ctx)
- dc->hwss.program_front_end_for_ctx(dc, dangling_context);
}
current_ctx = dc->current_state;
@@ -1210,16 +1241,19 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
*/
- if (dc->hwss.apply_ctx_for_surface)
+ if (dc->hwss.apply_ctx_for_surface) {
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->mode_changed)
continue;
-
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
dc->hwss.apply_ctx_for_surface(
dc, context->streams[i],
context->stream_status[i].plane_count,
context); /* use new pipe config in new context */
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
}
+ }
/* Program hardware */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1238,19 +1272,27 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
}
/* Program all planes within new context*/
- if (dc->hwss.program_front_end_for_ctx)
+ if (dc->hwss.program_front_end_for_ctx) {
+ dc->hwss.interdependent_update_lock(dc, context, true);
dc->hwss.program_front_end_for_ctx(dc, context);
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
+ }
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
if (!context->streams[i]->mode_changed)
continue;
- if (dc->hwss.apply_ctx_for_surface)
+ if (dc->hwss.apply_ctx_for_surface) {
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
dc->hwss.apply_ctx_for_surface(
dc, context->streams[i],
context->stream_status[i].plane_count,
context);
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
+ }
/*
* enable stereo
@@ -1318,18 +1360,12 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return (result == DC_OK);
}
-bool dc_is_hw_initialized(struct dc *dc)
-{
- struct dc_bios *dcb = dc->ctx->dc_bios;
- return dcb->funcs->is_accelerated_mode(dcb);
-}
-
bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
int i;
struct dc_state *context = dc->current_state;
- if (!dc->optimized_required || dc->optimize_seamless_boot_streams > 0)
+ if ((!dc->optimized_required) || dc->optimize_seamless_boot_streams > 0)
return true;
post_surface_trace(dc);
@@ -1341,9 +1377,11 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
}
+ dc->hwss.optimize_bandwidth(dc, context);
+
dc->optimized_required = false;
+ dc->wm_optimized_required = false;
- dc->hwss.optimize_bandwidth(dc, context);
return true;
}
@@ -1734,14 +1772,15 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->wb_update)
su_flags->bits.wb_update = 1;
+
+ if (stream_update->dsc_config)
+ su_flags->bits.dsc_changed = 1;
+
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
if (stream_update->output_csc_transform || stream_update->output_color_space)
su_flags->bits.out_csc = 1;
-
- if (stream_update->dsc_config)
- overall_type = UPDATE_TYPE_FULL;
}
for (i = 0 ; i < surface_count; i++) {
@@ -1776,8 +1815,11 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
if (type == UPDATE_TYPE_FULL) {
- if (stream_update)
+ if (stream_update) {
+ uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
stream_update->stream->update_flags.raw = 0xFFFFFFFF;
+ stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
+ }
for (i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0xFFFFFFFF;
}
@@ -1790,7 +1832,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
// Else we fallback to mem compare.
} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
dc->optimized_required = true;
- }
+ } else if (dc->wm_optimized_required)
+ dc->optimized_required = true;
}
return type;
@@ -1829,6 +1872,8 @@ static void copy_surface_update_to_plane(
surface->time.index++;
if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
surface->time.index = 0;
+
+ surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
}
if (srf_update->scaling_info) {
@@ -2093,18 +2138,14 @@ static void commit_planes_do_stream_update(struct dc *dc,
}
}
- if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {
- dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);
- dp_update_dsc_config(pipe_ctx);
- dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);
- }
/* Full fe update*/
if (update_type == UPDATE_TYPE_FAST)
continue;
- if (stream_update->dpms_off) {
- dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
+ if (stream_update->dsc_config)
+ dp_update_dsc_config(pipe_ctx);
+ if (stream_update->dpms_off) {
if (*stream_update->dpms_off) {
core_link_disable_stream(pipe_ctx);
/* for dpms, keep acquired resources*/
@@ -2118,8 +2159,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
core_link_enable_stream(dc->current_state, pipe_ctx);
}
-
- dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
@@ -2175,6 +2214,32 @@ static void commit_planes_for_stream(struct dc *dc,
context_clock_trace(dc, context);
}
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!pipe_ctx->top_pipe &&
+ !pipe_ctx->prev_odm_pipe &&
+ pipe_ctx->stream &&
+ pipe_ctx->stream == stream) {
+ top_pipe_to_program = pipe_ctx;
+ }
+ }
+
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+ if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable)
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
+ top_pipe_to_program->stream_res.tg);
+
+ if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, true);
+ else
+ /* Lock the top pipe while updating plane addrs, since freesync requires
+ * plane addr update event triggers to be synchronized.
+ * top_pipe_to_program is expected to never be NULL
+ */
+ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
+
// Stream updates
if (stream_update)
commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
@@ -2189,6 +2254,12 @@ static void commit_planes_for_stream(struct dc *dc,
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context);
+ if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ else
+ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+
+ dc->hwss.post_unlock_program_front_end(dc, context);
return;
}
@@ -2224,8 +2295,6 @@ static void commit_planes_for_stream(struct dc *dc,
pipe_ctx->stream == stream) {
struct dc_stream_status *stream_status = NULL;
- top_pipe_to_program = pipe_ctx;
-
if (!pipe_ctx->plane_state)
continue;
@@ -2270,12 +2339,6 @@ static void commit_planes_for_stream(struct dc *dc,
// Update Type FAST, Surface updates
if (update_type == UPDATE_TYPE_FAST) {
- /* Lock the top pipe while updating plane addrs, since freesync requires
- * plane addr update event triggers to be synchronized.
- * top_pipe_to_program is expected to never be NULL
- */
- dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
-
if (dc->hwss.set_flip_control_gsl)
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -2317,9 +2380,30 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.update_plane_addr(dc, pipe_ctx);
}
}
+ }
+ if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ else
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
- }
+
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+ if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+ top_pipe_to_program->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+ top_pipe_to_program->stream_res.tg,
+ CRTC_STATE_VBLANK);
+ top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+ top_pipe_to_program->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
+ top_pipe_to_program->stream_res.tg);
+ }
+
+ if (update_type != UPDATE_TYPE_FAST)
+ dc->hwss.post_unlock_program_front_end(dc, context);
// Fire manual trigger only when bottom plane is flipped
for (j = 0; j < dc->res_pool->pipe_count; j++) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index a09119c10d7c..67cfff1586e9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -45,7 +45,7 @@
#include "dpcd_defs.h"
#include "dmcu.h"
#include "hw/clk_mgr.h"
-#include "../dce/dmub_psr.h"
+#include "dce/dmub_psr.h"
#define DC_LOGGER_INIT(logger)
@@ -585,20 +585,23 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
}
-static bool detect_dp(
- struct dc_link *link,
- struct display_sink_capability *sink_caps,
- bool *converter_disable_audio,
- struct audio_support *audio_support,
- enum dc_detect_reason reason)
+static bool detect_dp(struct dc_link *link,
+ struct display_sink_capability *sink_caps,
+ bool *converter_disable_audio,
+ struct audio_support *audio_support,
+ enum dc_detect_reason reason)
{
bool boot = false;
+
sink_caps->signal = link_detect_sink(link, reason);
sink_caps->transaction_type =
get_ddc_transaction_type(sink_caps->signal);
if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
+
+ dpcd_set_source_specific_data(link);
+
if (!detect_dp_sink_caps(link))
return false;
@@ -606,9 +609,8 @@ static bool detect_dp(
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
link->type = dc_connection_mst_branch;
- dal_ddc_service_set_transaction_type(
- link->ddc,
- sink_caps->transaction_type);
+ dal_ddc_service_set_transaction_type(link->ddc,
+ sink_caps->transaction_type);
/*
* This call will initiate MST topology discovery. Which
@@ -637,13 +639,10 @@ static bool detect_dp(
if (reason == DETECT_REASON_BOOT)
boot = true;
- dm_helpers_dp_update_branch_info(
- link->ctx,
- link);
+ dm_helpers_dp_update_branch_info(link->ctx, link);
- if (!dm_helpers_dp_mst_start_top_mgr(
- link->ctx,
- link, boot)) {
+ if (!dm_helpers_dp_mst_start_top_mgr(link->ctx,
+ link, boot)) {
/* MST not supported */
link->type = dc_connection_single;
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
@@ -651,7 +650,7 @@ static bool detect_dp(
}
if (link->type != dc_connection_mst_branch &&
- is_dp_active_dongle(link)) {
+ is_dp_active_dongle(link)) {
/* DP active dongles */
link->type = dc_connection_active_dongle;
if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
@@ -662,14 +661,15 @@ static bool detect_dp(
return true;
}
- if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ if (link->dpcd_caps.dongle_type !=
+ DISPLAY_DONGLE_DP_HDMI_CONVERTER)
*converter_disable_audio = true;
}
} else {
/* DP passive dongles */
sink_caps->signal = dp_passive_dongle_detection(link->ddc,
- sink_caps,
- audio_support);
+ sink_caps,
+ audio_support);
}
return true;
@@ -769,8 +769,16 @@ static bool dc_link_detect_helper(struct dc_link *link,
if ((link->connector_signal == SIGNAL_TYPE_LVDS ||
link->connector_signal == SIGNAL_TYPE_EDP) &&
- link->local_sink)
+ link->local_sink) {
+
+ // need to re-write OUI and brightness in resume case
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ dpcd_set_source_specific_data(link);
+ dc_link_set_default_brightness_aux(link); //TODO: use cached
+ }
+
return true;
+ }
if (false == dc_link_detect_sink(link, &new_connection_type)) {
BREAK_TO_DEBUGGER();
@@ -818,6 +826,10 @@ static bool dc_link_detect_helper(struct dc_link *link,
}
case SIGNAL_TYPE_EDP: {
+ read_current_link_settings_on_detect(link);
+
+ dpcd_set_source_specific_data(link);
+
detect_edp_sink_caps(link);
read_current_link_settings_on_detect(link);
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
@@ -958,10 +970,16 @@ static bool dc_link_detect_helper(struct dc_link *link,
break;
}
+ if (link->local_sink->edid_caps.panel_patch.disable_fec)
+ link->ctx->dc->debug.disable_fec = true;
+
// Check if edid is the same
if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
+ if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ link->ctx->dc->debug.hdmi20_disable = true;
+
if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
/*
@@ -1480,9 +1498,8 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx)
}
}
-static enum dc_status enable_link_dp(
- struct dc_state *state,
- struct pipe_ctx *pipe_ctx)
+static enum dc_status enable_link_dp(struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_status status;
@@ -1492,6 +1509,7 @@ static enum dc_status enable_link_dp(
bool fec_enable;
int i;
bool apply_seamless_boot_optimization = false;
+ uint32_t bl_oled_enable_delay = 50; // in ms
// check for seamless boot
for (i = 0; i < state->stream_count; i++) {
@@ -1513,31 +1531,45 @@ static enum dc_status enable_link_dp(
pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
if (state->clk_mgr && !apply_seamless_boot_optimization)
- state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
+ state->clk_mgr->funcs->update_clocks(state->clk_mgr,
+ state, false);
+
+ // during mode switch we do DP_SET_POWER off then on, and OUI is lost
+ dpcd_set_source_specific_data(link);
skip_video_pattern = true;
if (link_settings.link_rate == LINK_RATE_LOW)
- skip_video_pattern = false;
-
- if (perform_link_training_with_retries(
- &link_settings,
- skip_video_pattern,
- LINK_TRAINING_ATTEMPTS,
- pipe_ctx,
- pipe_ctx->stream->signal)) {
+ skip_video_pattern = false;
+
+ if (perform_link_training_with_retries(&link_settings,
+ skip_video_pattern,
+ LINK_TRAINING_ATTEMPTS,
+ pipe_ctx,
+ pipe_ctx->stream->signal)) {
link->cur_link_settings = link_settings;
status = DC_OK;
- }
- else
+ } else {
status = DC_FAIL_DP_LINK_TRAINING;
+ }
- if (link->preferred_training_settings.fec_enable != NULL)
+ if (link->preferred_training_settings.fec_enable)
fec_enable = *link->preferred_training_settings.fec_enable;
else
fec_enable = true;
dp_set_fec_enable(link, fec_enable);
+
+ // during mode set we do DP_SET_POWER off then on, aux writes are lost
+ if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
+ link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
+ link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
+ dc_link_set_default_brightness_aux(link); // TODO: use cached if known
+ if (link->dpcd_sink_ext_caps.bits.oled == 1)
+ msleep(bl_oled_enable_delay);
+ dc_link_backlight_enable_aux(link, true);
+ }
+
return status;
}
@@ -1733,8 +1765,7 @@ static void write_i2c_retimer_setting(
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
* needs to be set to 1 on every 0xA-0xC write.
@@ -1752,8 +1783,7 @@ static void write_i2c_retimer_setting(
pipe_ctx->stream->link->ddc,
slave_address, &offset, 1, &value, 1);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
buffer[0] = offset;
@@ -1765,8 +1795,7 @@ static void write_i2c_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
}
}
@@ -1786,8 +1815,7 @@ static void write_i2c_retimer_setting(
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
* needs to be set to 1 on every 0xA-0xC write.
@@ -1805,8 +1833,7 @@ static void write_i2c_retimer_setting(
pipe_ctx->stream->link->ddc,
slave_address, &offset, 1, &value, 1);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
buffer[0] = offset;
@@ -1818,8 +1845,7 @@ static void write_i2c_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
}
}
@@ -1837,8 +1863,7 @@ static void write_i2c_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x00 to 0x23 */
buffer[0] = 0x00;
@@ -1849,8 +1874,7 @@ static void write_i2c_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0xff to 0x00 */
buffer[0] = 0xff;
@@ -1861,10 +1885,14 @@ static void write_i2c_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
+
+ return;
+
+i2c_write_fail:
+ DC_LOG_DEBUG("Set retimer failed");
}
static void write_i2c_default_retimer_setting(
@@ -1889,8 +1917,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0A to 0x17 */
buffer[0] = 0x0A;
@@ -1901,8 +1928,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0B to 0xDA or 0xD8 */
buffer[0] = 0x0B;
@@ -1913,8 +1939,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0A to 0x17 */
buffer[0] = 0x0A;
@@ -1925,8 +1950,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0C to 0x1D or 0x91 */
buffer[0] = 0x0C;
@@ -1937,8 +1961,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0A to 0x17 */
buffer[0] = 0x0A;
@@ -1949,8 +1972,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
if (is_vga_mode) {
@@ -1965,8 +1987,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x00 to 0x23 */
buffer[0] = 0x00;
@@ -1977,8 +1998,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0xff to 0x00 */
buffer[0] = 0xff;
@@ -1989,9 +2009,13 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
+
+ return;
+
+i2c_write_fail:
+ DC_LOG_DEBUG("Set default retimer failed");
}
static void write_i2c_redriver_setting(
@@ -2020,8 +2044,7 @@ static void write_i2c_redriver_setting(
slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ DC_LOG_DEBUG("Set redriver failed");
}
static void disable_link(struct dc_link *link, enum signal_type signal)
@@ -2400,8 +2423,8 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dmub_psr *psr = dc->res_pool->psr;
- if ((psr != NULL) && link->psr_feature_enabled)
- psr->funcs->set_psr_enable(psr, allow_active);
+ if (psr != NULL && link->psr_feature_enabled)
+ psr->funcs->psr_enable(psr, allow_active);
else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
@@ -2417,7 +2440,7 @@ bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
struct dmub_psr *psr = dc->res_pool->psr;
if (psr != NULL && link->psr_feature_enabled)
- psr->funcs->get_psr_state(psr_state);
+ psr->funcs->psr_get_state(psr, psr_state);
else if (dmcu != NULL && link->psr_feature_enabled)
dmcu->funcs->get_psr_state(dmcu, psr_state);
@@ -2589,7 +2612,7 @@ bool dc_link_setup_psr(struct dc_link *link,
psr_context->frame_delay = 0;
if (psr)
- link->psr_feature_enabled = psr->funcs->setup_psr(psr, link, psr_context);
+ link->psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context);
else
link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
@@ -2922,10 +2945,13 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
memset(&config, 0, sizeof(config));
config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
- config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->id;
+ /*stream_enc_inst*/
+ config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst;
config.dpms_off = dpms_off;
config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
+ config.mst_supported = (pipe_ctx->stream->signal ==
+ SIGNAL_TYPE_DISPLAY_PORT_MST);
cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
}
}
@@ -3061,6 +3087,9 @@ void core_link_enable_stream(
dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->link->cur_link_settings);
+ if (stream->sink_patches.delay_ignore_msa > 0)
+ msleep(stream->sink_patches.delay_ignore_msa);
+
if (dc_is_dp_signal(pipe_ctx->stream->signal))
enable_stream_features(pipe_ctx);
#if defined(CONFIG_DRM_AMD_DC_HDCP)
@@ -3373,7 +3402,7 @@ uint32_t dc_link_bandwidth_kbps(
link_bw_kbps *= 8; /* 8 bits per byte*/
link_bw_kbps *= link_setting->lane_count;
- if (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
+ if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec) {
/* Account for FEC overhead.
* We have to do it based on caps,
* and not based on FEC being set ready,
@@ -3417,3 +3446,11 @@ void dc_link_overwrite_extended_receiver_cap(
dp_overwrite_extended_receiver_cap(link);
}
+bool dc_link_is_fec_supported(const struct dc_link *link)
+{
+ return (dc_is_dp_signal(link->connector_signal) &&
+ link->link_enc->features.fec_supported &&
+ link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
+ !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index a49c10d5df26..256889eed93e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -126,22 +126,16 @@ struct aux_payloads {
struct vector payloads;
};
-static struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count)
+static bool dal_ddc_i2c_payloads_create(
+ struct dc_context *ctx,
+ struct i2c_payloads *payloads,
+ uint32_t count)
{
- struct i2c_payloads *payloads;
-
- payloads = kzalloc(sizeof(struct i2c_payloads), GFP_KERNEL);
-
- if (!payloads)
- return NULL;
-
if (dal_vector_construct(
&payloads->payloads, ctx, count, sizeof(struct i2c_payload)))
- return payloads;
-
- kfree(payloads);
- return NULL;
+ return true;
+ return false;
}
static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
@@ -154,14 +148,12 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
return p->payloads.count;
}
-static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p)
+static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads *p)
{
- if (!p || !*p)
+ if (!p)
return;
- dal_vector_destruct(&(*p)->payloads);
- kfree(*p);
- *p = NULL;
+ dal_vector_destruct(&p->payloads);
}
#define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
@@ -524,9 +516,13 @@ bool dal_ddc_service_query_ddc_data(
uint32_t payloads_num = write_payloads + read_payloads;
+
if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE)
return false;
+ if (!payloads_num)
+ return false;
+
/*TODO: len of payload data for i2c and aux is uint8!!!!,
* but we want to read 256 over i2c!!!!*/
if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
@@ -557,23 +553,25 @@ bool dal_ddc_service_query_ddc_data(
ret = dal_ddc_submit_aux_command(ddc, &payload);
}
} else {
- struct i2c_payloads *payloads =
- dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
+ struct i2c_command command = {0};
+ struct i2c_payloads payloads;
- struct i2c_command command = {
- .payloads = dal_ddc_i2c_payloads_get(payloads),
- .number_of_payloads = 0,
- .engine = DDC_I2C_COMMAND_ENGINE,
- .speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
+ if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num))
+ return false;
+
+ command.payloads = dal_ddc_i2c_payloads_get(&payloads);
+ command.number_of_payloads = 0;
+ command.engine = DDC_I2C_COMMAND_ENGINE;
+ command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
dal_ddc_i2c_payloads_add(
- payloads, address, write_size, write_buf, true);
+ &payloads, address, write_size, write_buf, true);
dal_ddc_i2c_payloads_add(
- payloads, address, read_size, read_buf, false);
+ &payloads, address, read_size, read_buf, false);
command.number_of_payloads =
- dal_ddc_i2c_payloads_get_count(payloads);
+ dal_ddc_i2c_payloads_get_count(&payloads);
ret = dm_helpers_submit_i2c(
ddc->ctx,
@@ -686,6 +684,10 @@ void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,
uint8_t write_buffer[2] = {0};
/*Lower than 340 Scramble bit from SCDC caps*/
+ if (ddc_service->link->local_sink &&
+ ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ return;
+
dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), &sink_version, sizeof(sink_version));
if (sink_version == 1) {
@@ -715,6 +717,10 @@ void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
uint8_t offset = HDMI_SCDC_TMDS_CONFIG;
uint8_t tmds_config = 0;
+ if (ddc_service->link->local_sink &&
+ ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ return;
+
dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), &tmds_config, sizeof(tmds_config));
if (tmds_config & 0x1) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index cb731c1d30b1..7cbb1efb4f68 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -945,6 +945,17 @@ static enum link_training_result perform_channel_equalization_sequence(
}
#define TRAINING_AUX_RD_INTERVAL 100 //us
+static void start_clock_recovery_pattern_early(struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ uint32_t offset)
+{
+ DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n",
+ __func__);
+ dp_set_hw_training_pattern(link, DP_TRAINING_PATTERN_SEQUENCE_1, offset);
+ dp_set_hw_lane_settings(link, lt_settings, offset);
+ udelay(400);
+}
+
static enum link_training_result perform_clock_recovery_sequence(
struct dc_link *link,
struct link_training_settings *lt_settings,
@@ -962,7 +973,8 @@ static enum link_training_result perform_clock_recovery_sequence(
retries_cr = 0;
retry_count = 0;
- dp_set_hw_training_pattern(link, tr_pattern, offset);
+ if (!link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ dp_set_hw_training_pattern(link, tr_pattern, offset);
/* najeeb - The synaptics MST hub can put the LT in
* infinite loop by switching the VS
@@ -1434,6 +1446,13 @@ enum link_training_result dc_link_dp_perform_link_training(
&link->preferred_training_settings,
&lt_settings);
+ /* Configure lttpr mode */
+ if (!link->is_lttpr_mode_transparent)
+ configure_lttpr_mode(link);
+
+ if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
+
/* 1. set link rate, lane count and spread. */
dpcd_set_link_settings(link, &lt_settings);
@@ -1445,8 +1464,6 @@ enum link_training_result dc_link_dp_perform_link_training(
dp_set_fec_ready(link, fec_enable);
if (!link->is_lttpr_mode_transparent) {
- /* Configure lttpr mode */
- configure_lttpr_mode(link);
/* 2. perform link training (set link training done
* to false is done as well)
@@ -1654,6 +1671,8 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
dp_set_panel_mode(link, panel_mode);
/* Attempt to train with given link training settings */
+ if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
/* Set link rate, lane count and spread. */
dpcd_set_link_settings(link, &lt_settings);
@@ -1892,6 +1911,16 @@ bool dp_verify_link_cap(
/* disable PHY done possible by BIOS, will be done by driver itself */
dp_disable_link_phy(link, link->connector_signal);
+ dp_cs_id = get_clock_source_id(link);
+
+ /* link training starts with the maximum common settings
+ * supported by both sink and ASIC.
+ */
+ initial_link_settings = get_common_supported_link_settings(
+ *known_limit_link_setting,
+ max_link_cap);
+ cur_link_setting = initial_link_settings;
+
/* Temporary Renoir-specific workaround for SWDEV-215184;
* PHY will sometimes be in bad state on hotplugging display from certain USB-C dongle,
* so add extra cycle of enabling and disabling the PHY before first link training.
@@ -1902,15 +1931,6 @@ bool dp_verify_link_cap(
dp_disable_link_phy(link, link->connector_signal);
}
- dp_cs_id = get_clock_source_id(link);
-
- /* link training starts with the maximum common settings
- * supported by both sink and ASIC.
- */
- initial_link_settings = get_common_supported_link_settings(
- *known_limit_link_setting,
- max_link_cap);
- cur_link_setting = initial_link_settings;
do {
skip_video_pattern = true;
@@ -2654,9 +2674,12 @@ static void dp_test_send_link_test_pattern(struct dc_link *link)
break;
}
- test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
- DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
- DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
+ if (dpcd_test_params.bits.CLR_FORMAT == 0)
+ test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
+ else
+ test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
dc_link_dp_set_test_pattern(
link,
@@ -3165,6 +3188,23 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
link->wa_flags.dp_keep_receiver_powered = false;
}
+/* Read additional sink caps defined in source specific DPCD area
+ * This function currently only reads from SinkCapability address (DP_SOURCE_SINK_CAP)
+ */
+static bool dpcd_read_sink_ext_caps(struct dc_link *link)
+{
+ uint8_t dpcd_data;
+
+ if (!link)
+ return false;
+
+ if (core_link_read_dpcd(link, DP_SOURCE_SINK_CAP, &dpcd_data, 1) != DC_OK)
+ return false;
+
+ link->dpcd_sink_ext_caps.raw = dpcd_data;
+ return true;
+}
+
static bool retrieve_link_cap(struct dc_link *link)
{
/* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
@@ -3401,6 +3441,17 @@ static bool retrieve_link_cap(struct dc_link *link)
sink_id.ieee_device_id,
sizeof(sink_id.ieee_device_id));
+ /* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */
+ {
+ uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 };
+
+ if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
+ !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017,
+ sizeof(str_mbp_2017))) {
+ link->reported_link_cap.link_rate = 0x0c;
+ }
+ }
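For context: 0x0c here corresponds to dc's LINK_RATE_RBR2, i.e. 3.24 Gbps per lane, so the quirk caps the advertised rate instead of trusting the panel's bogus DP_MAX_LINK_RATE (the enum value is recalled from dc_dp_types.h, not shown in this hunk).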
+
core_link_read_dpcd(
link,
DP_SINK_HW_REVISION_START,
@@ -3437,6 +3488,9 @@ static bool retrieve_link_cap(struct dc_link *link)
sizeof(link->dpcd_caps.dsc_caps.dsc_ext_caps.raw));
}
+ if (!dpcd_read_sink_ext_caps(link))
+ link->dpcd_sink_ext_caps.raw = 0;
+
/* Connectivity log: detection */
CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
@@ -3589,6 +3643,8 @@ void detect_edp_sink_caps(struct dc_link *link)
}
}
link->verified_link_cap = link->reported_link_cap;
+
+ dc_link_set_default_brightness_aux(link);
}
void dc_link_dp_enable_hpd(const struct dc_link *link)
@@ -3680,7 +3736,8 @@ static void set_crtc_test_pattern(struct dc_link *link,
struct pipe_ctx *odm_pipe;
enum controller_dp_color_space controller_color_space;
int opp_cnt = 1;
- int count;
+ int offset = 0;
+ int dpg_width = width;
switch (test_pattern_color_space) {
case DP_TEST_PATTERN_COLOR_SPACE_RGB:
@@ -3702,33 +3759,30 @@ static void set_crtc_test_pattern(struct dc_link *link,
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
opp_cnt++;
+ dpg_width = width / opp_cnt;
+ offset = dpg_width;
- width /= opp_cnt;
+ opp->funcs->opp_set_disp_pattern_generator(opp,
+ controller_test_pattern,
+ controller_color_space,
+ color_depth,
+ NULL,
+ dpg_width,
+ height,
+ 0);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
-
odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp,
controller_test_pattern,
controller_color_space,
color_depth,
NULL,
- width,
- height);
- }
- opp->funcs->opp_set_disp_pattern_generator(opp,
- controller_test_pattern,
- controller_color_space,
- color_depth,
- NULL,
- width,
- height);
- /* wait for dpg to blank pixel data with test pattern */
- for (count = 0; count < 1000; count++) {
- if (opp->funcs->dpg_is_blanked(opp))
- break;
- udelay(100);
+ dpg_width,
+ height,
+ offset);
+ offset += offset;
}
}
}
@@ -3746,11 +3800,12 @@ static void set_crtc_test_pattern(struct dc_link *link,
else if (opp->funcs->opp_set_disp_pattern_generator) {
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
+ int dpg_width = width;
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
opp_cnt++;
- width /= opp_cnt;
+ dpg_width = width / opp_cnt;
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
@@ -3760,16 +3815,18 @@ static void set_crtc_test_pattern(struct dc_link *link,
CONTROLLER_DP_COLOR_SPACE_UDEFINED,
color_depth,
NULL,
- width,
- height);
+ dpg_width,
+ height,
+ 0);
}
opp->funcs->opp_set_disp_pattern_generator(opp,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
CONTROLLER_DP_COLOR_SPACE_UDEFINED,
color_depth,
NULL,
- width,
- height);
+ dpg_width,
+ height,
+ 0);
}
}
break;
@@ -3947,6 +4004,11 @@ bool dc_link_dp_set_test_pattern(
default:
break;
}
+
+ if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable)
+ pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable(
+ pipe_ctx->stream_res.tg);
+ pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
/* update MSA to requested color space */
pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream->timing,
@@ -3954,9 +4016,27 @@ bool dc_link_dp_set_test_pattern(
pipe_ctx->stream->use_vsc_sdp_for_colorimetry,
link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
+ if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) {
+ if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+ pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range
+ else
+ pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7);
+ resource_build_info_frame(pipe_ctx);
+ link->dc->hwss.update_info_frame(pipe_ctx);
+ }
+
/* CRTC Patterns */
set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
-
+ pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+ CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable)
+ pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable(
+ pipe_ctx->stream_res.tg);
/* Set Test Pattern state */
link->test_pattern_enabled = true;
}
@@ -4086,8 +4166,7 @@ void dp_set_fec_ready(struct dc_link *link, bool ready)
struct link_encoder *link_enc = link->link_enc;
uint8_t fec_config = 0;
- if (link->dc->debug.disable_fec ||
- IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment))
+ if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
return;
if (link_enc->funcs->fec_set_ready &&
@@ -4122,8 +4201,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
{
struct link_encoder *link_enc = link->link_enc;
- if (link->dc->debug.disable_fec ||
- IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment))
+ if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
return;
if (link_enc->funcs->fec_set_enable &&
@@ -4146,3 +4224,148 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
}
}
+void dpcd_set_source_specific_data(struct dc_link *link)
+{
+ const uint32_t post_oui_delay = 30; // 30ms
+
+ if (!link->dc->vendor_signature.is_valid) {
+ struct dpcd_amd_signature amd_signature;
+ amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
+ amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0;
+ amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A;
+ amd_signature.device_id_byte1 =
+ (uint8_t)(link->ctx->asic_id.chip_id);
+ amd_signature.device_id_byte2 =
+ (uint8_t)(link->ctx->asic_id.chip_id >> 8);
+ memset(&amd_signature.zero, 0, 4);
+ amd_signature.dce_version =
+ (uint8_t)(link->ctx->dce_version);
+ amd_signature.dal_version_byte1 = 0x0; // needed? where to get?
+ amd_signature.dal_version_byte2 = 0x0; // needed? where to get?
+
+ core_link_write_dpcd(link, DP_SOURCE_OUI,
+ (uint8_t *)(&amd_signature),
+ sizeof(amd_signature));
+
+ } else {
+ core_link_write_dpcd(link, DP_SOURCE_OUI,
+ link->dc->vendor_signature.data.raw,
+ sizeof(link->dc->vendor_signature.data.raw));
+ }
+
+ // the sink may need to configure internals based on vendor, so allow some
+ // time before proceeding with possibly vendor-specific transactions
+ msleep(post_oui_delay);
+}
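For illustration, the fallback path above leaves twelve bytes at DP_SOURCE_OUI (0x300); the chip_id and dce_version below are made-up values used only to show the byte order:

    /* Hypothetical DPCD 0x300..0x30B contents for chip_id = 0x1636, dce_version = 0x64 */
    /* 0x300..0x302: 00 00 1A     AMD IEEE OUI signature bytes        */
    /* 0x303..0x304: 36 16        device id, low byte first           */
    /* 0x305..0x308: 00 00 00 00  zero padding                        */
    /* 0x309:        64           dce_version                         */
    /* 0x30A..0x30B: 00 00        dal_version bytes (left zero above) */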
+
+bool dc_link_set_backlight_level_nits(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms)
+{
+ struct dpcd_source_backlight_set dpcd_backlight_set;
+ uint8_t backlight_control = isHDR ? 1 : 0;
+
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ // OLEDs have no PWM; they can only use AUX
+ if (link->dpcd_sink_ext_caps.bits.oled == 1)
+ backlight_control = 1;
+
+ *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
+ *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
+
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ (uint8_t *)(&dpcd_backlight_set),
+ sizeof(dpcd_backlight_set)) != DC_OK)
+ return false;
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
+ &backlight_control, 1) != DC_OK)
+ return false;
+
+ return true;
+}
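A usage sketch (caller and values assumed, not part of the patch): driving an HDR-capable panel to 300 nits with a 500 ms ramp. The helper takes millinits, so the caller does the conversion:

    /* 300 nits = 300 * 1000 millinits; isHDR = true selects backlight_control = 1 */
    if (!dc_link_set_backlight_level_nits(link, true, 300 * 1000, 500))
            DC_LOG_WARNING("AUX backlight set failed"); /* hypothetical error handling */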
+
+bool dc_link_get_backlight_level_nits(struct dc_link *link,
+ uint32_t *backlight_millinits_avg,
+ uint32_t *backlight_millinits_peak)
+{
+ union dpcd_source_backlight_get dpcd_backlight_get;
+
+ memset(&dpcd_backlight_get, 0, sizeof(union dpcd_source_backlight_get));
+
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
+ dpcd_backlight_get.raw,
+ sizeof(union dpcd_source_backlight_get)))
+ return false;
+
+ *backlight_millinits_avg =
+ dpcd_backlight_get.bytes.backlight_millinits_avg;
+ *backlight_millinits_peak =
+ dpcd_backlight_get.bytes.backlight_millinits_peak;
+
+ /* On non-supported panels dpcd_read usually succeeds with 0 returned */
+ if (*backlight_millinits_avg == 0 ||
+ *backlight_millinits_avg > *backlight_millinits_peak)
+ return false;
+
+ return true;
+}
+
+bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable)
+{
+ uint8_t backlight_enable = enable ? 1 : 0;
+
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE,
+ &backlight_enable, 1) != DC_OK)
+ return false;
+
+ return true;
+}
+
+// we read the default from 0x320 because we expect the BIOS wrote it there;
+// the regular get_backlight_nit path reads the panel-set value at 0x326
+bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits)
+{
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ (uint8_t *) backlight_millinits,
+ sizeof(uint32_t)))
+ return false;
+
+ return true;
+}
+
+bool dc_link_set_default_brightness_aux(struct dc_link *link)
+{
+ uint32_t default_backlight;
+
+ if (link &&
+ (link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
+ link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) {
+ if (!dc_link_read_default_bl_aux(link, &default_backlight))
+ default_backlight = 150000;
+ // values below 5 nits or above 5000 nits are likely a bad readback
+ if (default_backlight < 5000 || default_backlight > 5000000)
+ default_backlight = 150000; // fall back to 150 nits
+
+ return dc_link_set_backlight_level_nits(link, true,
+ default_backlight, 0);
+ }
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index ddb855045767..51e0ee6e7695 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -153,18 +153,19 @@ bool edp_receiver_ready_T9(struct dc_link *link)
unsigned char edpRev = 0;
enum dc_status result = DC_OK;
result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
- if (edpRev < DP_EDP_12)
- return true;
- /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
- do {
- sinkstatus = 1;
- result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
- if (sinkstatus == 0)
- break;
- if (result != DC_OK)
- break;
- udelay(100); //MAx T9
- } while (++tries < 50);
+
+ /* starting from eDP version 1.2, SINK_STATUS indicates the sink is ready */
+ if (result == DC_OK && edpRev >= DP_EDP_12) {
+ do {
+ sinkstatus = 1;
+ result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+ if (sinkstatus == 0)
+ break;
+ if (result != DC_OK)
+ break;
+ udelay(100); // max T9
+ } while (++tries < 50);
+ }
if (link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0)
udelay(link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off * 1000);
@@ -183,21 +184,22 @@ bool edp_receiver_ready_T7(struct dc_link *link)
unsigned long long time_taken_in_ns = 0;
result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
- if (result == DC_OK && edpRev < DP_EDP_12)
- return true;
- /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
- enter_timestamp = dm_get_timestamp(link->ctx);
- do {
- sinkstatus = 0;
- result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
- if (sinkstatus == 1)
- break;
- if (result != DC_OK)
- break;
- udelay(25);
- finish_timestamp = dm_get_timestamp(link->ctx);
- time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
- } while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms
+
+ if (result == DC_OK && edpRev >= DP_EDP_12) {
+ /* starting from eDP version 1.2, SINK_STATUS indicates the sink is ready */
+ enter_timestamp = dm_get_timestamp(link->ctx);
+ do {
+ sinkstatus = 0;
+ result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+ if (sinkstatus == 1)
+ break;
+ if (result != DC_OK)
+ break;
+ udelay(25);
+ finish_timestamp = dm_get_timestamp(link->ctx);
+ time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
+ } while (time_taken_in_ns < 50 * 1000000); // max T7 is 50 ms
+ }
if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
@@ -429,6 +431,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
@@ -533,6 +536,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
DC_LOG_DSC(" ");
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index a0eb9e533a61..75c7ce4c7581 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -46,12 +46,12 @@
#include "dce100/dce100_resource.h"
#include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h"
+#include "dce120/dce120_resource.h"
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/dcn10_resource.h"
-#endif
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
-#include "dce120/dce120_resource.h"
+#endif
#define DC_LOGGER_INIT(logger)
@@ -532,6 +532,51 @@ static inline void get_vp_scan_direction(
*flip_horz_scan_dir = !*flip_horz_scan_dir;
}
+int get_num_odm_splits(struct pipe_ctx *pipe)
+{
+ int odm_split_count = 0;
+ struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
+ while (next_pipe) {
+ odm_split_count++;
+ next_pipe = next_pipe->next_odm_pipe;
+ }
+ pipe = pipe->prev_odm_pipe;
+ while (pipe) {
+ odm_split_count++;
+ pipe = pipe->prev_odm_pipe;
+ }
+ return odm_split_count;
+}
+
+static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *split_count, int *split_idx)
+{
+ *split_count = get_num_odm_splits(pipe_ctx);
+ *split_idx = 0;
+ if (*split_count == 0) {
+ /*Check for mpc split*/
+ struct pipe_ctx *split_pipe = pipe_ctx->top_pipe;
+
+ while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
+ (*split_idx)++;
+ (*split_count)++;
+ split_pipe = split_pipe->top_pipe;
+ }
+ split_pipe = pipe_ctx->bottom_pipe;
+ while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
+ (*split_count)++;
+ split_pipe = split_pipe->bottom_pipe;
+ }
+ } else {
+ /*Get odm split index*/
+ struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe;
+
+ while (split_pipe) {
+ (*split_idx)++;
+ split_pipe = split_pipe->prev_odm_pipe;
+ }
+ }
+}
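A worked example of the two counting paths, with an assumed three-pipe MPC chain sharing one plane (top, middle, bottom); ODM pipes instead get their count from get_num_odm_splits() and their index from walking prev_odm_pipe:

    /* middle pipe: one top_pipe hop -> split_idx = 1, plus one bottom hop -> split_count = 2 */
    /* top pipe:    no top_pipe hop  -> split_idx = 0, two bottom hops     -> split_count = 2 */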
+
static void calculate_viewport(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
@@ -541,16 +586,16 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
struct rect clip, dest;
int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
- bool pri_split = pipe_ctx->bottom_pipe &&
- pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
- bool sec_split = pipe_ctx->top_pipe &&
- pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
+ int split_count = 0;
+ int split_idx = 0;
bool orthogonal_rotation, flip_y_start, flip_x_start;
+ calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
+
if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
- pri_split = false;
- sec_split = false;
+ split_count = 0;
+ split_idx = 0;
}
/* The actual clip is an intersection between stream
@@ -609,23 +654,32 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
data->viewport.height = clip.height * surf_src.height / dest.height;
/* Handle split */
- if (pri_split || sec_split) {
+ if (split_count) {
+ /* extra pixels in the division remainder need to go to the pipes after
+ * the "extra pixel index minus one" (epimo), defined here as:
+ */
+ int epimo = 0;
+
if (orthogonal_rotation) {
- if (flip_y_start != pri_split)
- data->viewport.height /= 2;
- else {
- data->viewport.y += data->viewport.height / 2;
- /* Ceil offset pipe */
- data->viewport.height = (data->viewport.height + 1) / 2;
- }
+ if (flip_y_start)
+ split_idx = split_count - split_idx;
+
+ epimo = split_count - data->viewport.height % (split_count + 1);
+
+ data->viewport.y += (data->viewport.height / (split_count + 1)) * split_idx;
+ if (split_idx > epimo)
+ data->viewport.y += split_idx - epimo - 1;
+ data->viewport.height = data->viewport.height / (split_count + 1) + (split_idx > epimo ? 1 : 0);
} else {
- if (flip_x_start != pri_split)
- data->viewport.width /= 2;
- else {
- data->viewport.x += data->viewport.width / 2;
- /* Ceil offset pipe */
- data->viewport.width = (data->viewport.width + 1) / 2;
- }
+ if (flip_x_start)
+ split_idx = split_count - split_idx;
+
+ epimo = split_count - data->viewport.width % (split_count + 1);
+
+ data->viewport.x += (data->viewport.width / (split_count + 1)) * split_idx;
+ if (split_idx > epimo)
+ data->viewport.x += split_idx - epimo - 1;
+ data->viewport.width = data->viewport.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
}
}
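To make the remainder distribution concrete, a worked example with assumed numbers: a 1283-pixel-wide viewport split across four pipes gives split_count = 3, a base width of 1283 / 4 = 320, and epimo = 3 - (1283 % 4) = 0, so every pipe with split_idx > 0 takes one extra pixel and shifts right to compensate:

    /* split_idx:    0     1     2     3                         */
    /* viewport.x:  +0    +320  +641  +962                       */
    /* width:        320   321   321   321  (320 + 3*321 = 1283) */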
@@ -644,58 +698,58 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect surf_clip = plane_state->clip_rect;
- bool pri_split = pipe_ctx->bottom_pipe &&
- pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
- bool sec_split = pipe_ctx->top_pipe &&
- pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
- bool top_bottom_split = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
-
- pipe_ctx->plane_res.scl_data.recout.x = stream->dst.x;
+ bool pri_split_tb = pipe_ctx->bottom_pipe &&
+ pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state &&
+ stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
+ bool sec_split_tb = pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state &&
+ stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
+ int split_count = 0;
+ int split_idx = 0;
+
+ calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
+
+ data->recout.x = stream->dst.x;
if (stream->src.x < surf_clip.x)
- pipe_ctx->plane_res.scl_data.recout.x += (surf_clip.x
- - stream->src.x) * stream->dst.width
+ data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
/ stream->src.width;
- pipe_ctx->plane_res.scl_data.recout.width = surf_clip.width *
- stream->dst.width / stream->src.width;
- if (pipe_ctx->plane_res.scl_data.recout.width + pipe_ctx->plane_res.scl_data.recout.x >
- stream->dst.x + stream->dst.width)
- pipe_ctx->plane_res.scl_data.recout.width =
- stream->dst.x + stream->dst.width
- - pipe_ctx->plane_res.scl_data.recout.x;
+ data->recout.width = surf_clip.width * stream->dst.width / stream->src.width;
+ if (data->recout.width + data->recout.x > stream->dst.x + stream->dst.width)
+ data->recout.width = stream->dst.x + stream->dst.width - data->recout.x;
- pipe_ctx->plane_res.scl_data.recout.y = stream->dst.y;
+ data->recout.y = stream->dst.y;
if (stream->src.y < surf_clip.y)
- pipe_ctx->plane_res.scl_data.recout.y += (surf_clip.y
- - stream->src.y) * stream->dst.height
+ data->recout.y += (surf_clip.y - stream->src.y) * stream->dst.height
/ stream->src.height;
- pipe_ctx->plane_res.scl_data.recout.height = surf_clip.height *
- stream->dst.height / stream->src.height;
- if (pipe_ctx->plane_res.scl_data.recout.height + pipe_ctx->plane_res.scl_data.recout.y >
- stream->dst.y + stream->dst.height)
- pipe_ctx->plane_res.scl_data.recout.height =
- stream->dst.y + stream->dst.height
- - pipe_ctx->plane_res.scl_data.recout.y;
+ data->recout.height = surf_clip.height * stream->dst.height / stream->src.height;
+ if (data->recout.height + data->recout.y > stream->dst.y + stream->dst.height)
+ data->recout.height = stream->dst.y + stream->dst.height - data->recout.y;
/* Handle h & v split, handle rotation using viewport */
- if (sec_split && top_bottom_split) {
- pipe_ctx->plane_res.scl_data.recout.y +=
- pipe_ctx->plane_res.scl_data.recout.height / 2;
+ if (sec_split_tb) {
+ data->recout.y += data->recout.height / 2;
/* Floor primary pipe, ceil 2ndary pipe */
- pipe_ctx->plane_res.scl_data.recout.height =
- (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
- } else if (pri_split && top_bottom_split)
- pipe_ctx->plane_res.scl_data.recout.height /= 2;
- else if (sec_split) {
- pipe_ctx->plane_res.scl_data.recout.x +=
- pipe_ctx->plane_res.scl_data.recout.width / 2;
- /* Ceil offset pipe */
- pipe_ctx->plane_res.scl_data.recout.width =
- (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
- } else if (pri_split)
- pipe_ctx->plane_res.scl_data.recout.width /= 2;
+ data->recout.height = (data->recout.height + 1) / 2;
+ } else if (pri_split_tb)
+ data->recout.height /= 2;
+ else if (split_count) {
+ /* extra pixels in the division remainder need to go to the pipes after
+ * the "extra pixel index minus one" (epimo), defined here as:
+ */
+ int epimo = split_count - data->recout.width % (split_count + 1);
+
+ /* no recout x offset for ODM pipes */
+ if (!pipe_ctx->next_odm_pipe && !pipe_ctx->prev_odm_pipe) {
+ data->recout.x += (data->recout.width / (split_count + 1)) * split_idx;
+ if (split_idx > epimo)
+ data->recout.x += split_idx - epimo - 1;
+ }
+ data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
+ }
}
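The recout path divides width with the same epimo arithmetic as the viewport example above; the one difference is that ODM pipes skip the x offset, because each ODM pipe scans out its own horizontal slice and the shift is folded into recout_skip_h further down instead.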
static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
@@ -832,12 +886,14 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct pipe_ctx *odm_pipe = pipe_ctx->prev_odm_pipe;
struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect src = pipe_ctx->plane_state->src_rect;
int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v;
int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
+ int odm_idx = 0;
/*
* Need to calculate the scan direction for viewport to make adjustments
@@ -869,6 +925,14 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
* stream->dst.width / stream->src.width -
src.x * plane_state->dst_rect.width / src.width
* stream->dst.width / stream->src.width);
+ /* adjust recout_skip_h because ODM pipes carry no recout x offset */
+ while (odm_pipe) {
+ odm_idx++;
+ odm_pipe = odm_pipe->prev_odm_pipe;
+ }
+ if (odm_idx)
+ recout_skip_h += odm_idx * data->recout.width;
+
recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
* stream->dst.height / stream->src.height -
src.y * plane_state->dst_rect.height / src.height
@@ -1021,6 +1085,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
store_h_border_left + timing->h_border_right;
pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable +
timing->v_border_top + timing->v_border_bottom;
+ if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe)
+ pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1;
/* Taps calculations */
if (pipe_ctx->plane_res.xfm != NULL)
@@ -2034,7 +2100,7 @@ enum dc_status resource_map_pool_resources(
for (i = 0; i < context->stream_count; i++)
if (context->streams[i] == stream) {
context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst;
- context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->id;
+ context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->stream_enc_inst;
context->stream_status[i].audio_inst =
pipe_ctx->stream_res.audio ? pipe_ctx->stream_res.audio->inst : -1;
@@ -2108,10 +2174,10 @@ enum dc_status dc_validate_global_state(
if (pipe_ctx->stream != stream)
continue;
- if (dc->res_pool->funcs->get_default_swizzle_mode &&
+ if (dc->res_pool->funcs->patch_unknown_plane_state &&
pipe_ctx->plane_state &&
pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) {
- result = dc->res_pool->funcs->get_default_swizzle_mode(pipe_ctx->plane_state);
+ result = dc->res_pool->funcs->patch_unknown_plane_state(pipe_ctx->plane_state);
if (result != DC_OK)
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
index a96d8de9380e..64cf24a9ab08 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
@@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
*/
memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
dc->vm_pa_config.valid = true;
+
+ if (pa_config->is_hvm_enabled == 0)
+ dc->debug.nv12_iflip_vm_wa = false;
}
return num_vmids;
@@ -62,7 +65,7 @@ int dc_get_vmid_use_vector(struct dc *dc)
int i;
int in_use = 0;
- for (i = 0; i < dc->vm_helper->num_vmid; i++)
+ for (i = 0; i < MAX_HUBP; i++)
in_use |= dc->vm_helper->hubp_vmid_usage[i].vmid_usage[0]
| dc->vm_helper->hubp_vmid_usage[i].vmid_usage[1];
return in_use;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 8ff25b5dd2f6..d3ceb39e428e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.69"
+#define DC_VER "3.2.76"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -126,6 +126,7 @@ struct dc_bug_wa {
bool no_connect_phy_config;
bool dedcn20_305_wa;
bool skip_clock_update;
+ bool lt_early_cr_pattern;
};
struct dc_dcc_surface_param {
@@ -229,6 +230,7 @@ struct dc_config {
bool forced_clocks;
bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well
bool multi_mon_pp_mclk_switch;
+ bool psr_on_dmub;
};
enum visual_confirm {
@@ -388,6 +390,7 @@ struct dc_debug_options {
int always_scale;
bool disable_pplib_clock_request;
bool disable_clock_gate;
+ bool disable_mem_low_power;
bool disable_dmcu;
bool disable_psr;
bool force_abm_enable;
@@ -453,6 +456,7 @@ struct dc_phy_addr_space_config {
} gart_config;
bool valid;
+ bool is_hvm_enabled;
uint64_t page_table_default_page_addr;
};
@@ -518,6 +522,7 @@ struct dc {
/* Require to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
+ bool wm_optimized_required;
/* Require to maintain clocks and bandwidth for UEFI enabled HW */
int optimize_seamless_boot_streams;
@@ -526,6 +531,7 @@ struct dc {
struct compressor *fbc_compressor;
struct dc_debug_data debug_data;
+ struct dpcd_vendor_signature vendor_signature;
const char *build_id;
struct vm_helper *vm_helper;
@@ -565,12 +571,14 @@ struct dc_init_data {
struct dc_reg_helper_state *dmub_offload;
struct dc_config flags;
- uint32_t log_mask;
+ uint64_t log_mask;
+
/**
* gpu_info FW provided soc bounding box struct or 0 if not
* available in FW
*/
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
+ struct dpcd_vendor_signature vendor_signature;
};
struct dc_callback_init {
@@ -682,7 +690,6 @@ struct dc_3dlut {
struct kref refcount;
struct tetrahedral_params lut_3d;
struct fixed31_32 hdr_multiplier;
- bool initialized; /*remove after diag fix*/
union dc_3dlut_state state;
struct dc_context *ctx;
};
@@ -865,6 +872,7 @@ struct dc_flip_addrs {
unsigned int flip_timestamp_in_us;
bool flip_immediate;
/* TODO: add flip duration for FreeSync */
+ bool triplebuffer_flips;
};
bool dc_post_update_surfaces_to_stream(
@@ -979,6 +987,20 @@ struct dpcd_caps {
};
+union dpcd_sink_ext_caps {
+ struct {
+ /* 0 - Sink supports backlight adjust via PWM during SDR/HDR mode
+ * 1 - Sink supports backlight adjust via AUX during SDR/HDR mode.
+ */
+ uint8_t sdr_aux_backlight_control : 1;
+ uint8_t hdr_aux_backlight_control : 1;
+ uint8_t reserved_1 : 2;
+ uint8_t oled : 1;
+ uint8_t reserved : 3;
+ } bits;
+ uint8_t raw;
+};
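A quick decoding sketch with an assumed readback value: raw = 0x12 has bits 1 and 4 set, i.e. a panel advertising HDR AUX backlight control on an OLED:

    union dpcd_sink_ext_caps caps = { .raw = 0x12 }; /* hypothetical readback */
    /* caps.bits.hdr_aux_backlight_control == 1, caps.bits.oled == 1 */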
+
#include "dc_link.h"
/*******************************************************************************
@@ -1004,6 +1026,11 @@ struct dc_sink_dsc_caps {
struct dsc_dec_dpcd_caps dsc_dec_caps;
};
+struct dc_sink_fec_caps {
+ bool is_rx_fec_supported;
+ bool is_topology_fec_supported;
+};
+
/*
* The sink structure contains EDID and other display device properties
*/
@@ -1017,7 +1044,10 @@ struct dc_sink {
struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
bool converter_disable_audio;
- struct dc_sink_dsc_caps sink_dsc_caps;
+ struct dc_sink_dsc_caps dsc_caps;
+ struct dc_sink_fec_caps fec_caps;
+
+ bool is_vsc_sdp_colorimetry_supported;
/* private to DC core */
struct dc_link *link;
@@ -1075,7 +1105,6 @@ unsigned int dc_get_current_backlight_pwm(struct dc *dc);
unsigned int dc_get_target_backlight_pwm(struct dc *dc);
bool dc_is_dmcu_initialized(struct dc *dc);
-bool dc_is_hw_initialized(struct dc *dc);
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index dfe4472c9e40..bb2730e9521e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -432,6 +432,54 @@ struct dp_sink_hw_fw_revision {
uint8_t ieee_fw_rev[2];
};
+struct dpcd_vendor_signature {
+ bool is_valid;
+
+ union dpcd_ieee_vendor_signature {
+ struct {
+ uint8_t ieee_oui[3];/*24-bit IEEE OUI*/
+ uint8_t ieee_device_id[6];/*usually 6-byte ASCII name*/
+ uint8_t ieee_hw_rev;
+ uint8_t ieee_fw_rev[2];
+ };
+ uint8_t raw[12];
+ } data;
+};
+
+struct dpcd_amd_signature {
+ uint8_t AMD_IEEE_TxSignature_byte1;
+ uint8_t AMD_IEEE_TxSignature_byte2;
+ uint8_t AMD_IEEE_TxSignature_byte3;
+ uint8_t device_id_byte1;
+ uint8_t device_id_byte2;
+ uint8_t zero[4];
+ uint8_t dce_version;
+ uint8_t dal_version_byte1;
+ uint8_t dal_version_byte2;
+};
+
+struct dpcd_source_backlight_set {
+ struct {
+ uint8_t byte0;
+ uint8_t byte1;
+ uint8_t byte2;
+ uint8_t byte3;
+ } backlight_level_millinits;
+
+ struct {
+ uint8_t byte0;
+ uint8_t byte1;
+ } backlight_transition_time_ms;
+};
+
+union dpcd_source_backlight_get {
+ struct {
+ uint32_t backlight_millinits_peak; /* 326h */
+ uint32_t backlight_millinits_avg; /* 32Ah */
+ } bytes;
+ uint8_t raw[8];
+};
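For illustration, an assumed little-endian 8-byte AUX readback of 20 A1 07 00 30 57 05 00 decodes as:

    /* raw[0..3] (326h): 0x0007A120 -> backlight_millinits_peak = 500000 (500 nits) */
    /* raw[4..7] (32Ah): 0x00055730 -> backlight_millinits_avg  = 350000 (350 nits) */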
+
/*DPCD register of DP receiver capability field bits-*/
union edp_configuration_cap {
struct {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index d25603128394..00ff5e98278c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -26,6 +26,7 @@
#ifndef DC_LINK_H_
#define DC_LINK_H_
+#include "dc.h"
#include "dc_types.h"
#include "grph_object_defs.h"
@@ -128,6 +129,7 @@ struct dc_link {
enum edp_revision edp_revision;
bool psr_feature_enabled;
bool psr_allow_active;
+ union dpcd_sink_ext_caps dpcd_sink_ext_caps;
/* MST record stream using this link */
struct link_flags {
@@ -178,6 +180,21 @@ bool dc_link_set_backlight_level(const struct dc_link *dc_link,
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp);
+/* Set/get nits-based backlight level of an embedded panel (eDP, LVDS). */
+bool dc_link_set_backlight_level_nits(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms);
+
+bool dc_link_get_backlight_level_nits(struct dc_link *link,
+ uint32_t *backlight_millinits,
+ uint32_t *backlight_millinits_peak);
+
+bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable);
+
+bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits);
+bool dc_link_set_default_brightness_aux(struct dc_link *link);
+
int dc_link_get_backlight_level(const struct dc_link *dc_link);
bool dc_link_set_abm_disable(const struct dc_link *dc_link);
@@ -316,4 +333,7 @@ bool dc_submit_i2c_oem(
uint32_t dc_bandwidth_in_kbps_from_timing(
const struct dc_crtc_timing *timing);
+
+bool dc_link_is_fec_supported(const struct dc_link *link);
+
#endif /* DC_LINK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 92096de79dec..a5c7ef47b8d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -118,6 +118,7 @@ union stream_update_flags {
uint32_t dpms_off:1;
uint32_t gamut_remap:1;
uint32_t wb_update:1;
+ uint32_t dsc_changed : 1;
} bits;
uint32_t raw;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index e59532d98cb4..0d210104ba0a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -229,7 +229,9 @@ struct dc_panel_patch {
unsigned int extra_t12_ms;
unsigned int extra_delay_backlight_off;
unsigned int extra_t7_ms;
- unsigned int manage_secondary_link;
+ unsigned int skip_scdc_overwrite;
+ unsigned int delay_ignore_msa;
+ unsigned int disable_fec;
};
struct dc_edid_caps {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index fdf3d8f87eee..fbfcff700971 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -29,7 +29,7 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
-dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o
+dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index f1a5d2c6aa37..743042d5905a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -400,7 +400,7 @@ static bool acquire(
{
enum gpio_result result;
- if (!is_engine_available(engine))
+ if ((engine == NULL) || !is_engine_available(engine))
return false;
result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
@@ -645,7 +645,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
case AUX_TRANSACTION_REPLY_AUX_DEFER:
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
retry_on_defer = true;
- /* fall through */
+ fallthrough;
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) {
goto fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index 30d953acd016..f0cebe721bcc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -378,6 +378,11 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
const struct dc_config *config = &dmcu->ctx->dc->config;
bool status = false;
+ struct dc_context *ctx = dmcu->ctx;
+ unsigned int i;
+ // 5 4 3 2 1 0
+ // F E D C B A - bit 0 is A, bit 5 is F
+ unsigned int tx_interrupt_mask = 0;
PERF_TRACE();
/* Definition of DC_DMCU_SCRATCH
@@ -387,6 +392,15 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
*/
dmcu->dmcu_state = REG_READ(DC_DMCU_SCRATCH);
+ for (i = 0; i < ctx->dc->link_count; i++) {
+ if (ctx->dc->links[i]->link_enc->features.flags.bits.DP_IS_USB_C) {
+ if (ctx->dc->links[i]->link_enc->transmitter >= TRANSMITTER_UNIPHY_A &&
+ ctx->dc->links[i]->link_enc->transmitter <= TRANSMITTER_UNIPHY_F) {
+ tx_interrupt_mask |= 1 << ctx->dc->links[i]->link_enc->transmitter;
+ }
+ }
+ }
+
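As a worked example under the bit layout the comment above spells out (UNIPHY_A in bit 0): a board whose USB-C links sit on UNIPHY_B and UNIPHY_D would report tx_interrupt_mask = (1 << 1) | (1 << 3) = 0x0A.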
switch (dmcu->dmcu_state) {
case DMCU_UNLOADED:
status = false;
@@ -401,6 +415,8 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
/* Set backlight ramping stepsize */
REG_WRITE(MASTER_COMM_DATA_REG2, abm_gain_stepsize);
+ REG_WRITE(MASTER_COMM_DATA_REG3, tx_interrupt_mask);
+
/* Set command to initialize microcontroller */
REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
MCP_INIT_DMCU);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 066188ba7949..24adec407972 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -267,6 +267,9 @@ static void set_speed(
uint32_t xtal_ref_div = 0;
uint32_t prescale = 0;
+ if (speed == 0)
+ return;
+
REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
if (xtal_ref_div == 0)
@@ -274,17 +277,15 @@ static void set_speed(
prescale = ((dce_i2c_hw->reference_frequency * 2) / xtal_ref_div) / speed;
- if (speed) {
- if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
- REG_UPDATE_N(SPEED, 3,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
- else
- REG_UPDATE_N(SPEED, 2,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
- }
+ if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
+ REG_UPDATE_N(SPEED, 3,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
+ else
+ REG_UPDATE_N(SPEED, 2,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
}
static bool setup_engine(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index 8aa937f496c4..51481e922eb9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -479,7 +479,7 @@ static void program_grph_pixel_format(
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
sign = 1;
floating = 1;
- /* fall through */
+ fallthrough;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get float too? */
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
grph_depth = 3;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
index 48862bebf29e..7311f312369f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
@@ -22,1004 +22,1330 @@
* Authors: AMD
*
*/
-
#include "transform.h"
+//=========================================
+// <num_taps> = 2
+// <num_phases> = 16
+// <scale_ratio> = 0.833333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = s1.10
+// <CoefOut> = s1.12
+//=========================================
static const uint16_t filter_2tap_16p[18] = {
- 4096, 0,
- 3840, 256,
- 3584, 512,
- 3328, 768,
- 3072, 1024,
- 2816, 1280,
- 2560, 1536,
- 2304, 1792,
- 2048, 2048
+ 0x1000, 0x0000,
+ 0x0FF0, 0x0010,
+ 0x0FB0, 0x0050,
+ 0x0F34, 0x00CC,
+ 0x0E68, 0x0198,
+ 0x0D44, 0x02BC,
+ 0x0BC4, 0x043C,
+ 0x09FC, 0x0604,
+ 0x0800, 0x0800
};
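The regenerated tables use the s1.10-quantized, s1.12-output format named in the header comments. A decoding sketch under the convention the values imply (14-bit two's complement with 12 fractional bits): 0x1000 is +1.0, and 0x3FDC wraps to -36/4096 ≈ -0.0088; each phase's taps still sum to 0x1000 (e.g. 0x0804 + 0x07FC):

    /* sketch: decode one s1.12 coefficient from its 14-bit field (assumed convention) */
    static inline int s1_12_to_milli(uint16_t coef)
    {
            int v = coef & 0x3FFF;    /* 14-bit field */
            if (v >= 0x2000)          /* sign bit set: wrap negative */
                    v -= 0x4000;
            return v * 1000 / 4096;   /* coefficient in thousandths */
    }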
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 16
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_3tap_16p_upscale[27] = {
- 2048, 2048, 0,
- 1708, 2424, 16348,
- 1372, 2796, 16308,
- 1056, 3148, 16272,
- 768, 3464, 16244,
- 512, 3728, 16236,
- 296, 3928, 16252,
- 124, 4052, 16296,
- 0, 4096, 0
+ 0x0804, 0x07FC, 0x0000,
+ 0x06AC, 0x0978, 0x3FDC,
+ 0x055C, 0x0AF0, 0x3FB4,
+ 0x0420, 0x0C50, 0x3F90,
+ 0x0300, 0x0D88, 0x3F78,
+ 0x0200, 0x0E90, 0x3F70,
+ 0x0128, 0x0F5C, 0x3F7C,
+ 0x007C, 0x0FD8, 0x3FAC,
+ 0x0000, 0x1000, 0x0000
};
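(The new headers describe the encoding these hex values use: coefficients are quantized as s1.10 and emitted as s1.12, i.e. 14-bit two's-complement values where 0x1000 = 4096 represents 1.0. That is also why the old decimal tables held numbers like 16348: it is the same bit pattern as 0x3FDC, a small negative tap, and each phase's taps sum to 4096. A throwaway decoder to sanity-check a phase, a hypothetical helper rather than driver code:)

#include <stdint.h>
#include <stdio.h>

/* Decode one s1.12 coefficient: 14 bits, two's complement, 4096 == 1.0.
 * 0x3FDC reads back as (0x3FDC - 0x4000) / 4096.0 = -36/4096 ~ -0.0088,
 * matching 16348 in the old decimal form of the same table. */
static double s1_12_to_double(uint16_t c)
{
	int32_t v = c & 0x3FFF;		/* coefficients are 14-bit */

	if (v & 0x2000)			/* sign bit set -> negative */
		v -= 0x4000;
	return v / 4096.0;
}

int main(void)
{
	/* second phase of filter_3tap_16p_upscale sums to 1.0:
	 * 0x06AC + 0x0978 + 0x3FDC -> 0.417 + 0.592 - 0.009 */
	printf("%f %f %f\n", s1_12_to_double(0x06AC),
	       s1_12_to_double(0x0978), s1_12_to_double(0x3FDC));
	return 0;
}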
-static const uint16_t filter_3tap_16p_117[27] = {
- 2048, 2048, 0,
- 1824, 2276, 16376,
- 1600, 2496, 16380,
- 1376, 2700, 16,
- 1156, 2880, 52,
- 948, 3032, 108,
- 756, 3144, 192,
- 580, 3212, 296,
- 428, 3236, 428
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 16
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_3tap_16p_116[27] = {
+ 0x0804, 0x07FC, 0x0000,
+ 0x0700, 0x0914, 0x3FEC,
+ 0x0604, 0x0A1C, 0x3FE0,
+ 0x050C, 0x0B14, 0x3FE0,
+ 0x041C, 0x0BF4, 0x3FF0,
+ 0x0340, 0x0CB0, 0x0010,
+ 0x0274, 0x0D3C, 0x0050,
+ 0x01C0, 0x0D94, 0x00AC,
+ 0x0128, 0x0DB4, 0x0124
};
-static const uint16_t filter_3tap_16p_150[27] = {
- 2048, 2048, 0,
- 1872, 2184, 36,
- 1692, 2308, 88,
- 1516, 2420, 156,
- 1340, 2516, 236,
- 1168, 2592, 328,
- 1004, 2648, 440,
- 844, 2684, 560,
- 696, 2696, 696
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 16
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_3tap_16p_149[27] = {
+ 0x0804, 0x07FC, 0x0000,
+ 0x0730, 0x08CC, 0x0004,
+ 0x0660, 0x098C, 0x0014,
+ 0x0590, 0x0A3C, 0x0034,
+ 0x04C4, 0x0AD4, 0x0068,
+ 0x0400, 0x0B54, 0x00AC,
+ 0x0348, 0x0BB0, 0x0108,
+ 0x029C, 0x0BEC, 0x0178,
+ 0x0200, 0x0C00, 0x0200
};
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 16
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_3tap_16p_183[27] = {
- 2048, 2048, 0,
- 1892, 2104, 92,
- 1744, 2152, 196,
- 1592, 2196, 300,
- 1448, 2232, 412,
- 1304, 2256, 528,
- 1168, 2276, 648,
- 1032, 2288, 772,
- 900, 2292, 900
+ 0x0804, 0x07FC, 0x0000,
+ 0x0754, 0x0880, 0x002C,
+ 0x06A8, 0x08F0, 0x0068,
+ 0x05FC, 0x0954, 0x00B0,
+ 0x0550, 0x09AC, 0x0104,
+ 0x04A8, 0x09F0, 0x0168,
+ 0x0408, 0x0A20, 0x01D8,
+ 0x036C, 0x0A40, 0x0254,
+ 0x02DC, 0x0A48, 0x02DC
};
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 16
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_4tap_16p_upscale[36] = {
- 0, 4096, 0, 0,
- 16240, 4056, 180, 16380,
- 16136, 3952, 404, 16364,
- 16072, 3780, 664, 16344,
- 16040, 3556, 952, 16312,
- 16036, 3284, 1268, 16272,
- 16052, 2980, 1604, 16224,
- 16084, 2648, 1952, 16176,
- 16128, 2304, 2304, 16128
+ 0x0000, 0x1000, 0x0000, 0x0000,
+ 0x3F74, 0x0FDC, 0x00B4, 0x3FFC,
+ 0x3F0C, 0x0F70, 0x0194, 0x3FF0,
+ 0x3ECC, 0x0EC4, 0x0298, 0x3FD8,
+ 0x3EAC, 0x0DE4, 0x03B8, 0x3FB8,
+ 0x3EA4, 0x0CD8, 0x04F4, 0x3F90,
+ 0x3EB8, 0x0BA0, 0x0644, 0x3F64,
+ 0x3ED8, 0x0A54, 0x07A0, 0x3F34,
+ 0x3F00, 0x08FC, 0x0900, 0x3F04
};
-static const uint16_t filter_4tap_16p_117[36] = {
- 428, 3236, 428, 0,
- 276, 3232, 604, 16364,
- 148, 3184, 800, 16340,
- 44, 3104, 1016, 16312,
- 16344, 2984, 1244, 16284,
- 16284, 2832, 1488, 16256,
- 16244, 2648, 1732, 16236,
- 16220, 2440, 1976, 16220,
- 16212, 2216, 2216, 16212
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 16
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_4tap_16p_116[36] = {
+ 0x01A8, 0x0CB4, 0x01A4, 0x0000,
+ 0x0110, 0x0CB0, 0x0254, 0x3FEC,
+ 0x0090, 0x0C80, 0x031C, 0x3FD4,
+ 0x0024, 0x0C2C, 0x03F4, 0x3FBC,
+ 0x3FD8, 0x0BAC, 0x04DC, 0x3FA0,
+ 0x3F9C, 0x0B14, 0x05CC, 0x3F84,
+ 0x3F70, 0x0A60, 0x06C4, 0x3F6C,
+ 0x3F5C, 0x098C, 0x07BC, 0x3F5C,
+ 0x3F54, 0x08AC, 0x08AC, 0x3F54
};
-static const uint16_t filter_4tap_16p_150[36] = {
- 696, 2700, 696, 0,
- 560, 2700, 848, 16364,
- 436, 2676, 1008, 16348,
- 328, 2628, 1180, 16336,
- 232, 2556, 1356, 16328,
- 152, 2460, 1536, 16328,
- 84, 2344, 1716, 16332,
- 28, 2208, 1888, 16348,
- 16376, 2052, 2052, 16376
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 16
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_4tap_16p_149[36] = {
+ 0x02B8, 0x0A90, 0x02B8, 0x0000,
+ 0x0230, 0x0A90, 0x0350, 0x3FF0,
+ 0x01B8, 0x0A78, 0x03F0, 0x3FE0,
+ 0x0148, 0x0A48, 0x049C, 0x3FD4,
+ 0x00E8, 0x0A00, 0x054C, 0x3FCC,
+ 0x0098, 0x09A0, 0x0600, 0x3FC8,
+ 0x0054, 0x0928, 0x06B4, 0x3FD0,
+ 0x001C, 0x08A4, 0x0760, 0x3FE0,
+ 0x3FFC, 0x0804, 0x0804, 0x3FFC
};
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 16
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_4tap_16p_183[36] = {
- 940, 2208, 940, 0,
- 832, 2200, 1052, 4,
- 728, 2180, 1164, 16,
- 628, 2148, 1280, 36,
- 536, 2100, 1392, 60,
- 448, 2044, 1504, 92,
- 368, 1976, 1612, 132,
- 296, 1900, 1716, 176,
- 232, 1812, 1812, 232
+ 0x03B0, 0x08A0, 0x03B0, 0x0000,
+ 0x0348, 0x0898, 0x041C, 0x0004,
+ 0x02DC, 0x0884, 0x0490, 0x0010,
+ 0x0278, 0x0864, 0x0500, 0x0024,
+ 0x021C, 0x0838, 0x0570, 0x003C,
+ 0x01C8, 0x07FC, 0x05E0, 0x005C,
+ 0x0178, 0x07B8, 0x064C, 0x0084,
+ 0x0130, 0x076C, 0x06B0, 0x00B4,
+ 0x00F0, 0x0714, 0x0710, 0x00EC
};
+//=========================================
+// <num_taps> = 2
+// <num_phases> = 64
+// <scale_ratio> = 0.833333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = s1.10
+// <CoefOut> = s1.12
+//=========================================
static const uint16_t filter_2tap_64p[66] = {
- 4096, 0,
- 4032, 64,
- 3968, 128,
- 3904, 192,
- 3840, 256,
- 3776, 320,
- 3712, 384,
- 3648, 448,
- 3584, 512,
- 3520, 576,
- 3456, 640,
- 3392, 704,
- 3328, 768,
- 3264, 832,
- 3200, 896,
- 3136, 960,
- 3072, 1024,
- 3008, 1088,
- 2944, 1152,
- 2880, 1216,
- 2816, 1280,
- 2752, 1344,
- 2688, 1408,
- 2624, 1472,
- 2560, 1536,
- 2496, 1600,
- 2432, 1664,
- 2368, 1728,
- 2304, 1792,
- 2240, 1856,
- 2176, 1920,
- 2112, 1984,
- 2048, 2048 };
+ 0x1000, 0x0000,
+ 0x1000, 0x0000,
+ 0x0FFC, 0x0004,
+ 0x0FF8, 0x0008,
+ 0x0FF0, 0x0010,
+ 0x0FE4, 0x001C,
+ 0x0FD8, 0x0028,
+ 0x0FC4, 0x003C,
+ 0x0FB0, 0x0050,
+ 0x0F98, 0x0068,
+ 0x0F7C, 0x0084,
+ 0x0F58, 0x00A8,
+ 0x0F34, 0x00CC,
+ 0x0F08, 0x00F8,
+ 0x0ED8, 0x0128,
+ 0x0EA4, 0x015C,
+ 0x0E68, 0x0198,
+ 0x0E28, 0x01D8,
+ 0x0DE4, 0x021C,
+ 0x0D98, 0x0268,
+ 0x0D44, 0x02BC,
+ 0x0CEC, 0x0314,
+ 0x0C90, 0x0370,
+ 0x0C2C, 0x03D4,
+ 0x0BC4, 0x043C,
+ 0x0B58, 0x04A8,
+ 0x0AE8, 0x0518,
+ 0x0A74, 0x058C,
+ 0x09FC, 0x0604,
+ 0x0980, 0x0680,
+ 0x0900, 0x0700,
+ 0x0880, 0x0780,
+ 0x0800, 0x0800
+};
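(Two structural properties of these tables are worth noting. Each array stores num_phases/2 + 1 phases, 9 rows for the 16-phase tables and 33 for the 64-phase ones, presumably because the remaining phases are mirror images of the stored ones; and the 16-phase table holds exactly every fourth phase of the corresponding 64-phase table. A quick editorial spot-check of both claims against values copied from the tables above:)

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* phase 2 of filter_2tap_16p == phase 8 of filter_2tap_64p */
	static const uint16_t f16p_phase2[2] = { 0x0FB0, 0x0050 };
	static const uint16_t f64p_phase8[2] = { 0x0FB0, 0x0050 };

	assert((16 / 2 + 1) * 2 == 18);		/* filter_2tap_16p[18] */
	assert((64 / 2 + 1) * 2 == 66);		/* filter_2tap_64p[66] */
	assert((64 / 2 + 1) * 7 == 231);	/* filter_7tap_64p_*[231] */
	assert(f16p_phase2[0] == f64p_phase8[0] &&
	       f16p_phase2[1] == f64p_phase8[1]);
	return 0;
}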
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_3tap_64p_upscale[99] = {
- 2048, 2048, 0,
- 1960, 2140, 16376,
- 1876, 2236, 16364,
- 1792, 2328, 16356,
- 1708, 2424, 16348,
- 1620, 2516, 16336,
- 1540, 2612, 16328,
- 1456, 2704, 16316,
- 1372, 2796, 16308,
- 1292, 2884, 16296,
- 1212, 2976, 16288,
- 1136, 3060, 16280,
- 1056, 3148, 16272,
- 984, 3228, 16264,
- 908, 3312, 16256,
- 836, 3388, 16248,
- 768, 3464, 16244,
- 700, 3536, 16240,
- 636, 3604, 16236,
- 572, 3668, 16236,
- 512, 3728, 16236,
- 456, 3784, 16236,
- 400, 3836, 16240,
- 348, 3884, 16244,
- 296, 3928, 16252,
- 252, 3964, 16260,
- 204, 4000, 16268,
- 164, 4028, 16284,
- 124, 4052, 16296,
- 88, 4072, 16316,
- 56, 4084, 16336,
- 24, 4092, 16356,
- 0, 4096, 0
+ 0x0804, 0x07FC, 0x0000,
+ 0x07A8, 0x0860, 0x3FF8,
+ 0x0754, 0x08BC, 0x3FF0,
+ 0x0700, 0x0918, 0x3FE8,
+ 0x06AC, 0x0978, 0x3FDC,
+ 0x0654, 0x09D8, 0x3FD4,
+ 0x0604, 0x0A34, 0x3FC8,
+ 0x05B0, 0x0A90, 0x3FC0,
+ 0x055C, 0x0AF0, 0x3FB4,
+ 0x050C, 0x0B48, 0x3FAC,
+ 0x04BC, 0x0BA0, 0x3FA4,
+ 0x0470, 0x0BF4, 0x3F9C,
+ 0x0420, 0x0C50, 0x3F90,
+ 0x03D8, 0x0C9C, 0x3F8C,
+ 0x038C, 0x0CF0, 0x3F84,
+ 0x0344, 0x0D40, 0x3F7C,
+ 0x0300, 0x0D88, 0x3F78,
+ 0x02BC, 0x0DD0, 0x3F74,
+ 0x027C, 0x0E14, 0x3F70,
+ 0x023C, 0x0E54, 0x3F70,
+ 0x0200, 0x0E90, 0x3F70,
+ 0x01C8, 0x0EC8, 0x3F70,
+ 0x0190, 0x0EFC, 0x3F74,
+ 0x015C, 0x0F2C, 0x3F78,
+ 0x0128, 0x0F5C, 0x3F7C,
+ 0x00FC, 0x0F7C, 0x3F88,
+ 0x00CC, 0x0FA4, 0x3F90,
+ 0x00A4, 0x0FC0, 0x3F9C,
+ 0x007C, 0x0FD8, 0x3FAC,
+ 0x0058, 0x0FE8, 0x3FC0,
+ 0x0038, 0x0FF4, 0x3FD4,
+ 0x0018, 0x1000, 0x3FE8,
+ 0x0000, 0x1000, 0x0000
};
-static const uint16_t filter_3tap_64p_117[99] = {
- 2048, 2048, 0,
- 1992, 2104, 16380,
- 1936, 2160, 16380,
- 1880, 2220, 16376,
- 1824, 2276, 16376,
- 1768, 2332, 16376,
- 1712, 2388, 16376,
- 1656, 2444, 16376,
- 1600, 2496, 16380,
- 1544, 2548, 0,
- 1488, 2600, 4,
- 1432, 2652, 8,
- 1376, 2700, 16,
- 1320, 2748, 20,
- 1264, 2796, 32,
- 1212, 2840, 40,
- 1156, 2880, 52,
- 1104, 2920, 64,
- 1052, 2960, 80,
- 1000, 2996, 92,
- 948, 3032, 108,
- 900, 3060, 128,
- 852, 3092, 148,
- 804, 3120, 168,
- 756, 3144, 192,
- 712, 3164, 216,
- 668, 3184, 240,
- 624, 3200, 268,
- 580, 3212, 296,
- 540, 3220, 328,
- 500, 3228, 360,
- 464, 3232, 392,
- 428, 3236, 428
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_3tap_64p_116[99] = {
+ 0x0804, 0x07FC, 0x0000,
+ 0x07C0, 0x0844, 0x3FFC,
+ 0x0780, 0x0888, 0x3FF8,
+ 0x0740, 0x08D0, 0x3FF0,
+ 0x0700, 0x0914, 0x3FEC,
+ 0x06C0, 0x0958, 0x3FE8,
+ 0x0684, 0x0998, 0x3FE4,
+ 0x0644, 0x09DC, 0x3FE0,
+ 0x0604, 0x0A1C, 0x3FE0,
+ 0x05C4, 0x0A5C, 0x3FE0,
+ 0x0588, 0x0A9C, 0x3FDC,
+ 0x0548, 0x0ADC, 0x3FDC,
+ 0x050C, 0x0B14, 0x3FE0,
+ 0x04CC, 0x0B54, 0x3FE0,
+ 0x0490, 0x0B8C, 0x3FE4,
+ 0x0458, 0x0BC0, 0x3FE8,
+ 0x041C, 0x0BF4, 0x3FF0,
+ 0x03E0, 0x0C28, 0x3FF8,
+ 0x03A8, 0x0C58, 0x0000,
+ 0x0374, 0x0C88, 0x0004,
+ 0x0340, 0x0CB0, 0x0010,
+ 0x0308, 0x0CD8, 0x0020,
+ 0x02D8, 0x0CFC, 0x002C,
+ 0x02A0, 0x0D20, 0x0040,
+ 0x0274, 0x0D3C, 0x0050,
+ 0x0244, 0x0D58, 0x0064,
+ 0x0214, 0x0D70, 0x007C,
+ 0x01E8, 0x0D84, 0x0094,
+ 0x01C0, 0x0D94, 0x00AC,
+ 0x0198, 0x0DA0, 0x00C8,
+ 0x0170, 0x0DAC, 0x00E4,
+ 0x014C, 0x0DB0, 0x0104,
+ 0x0128, 0x0DB4, 0x0124
};
-static const uint16_t filter_3tap_64p_150[99] = {
- 2048, 2048, 0,
- 2004, 2080, 8,
- 1960, 2116, 16,
- 1916, 2148, 28,
- 1872, 2184, 36,
- 1824, 2216, 48,
- 1780, 2248, 60,
- 1736, 2280, 76,
- 1692, 2308, 88,
- 1648, 2336, 104,
- 1604, 2368, 120,
- 1560, 2392, 136,
- 1516, 2420, 156,
- 1472, 2444, 172,
- 1428, 2472, 192,
- 1384, 2492, 212,
- 1340, 2516, 236,
- 1296, 2536, 256,
- 1252, 2556, 280,
- 1212, 2576, 304,
- 1168, 2592, 328,
- 1124, 2608, 356,
- 1084, 2624, 384,
- 1044, 2636, 412,
- 1004, 2648, 440,
- 964, 2660, 468,
- 924, 2668, 500,
- 884, 2676, 528,
- 844, 2684, 560,
- 808, 2688, 596,
- 768, 2692, 628,
- 732, 2696, 664,
- 696, 2696, 696
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_3tap_64p_149[99] = {
+ 0x0804, 0x07FC, 0x0000,
+ 0x07CC, 0x0834, 0x0000,
+ 0x0798, 0x0868, 0x0000,
+ 0x0764, 0x089C, 0x0000,
+ 0x0730, 0x08CC, 0x0004,
+ 0x0700, 0x08FC, 0x0004,
+ 0x06CC, 0x092C, 0x0008,
+ 0x0698, 0x095C, 0x000C,
+ 0x0660, 0x098C, 0x0014,
+ 0x062C, 0x09B8, 0x001C,
+ 0x05FC, 0x09E4, 0x0020,
+ 0x05C4, 0x0A10, 0x002C,
+ 0x0590, 0x0A3C, 0x0034,
+ 0x055C, 0x0A64, 0x0040,
+ 0x0528, 0x0A8C, 0x004C,
+ 0x04F8, 0x0AB0, 0x0058,
+ 0x04C4, 0x0AD4, 0x0068,
+ 0x0490, 0x0AF8, 0x0078,
+ 0x0460, 0x0B18, 0x0088,
+ 0x0430, 0x0B38, 0x0098,
+ 0x0400, 0x0B54, 0x00AC,
+ 0x03D0, 0x0B6C, 0x00C4,
+ 0x03A0, 0x0B88, 0x00D8,
+ 0x0374, 0x0B9C, 0x00F0,
+ 0x0348, 0x0BB0, 0x0108,
+ 0x0318, 0x0BC4, 0x0124,
+ 0x02EC, 0x0BD4, 0x0140,
+ 0x02C4, 0x0BE0, 0x015C,
+ 0x029C, 0x0BEC, 0x0178,
+ 0x0274, 0x0BF4, 0x0198,
+ 0x024C, 0x0BFC, 0x01B8,
+ 0x0228, 0x0BFC, 0x01DC,
+ 0x0200, 0x0C00, 0x0200
};
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_3tap_64p_183[99] = {
- 2048, 2048, 0,
- 2008, 2060, 20,
- 1968, 2076, 44,
- 1932, 2088, 68,
- 1892, 2104, 92,
- 1856, 2116, 120,
- 1816, 2128, 144,
- 1780, 2140, 168,
- 1744, 2152, 196,
- 1704, 2164, 220,
- 1668, 2176, 248,
- 1632, 2188, 272,
- 1592, 2196, 300,
- 1556, 2204, 328,
- 1520, 2216, 356,
- 1484, 2224, 384,
- 1448, 2232, 412,
- 1412, 2240, 440,
- 1376, 2244, 468,
- 1340, 2252, 496,
- 1304, 2256, 528,
- 1272, 2264, 556,
- 1236, 2268, 584,
- 1200, 2272, 616,
- 1168, 2276, 648,
- 1132, 2280, 676,
- 1100, 2284, 708,
- 1064, 2288, 740,
- 1032, 2288, 772,
- 996, 2292, 800,
- 964, 2292, 832,
- 932, 2292, 868,
- 900, 2292, 900
+ 0x0804, 0x07FC, 0x0000,
+ 0x07D4, 0x0824, 0x0008,
+ 0x07AC, 0x0840, 0x0014,
+ 0x0780, 0x0860, 0x0020,
+ 0x0754, 0x0880, 0x002C,
+ 0x0728, 0x089C, 0x003C,
+ 0x0700, 0x08B8, 0x0048,
+ 0x06D4, 0x08D4, 0x0058,
+ 0x06A8, 0x08F0, 0x0068,
+ 0x067C, 0x090C, 0x0078,
+ 0x0650, 0x0924, 0x008C,
+ 0x0628, 0x093C, 0x009C,
+ 0x05FC, 0x0954, 0x00B0,
+ 0x05D0, 0x096C, 0x00C4,
+ 0x05A8, 0x0980, 0x00D8,
+ 0x0578, 0x0998, 0x00F0,
+ 0x0550, 0x09AC, 0x0104,
+ 0x0528, 0x09BC, 0x011C,
+ 0x04FC, 0x09D0, 0x0134,
+ 0x04D4, 0x09E0, 0x014C,
+ 0x04A8, 0x09F0, 0x0168,
+ 0x0480, 0x09FC, 0x0184,
+ 0x045C, 0x0A08, 0x019C,
+ 0x0434, 0x0A14, 0x01B8,
+ 0x0408, 0x0A20, 0x01D8,
+ 0x03E0, 0x0A2C, 0x01F4,
+ 0x03B8, 0x0A34, 0x0214,
+ 0x0394, 0x0A38, 0x0234,
+ 0x036C, 0x0A40, 0x0254,
+ 0x0348, 0x0A44, 0x0274,
+ 0x0324, 0x0A48, 0x0294,
+ 0x0300, 0x0A48, 0x02B8,
+ 0x02DC, 0x0A48, 0x02DC
};
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_4tap_64p_upscale[132] = {
- 0, 4096, 0, 0,
- 16344, 4092, 40, 0,
- 16308, 4084, 84, 16380,
- 16272, 4072, 132, 16380,
- 16240, 4056, 180, 16380,
- 16212, 4036, 232, 16376,
- 16184, 4012, 288, 16372,
- 16160, 3984, 344, 16368,
- 16136, 3952, 404, 16364,
- 16116, 3916, 464, 16360,
- 16100, 3872, 528, 16356,
- 16084, 3828, 596, 16348,
- 16072, 3780, 664, 16344,
- 16060, 3728, 732, 16336,
- 16052, 3676, 804, 16328,
- 16044, 3616, 876, 16320,
- 16040, 3556, 952, 16312,
- 16036, 3492, 1028, 16300,
- 16032, 3424, 1108, 16292,
- 16032, 3356, 1188, 16280,
- 16036, 3284, 1268, 16272,
- 16036, 3212, 1352, 16260,
- 16040, 3136, 1436, 16248,
- 16044, 3056, 1520, 16236,
- 16052, 2980, 1604, 16224,
- 16060, 2896, 1688, 16212,
- 16064, 2816, 1776, 16200,
- 16076, 2732, 1864, 16188,
- 16084, 2648, 1952, 16176,
- 16092, 2564, 2040, 16164,
- 16104, 2476, 2128, 16152,
- 16116, 2388, 2216, 16140,
- 16128, 2304, 2304, 16128 };
+ 0x0000, 0x1000, 0x0000, 0x0000,
+ 0x3FDC, 0x0FFC, 0x0028, 0x0000,
+ 0x3FB4, 0x0FF8, 0x0054, 0x0000,
+ 0x3F94, 0x0FE8, 0x0084, 0x0000,
+ 0x3F74, 0x0FDC, 0x00B4, 0x3FFC,
+ 0x3F58, 0x0FC4, 0x00E8, 0x3FFC,
+ 0x3F3C, 0x0FAC, 0x0120, 0x3FF8,
+ 0x3F24, 0x0F90, 0x0158, 0x3FF4,
+ 0x3F0C, 0x0F70, 0x0194, 0x3FF0,
+ 0x3EF8, 0x0F4C, 0x01D0, 0x3FEC,
+ 0x3EE8, 0x0F20, 0x0210, 0x3FE8,
+ 0x3ED8, 0x0EF4, 0x0254, 0x3FE0,
+ 0x3ECC, 0x0EC4, 0x0298, 0x3FD8,
+ 0x3EC0, 0x0E90, 0x02DC, 0x3FD4,
+ 0x3EB8, 0x0E58, 0x0324, 0x3FCC,
+ 0x3EB0, 0x0E20, 0x036C, 0x3FC4,
+ 0x3EAC, 0x0DE4, 0x03B8, 0x3FB8,
+ 0x3EA8, 0x0DA4, 0x0404, 0x3FB0,
+ 0x3EA4, 0x0D60, 0x0454, 0x3FA8,
+ 0x3EA4, 0x0D1C, 0x04A4, 0x3F9C,
+ 0x3EA4, 0x0CD8, 0x04F4, 0x3F90,
+ 0x3EA8, 0x0C88, 0x0548, 0x3F88,
+ 0x3EAC, 0x0C3C, 0x059C, 0x3F7C,
+ 0x3EB0, 0x0BF0, 0x05F0, 0x3F70,
+ 0x3EB8, 0x0BA0, 0x0644, 0x3F64,
+ 0x3EBC, 0x0B54, 0x0698, 0x3F58,
+ 0x3EC4, 0x0B00, 0x06F0, 0x3F4C,
+ 0x3ECC, 0x0AAC, 0x0748, 0x3F40,
+ 0x3ED8, 0x0A54, 0x07A0, 0x3F34,
+ 0x3EE0, 0x0A04, 0x07F8, 0x3F24,
+ 0x3EEC, 0x09AC, 0x0850, 0x3F18,
+ 0x3EF8, 0x0954, 0x08A8, 0x3F0C,
+ 0x3F00, 0x08FC, 0x0900, 0x3F04
+};
-static const uint16_t filter_4tap_64p_117[132] = {
- 420, 3248, 420, 0,
- 380, 3248, 464, 16380,
- 344, 3248, 508, 16372,
- 308, 3248, 552, 16368,
- 272, 3240, 596, 16364,
- 236, 3236, 644, 16356,
- 204, 3224, 692, 16352,
- 172, 3212, 744, 16344,
- 144, 3196, 796, 16340,
- 116, 3180, 848, 16332,
- 88, 3160, 900, 16324,
- 60, 3136, 956, 16320,
- 36, 3112, 1012, 16312,
- 16, 3084, 1068, 16304,
- 16380, 3056, 1124, 16296,
- 16360, 3024, 1184, 16292,
- 16340, 2992, 1244, 16284,
- 16324, 2956, 1304, 16276,
- 16308, 2920, 1364, 16268,
- 16292, 2880, 1424, 16264,
- 16280, 2836, 1484, 16256,
- 16268, 2792, 1548, 16252,
- 16256, 2748, 1608, 16244,
- 16248, 2700, 1668, 16240,
- 16240, 2652, 1732, 16232,
- 16232, 2604, 1792, 16228,
- 16228, 2552, 1856, 16224,
- 16220, 2500, 1916, 16220,
- 16216, 2444, 1980, 16216,
- 16216, 2388, 2040, 16216,
- 16212, 2332, 2100, 16212,
- 16212, 2276, 2160, 16212,
- 16212, 2220, 2220, 16212 };
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_4tap_64p_116[132] = {
+ 0x01A8, 0x0CB4, 0x01A4, 0x0000,
+ 0x017C, 0x0CB8, 0x01D0, 0x3FFC,
+ 0x0158, 0x0CB8, 0x01F8, 0x3FF8,
+ 0x0130, 0x0CB4, 0x0228, 0x3FF4,
+ 0x0110, 0x0CB0, 0x0254, 0x3FEC,
+ 0x00EC, 0x0CA8, 0x0284, 0x3FE8,
+ 0x00CC, 0x0C9C, 0x02B4, 0x3FE4,
+ 0x00AC, 0x0C90, 0x02E8, 0x3FDC,
+ 0x0090, 0x0C80, 0x031C, 0x3FD4,
+ 0x0070, 0x0C70, 0x0350, 0x3FD0,
+ 0x0058, 0x0C5C, 0x0384, 0x3FC8,
+ 0x003C, 0x0C48, 0x03BC, 0x3FC0,
+ 0x0024, 0x0C2C, 0x03F4, 0x3FBC,
+ 0x0010, 0x0C10, 0x042C, 0x3FB4,
+ 0x3FFC, 0x0BF4, 0x0464, 0x3FAC,
+ 0x3FE8, 0x0BD4, 0x04A0, 0x3FA4,
+ 0x3FD8, 0x0BAC, 0x04DC, 0x3FA0,
+ 0x3FC4, 0x0B8C, 0x0518, 0x3F98,
+ 0x3FB4, 0x0B68, 0x0554, 0x3F90,
+ 0x3FA8, 0x0B40, 0x0590, 0x3F88,
+ 0x3F9C, 0x0B14, 0x05CC, 0x3F84,
+ 0x3F90, 0x0AEC, 0x0608, 0x3F7C,
+ 0x3F84, 0x0ABC, 0x0648, 0x3F78,
+ 0x3F7C, 0x0A90, 0x0684, 0x3F70,
+ 0x3F70, 0x0A60, 0x06C4, 0x3F6C,
+ 0x3F6C, 0x0A2C, 0x0700, 0x3F68,
+ 0x3F64, 0x09F8, 0x0740, 0x3F64,
+ 0x3F60, 0x09C4, 0x077C, 0x3F60,
+ 0x3F5C, 0x098C, 0x07BC, 0x3F5C,
+ 0x3F58, 0x0958, 0x07F8, 0x3F58,
+ 0x3F58, 0x091C, 0x0834, 0x3F58,
+ 0x3F54, 0x08E4, 0x0870, 0x3F58,
+ 0x3F54, 0x08AC, 0x08AC, 0x3F54
+};
-static const uint16_t filter_4tap_64p_150[132] = {
- 696, 2700, 696, 0,
- 660, 2704, 732, 16380,
- 628, 2704, 768, 16376,
- 596, 2704, 804, 16372,
- 564, 2700, 844, 16364,
- 532, 2696, 884, 16360,
- 500, 2692, 924, 16356,
- 472, 2684, 964, 16352,
- 440, 2676, 1004, 16352,
- 412, 2668, 1044, 16348,
- 384, 2656, 1088, 16344,
- 360, 2644, 1128, 16340,
- 332, 2632, 1172, 16336,
- 308, 2616, 1216, 16336,
- 284, 2600, 1260, 16332,
- 260, 2580, 1304, 16332,
- 236, 2560, 1348, 16328,
- 216, 2540, 1392, 16328,
- 196, 2516, 1436, 16328,
- 176, 2492, 1480, 16324,
- 156, 2468, 1524, 16324,
- 136, 2440, 1568, 16328,
- 120, 2412, 1612, 16328,
- 104, 2384, 1656, 16328,
- 88, 2352, 1700, 16332,
- 72, 2324, 1744, 16332,
- 60, 2288, 1788, 16336,
- 48, 2256, 1828, 16340,
- 36, 2220, 1872, 16344,
- 24, 2184, 1912, 16352,
- 12, 2148, 1952, 16356,
- 4, 2112, 1996, 16364,
- 16380, 2072, 2036, 16372 };
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_4tap_64p_149[132] = {
+ 0x02B8, 0x0A90, 0x02B8, 0x0000,
+ 0x0294, 0x0A94, 0x02DC, 0x3FFC,
+ 0x0274, 0x0A94, 0x0300, 0x3FF8,
+ 0x0250, 0x0A94, 0x0328, 0x3FF4,
+ 0x0230, 0x0A90, 0x0350, 0x3FF0,
+ 0x0214, 0x0A8C, 0x0374, 0x3FEC,
+ 0x01F0, 0x0A88, 0x03A0, 0x3FE8,
+ 0x01D4, 0x0A80, 0x03C8, 0x3FE4,
+ 0x01B8, 0x0A78, 0x03F0, 0x3FE0,
+ 0x0198, 0x0A70, 0x041C, 0x3FDC,
+ 0x0180, 0x0A64, 0x0444, 0x3FD8,
+ 0x0164, 0x0A54, 0x0470, 0x3FD8,
+ 0x0148, 0x0A48, 0x049C, 0x3FD4,
+ 0x0130, 0x0A38, 0x04C8, 0x3FD0,
+ 0x0118, 0x0A24, 0x04F4, 0x3FD0,
+ 0x0100, 0x0A14, 0x0520, 0x3FCC,
+ 0x00E8, 0x0A00, 0x054C, 0x3FCC,
+ 0x00D4, 0x09E8, 0x057C, 0x3FC8,
+ 0x00C0, 0x09D0, 0x05A8, 0x3FC8,
+ 0x00AC, 0x09B8, 0x05D4, 0x3FC8,
+ 0x0098, 0x09A0, 0x0600, 0x3FC8,
+ 0x0084, 0x0984, 0x0630, 0x3FC8,
+ 0x0074, 0x0964, 0x065C, 0x3FCC,
+ 0x0064, 0x0948, 0x0688, 0x3FCC,
+ 0x0054, 0x0928, 0x06B4, 0x3FD0,
+ 0x0044, 0x0908, 0x06E0, 0x3FD4,
+ 0x0038, 0x08E8, 0x070C, 0x3FD4,
+ 0x002C, 0x08C4, 0x0738, 0x3FD8,
+ 0x001C, 0x08A4, 0x0760, 0x3FE0,
+ 0x0014, 0x087C, 0x078C, 0x3FE4,
+ 0x0008, 0x0858, 0x07B4, 0x3FEC,
+ 0x0000, 0x0830, 0x07DC, 0x3FF4,
+ 0x3FFC, 0x0804, 0x0804, 0x3FFC
+};
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_4tap_64p_183[132] = {
- 944, 2204, 944, 0,
- 916, 2204, 972, 0,
- 888, 2200, 996, 0,
- 860, 2200, 1024, 4,
- 832, 2196, 1052, 4,
- 808, 2192, 1080, 8,
- 780, 2188, 1108, 12,
- 756, 2180, 1140, 12,
- 728, 2176, 1168, 16,
- 704, 2168, 1196, 20,
- 680, 2160, 1224, 24,
- 656, 2152, 1252, 28,
- 632, 2144, 1280, 36,
- 608, 2132, 1308, 40,
- 584, 2120, 1336, 48,
- 560, 2112, 1364, 52,
- 536, 2096, 1392, 60,
- 516, 2084, 1420, 68,
- 492, 2072, 1448, 76,
- 472, 2056, 1476, 84,
- 452, 2040, 1504, 92,
- 428, 2024, 1532, 100,
- 408, 2008, 1560, 112,
- 392, 1992, 1584, 120,
- 372, 1972, 1612, 132,
- 352, 1956, 1636, 144,
- 336, 1936, 1664, 156,
- 316, 1916, 1688, 168,
- 300, 1896, 1712, 180,
- 284, 1876, 1736, 192,
- 268, 1852, 1760, 208,
- 252, 1832, 1784, 220,
- 236, 1808, 1808, 236 };
+ 0x03B0, 0x08A0, 0x03B0, 0x0000,
+ 0x0394, 0x08A0, 0x03CC, 0x0000,
+ 0x037C, 0x089C, 0x03E8, 0x0000,
+ 0x0360, 0x089C, 0x0400, 0x0004,
+ 0x0348, 0x0898, 0x041C, 0x0004,
+ 0x032C, 0x0894, 0x0438, 0x0008,
+ 0x0310, 0x0890, 0x0454, 0x000C,
+ 0x02F8, 0x0888, 0x0474, 0x000C,
+ 0x02DC, 0x0884, 0x0490, 0x0010,
+ 0x02C4, 0x087C, 0x04AC, 0x0014,
+ 0x02AC, 0x0874, 0x04C8, 0x0018,
+ 0x0290, 0x086C, 0x04E4, 0x0020,
+ 0x0278, 0x0864, 0x0500, 0x0024,
+ 0x0264, 0x0858, 0x051C, 0x0028,
+ 0x024C, 0x084C, 0x0538, 0x0030,
+ 0x0234, 0x0844, 0x0554, 0x0034,
+ 0x021C, 0x0838, 0x0570, 0x003C,
+ 0x0208, 0x0828, 0x058C, 0x0044,
+ 0x01F0, 0x081C, 0x05A8, 0x004C,
+ 0x01DC, 0x080C, 0x05C4, 0x0054,
+ 0x01C8, 0x07FC, 0x05E0, 0x005C,
+ 0x01B4, 0x07EC, 0x05FC, 0x0064,
+ 0x019C, 0x07DC, 0x0618, 0x0070,
+ 0x018C, 0x07CC, 0x0630, 0x0078,
+ 0x0178, 0x07B8, 0x064C, 0x0084,
+ 0x0164, 0x07A8, 0x0664, 0x0090,
+ 0x0150, 0x0794, 0x0680, 0x009C,
+ 0x0140, 0x0780, 0x0698, 0x00A8,
+ 0x0130, 0x076C, 0x06B0, 0x00B4,
+ 0x0120, 0x0758, 0x06C8, 0x00C0,
+ 0x0110, 0x0740, 0x06E0, 0x00D0,
+ 0x0100, 0x072C, 0x06F8, 0x00DC,
+ 0x00F0, 0x0714, 0x0710, 0x00EC
+};
+//=========================================
+// <num_taps> = 5
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_5tap_64p_upscale[165] = {
- 15936, 2496, 2496, 15936, 0,
- 15948, 2404, 2580, 15924, 0,
- 15960, 2312, 2664, 15912, 4,
- 15976, 2220, 2748, 15904, 8,
- 15992, 2128, 2832, 15896, 12,
- 16004, 2036, 2912, 15888, 16,
- 16020, 1944, 2992, 15880, 20,
- 16036, 1852, 3068, 15876, 20,
- 16056, 1760, 3140, 15876, 24,
- 16072, 1668, 3216, 15872, 28,
- 16088, 1580, 3284, 15872, 32,
- 16104, 1492, 3352, 15876, 32,
- 16120, 1404, 3420, 15876, 36,
- 16140, 1316, 3480, 15884, 40,
- 16156, 1228, 3540, 15892, 40,
- 16172, 1144, 3600, 15900, 40,
- 16188, 1060, 3652, 15908, 44,
- 16204, 980, 3704, 15924, 44,
- 16220, 900, 3756, 15936, 44,
- 16236, 824, 3800, 15956, 44,
- 16248, 744, 3844, 15972, 44,
- 16264, 672, 3884, 15996, 44,
- 16276, 600, 3920, 16020, 44,
- 16292, 528, 3952, 16044, 40,
- 16304, 460, 3980, 16072, 40,
- 16316, 396, 4008, 16104, 36,
- 16328, 332, 4032, 16136, 32,
- 16336, 272, 4048, 16172, 28,
- 16348, 212, 4064, 16208, 24,
- 16356, 156, 4080, 16248, 16,
- 16368, 100, 4088, 16292, 12,
- 16376, 48, 4092, 16336, 4,
- 0, 0, 4096, 0, 0 };
+ 0x3E40, 0x09C0, 0x09C0, 0x3E40, 0x0000,
+ 0x3E50, 0x0964, 0x0A18, 0x3E34, 0x0000,
+ 0x3E5C, 0x0908, 0x0A6C, 0x3E2C, 0x0004,
+ 0x3E6C, 0x08AC, 0x0AC0, 0x3E20, 0x0008,
+ 0x3E78, 0x0850, 0x0B14, 0x3E18, 0x000C,
+ 0x3E88, 0x07F4, 0x0B60, 0x3E14, 0x0010,
+ 0x3E98, 0x0798, 0x0BB0, 0x3E0C, 0x0014,
+ 0x3EA8, 0x073C, 0x0C00, 0x3E08, 0x0014,
+ 0x3EB8, 0x06E4, 0x0C48, 0x3E04, 0x0018,
+ 0x3ECC, 0x0684, 0x0C90, 0x3E04, 0x001C,
+ 0x3EDC, 0x062C, 0x0CD4, 0x3E04, 0x0020,
+ 0x3EEC, 0x05D4, 0x0D1C, 0x3E04, 0x0020,
+ 0x3EFC, 0x057C, 0x0D5C, 0x3E08, 0x0024,
+ 0x3F0C, 0x0524, 0x0D98, 0x3E10, 0x0028,
+ 0x3F20, 0x04CC, 0x0DD8, 0x3E14, 0x0028,
+ 0x3F30, 0x0478, 0x0E14, 0x3E1C, 0x0028,
+ 0x3F40, 0x0424, 0x0E48, 0x3E28, 0x002C,
+ 0x3F50, 0x03D4, 0x0E7C, 0x3E34, 0x002C,
+ 0x3F60, 0x0384, 0x0EAC, 0x3E44, 0x002C,
+ 0x3F6C, 0x0338, 0x0EDC, 0x3E54, 0x002C,
+ 0x3F7C, 0x02E8, 0x0F08, 0x3E68, 0x002C,
+ 0x3F8C, 0x02A0, 0x0F2C, 0x3E7C, 0x002C,
+ 0x3F98, 0x0258, 0x0F50, 0x3E94, 0x002C,
+ 0x3FA4, 0x0210, 0x0F74, 0x3EB0, 0x0028,
+ 0x3FB0, 0x01CC, 0x0F90, 0x3ECC, 0x0028,
+ 0x3FC0, 0x018C, 0x0FA8, 0x3EE8, 0x0024,
+ 0x3FC8, 0x014C, 0x0FC0, 0x3F0C, 0x0020,
+ 0x3FD4, 0x0110, 0x0FD4, 0x3F2C, 0x001C,
+ 0x3FE0, 0x00D4, 0x0FE0, 0x3F54, 0x0018,
+ 0x3FE8, 0x009C, 0x0FF0, 0x3F7C, 0x0010,
+ 0x3FF0, 0x0064, 0x0FFC, 0x3FA4, 0x000C,
+ 0x3FFC, 0x0030, 0x0FFC, 0x3FD4, 0x0004,
+ 0x0000, 0x0000, 0x1000, 0x0000, 0x0000
+};
-static const uint16_t filter_5tap_64p_117[165] = {
- 16056, 2372, 2372, 16056, 0,
- 16052, 2312, 2432, 16060, 0,
- 16052, 2252, 2488, 16064, 0,
- 16052, 2188, 2548, 16072, 0,
- 16052, 2124, 2600, 16076, 0,
- 16052, 2064, 2656, 16088, 0,
- 16052, 2000, 2708, 16096, 0,
- 16056, 1932, 2760, 16108, 0,
- 16060, 1868, 2808, 16120, 0,
- 16064, 1804, 2856, 16132, 0,
- 16068, 1740, 2904, 16148, 16380,
- 16076, 1676, 2948, 16164, 16380,
- 16080, 1612, 2992, 16180, 16376,
- 16088, 1544, 3032, 16200, 16372,
- 16096, 1480, 3072, 16220, 16372,
- 16104, 1420, 3108, 16244, 16368,
- 16112, 1356, 3144, 16268, 16364,
- 16120, 1292, 3180, 16292, 16360,
- 16128, 1232, 3212, 16320, 16356,
- 16136, 1168, 3240, 16344, 16352,
- 16144, 1108, 3268, 16376, 16344,
- 16156, 1048, 3292, 20, 16340,
- 16164, 988, 3316, 52, 16332,
- 16172, 932, 3336, 88, 16328,
- 16184, 872, 3356, 124, 16320,
- 16192, 816, 3372, 160, 16316,
- 16204, 760, 3388, 196, 16308,
- 16212, 708, 3400, 236, 16300,
- 16220, 656, 3412, 276, 16292,
- 16232, 604, 3420, 320, 16284,
- 16240, 552, 3424, 364, 16276,
- 16248, 504, 3428, 408, 16268,
- 16256, 456, 3428, 456, 16256 };
+//=========================================
+// <num_taps> = 5
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_5tap_64p_116[165] = {
+ 0x3EDC, 0x0924, 0x0924, 0x3EDC, 0x0000,
+ 0x3ED8, 0x08EC, 0x095C, 0x3EE0, 0x0000,
+ 0x3ED4, 0x08B0, 0x0994, 0x3EE8, 0x0000,
+ 0x3ED0, 0x0878, 0x09C8, 0x3EF0, 0x0000,
+ 0x3ED0, 0x083C, 0x09FC, 0x3EF8, 0x0000,
+ 0x3ED0, 0x0800, 0x0A2C, 0x3F04, 0x0000,
+ 0x3ED0, 0x07C4, 0x0A5C, 0x3F10, 0x0000,
+ 0x3ED0, 0x0788, 0x0A8C, 0x3F1C, 0x0000,
+ 0x3ED0, 0x074C, 0x0AC0, 0x3F28, 0x3FFC,
+ 0x3ED4, 0x0710, 0x0AE8, 0x3F38, 0x3FFC,
+ 0x3ED8, 0x06D0, 0x0B18, 0x3F48, 0x3FF8,
+ 0x3EDC, 0x0694, 0x0B3C, 0x3F5C, 0x3FF8,
+ 0x3EE0, 0x0658, 0x0B68, 0x3F6C, 0x3FF4,
+ 0x3EE4, 0x061C, 0x0B90, 0x3F80, 0x3FF0,
+ 0x3EEC, 0x05DC, 0x0BB4, 0x3F98, 0x3FEC,
+ 0x3EF0, 0x05A0, 0x0BD8, 0x3FB0, 0x3FE8,
+ 0x3EF8, 0x0564, 0x0BF8, 0x3FC8, 0x3FE4,
+ 0x3EFC, 0x0528, 0x0C1C, 0x3FE0, 0x3FE0,
+ 0x3F04, 0x04EC, 0x0C38, 0x3FFC, 0x3FDC,
+ 0x3F0C, 0x04B4, 0x0C54, 0x0014, 0x3FD8,
+ 0x3F14, 0x047C, 0x0C70, 0x0030, 0x3FD0,
+ 0x3F1C, 0x0440, 0x0C88, 0x0050, 0x3FCC,
+ 0x3F24, 0x0408, 0x0CA0, 0x0070, 0x3FC4,
+ 0x3F2C, 0x03D0, 0x0CB0, 0x0094, 0x3FC0,
+ 0x3F34, 0x0398, 0x0CC4, 0x00B8, 0x3FB8,
+ 0x3F3C, 0x0364, 0x0CD4, 0x00DC, 0x3FB0,
+ 0x3F48, 0x032C, 0x0CE0, 0x0100, 0x3FAC,
+ 0x3F50, 0x02F8, 0x0CEC, 0x0128, 0x3FA4,
+ 0x3F58, 0x02C4, 0x0CF8, 0x0150, 0x3F9C,
+ 0x3F60, 0x0290, 0x0D00, 0x017C, 0x3F94,
+ 0x3F68, 0x0260, 0x0D04, 0x01A8, 0x3F8C,
+ 0x3F74, 0x0230, 0x0D04, 0x01D4, 0x3F84,
+ 0x3F7C, 0x0200, 0x0D08, 0x0200, 0x3F7C
+};
-static const uint16_t filter_5tap_64p_150[165] = {
- 16368, 2064, 2064, 16368, 0,
- 16352, 2028, 2100, 16380, 16380,
- 16340, 1996, 2132, 12, 16376,
- 16328, 1960, 2168, 24, 16376,
- 16316, 1924, 2204, 44, 16372,
- 16308, 1888, 2236, 60, 16368,
- 16296, 1848, 2268, 76, 16364,
- 16288, 1812, 2300, 96, 16360,
- 16280, 1772, 2328, 116, 16356,
- 16272, 1736, 2360, 136, 16352,
- 16268, 1696, 2388, 160, 16348,
- 16260, 1656, 2416, 180, 16344,
- 16256, 1616, 2440, 204, 16340,
- 16248, 1576, 2464, 228, 16336,
- 16244, 1536, 2492, 252, 16332,
- 16240, 1496, 2512, 276, 16324,
- 16240, 1456, 2536, 304, 16320,
- 16236, 1416, 2556, 332, 16316,
- 16232, 1376, 2576, 360, 16312,
- 16232, 1336, 2592, 388, 16308,
- 16232, 1296, 2612, 416, 16300,
- 16232, 1256, 2628, 448, 16296,
- 16232, 1216, 2640, 480, 16292,
- 16232, 1172, 2652, 512, 16288,
- 16232, 1132, 2664, 544, 16284,
- 16232, 1092, 2676, 576, 16280,
- 16236, 1056, 2684, 608, 16272,
- 16236, 1016, 2692, 644, 16268,
- 16240, 976, 2700, 680, 16264,
- 16240, 936, 2704, 712, 16260,
- 16244, 900, 2708, 748, 16256,
- 16248, 860, 2708, 788, 16252,
- 16248, 824, 2708, 824, 16248 };
+//=========================================
+// <num_taps> = 5
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_5tap_64p_149[165] = {
+ 0x3FF4, 0x080C, 0x080C, 0x3FF4, 0x0000,
+ 0x3FE8, 0x07E8, 0x0830, 0x0000, 0x0000,
+ 0x3FDC, 0x07C8, 0x0850, 0x0010, 0x3FFC,
+ 0x3FD0, 0x07A4, 0x0878, 0x001C, 0x3FF8,
+ 0x3FC4, 0x0780, 0x0898, 0x0030, 0x3FF4,
+ 0x3FB8, 0x075C, 0x08B8, 0x0040, 0x3FF4,
+ 0x3FB0, 0x0738, 0x08D8, 0x0050, 0x3FF0,
+ 0x3FA8, 0x0710, 0x08F8, 0x0064, 0x3FEC,
+ 0x3FA0, 0x06EC, 0x0914, 0x0078, 0x3FE8,
+ 0x3F98, 0x06C4, 0x0934, 0x008C, 0x3FE4,
+ 0x3F90, 0x06A0, 0x094C, 0x00A4, 0x3FE0,
+ 0x3F8C, 0x0678, 0x0968, 0x00B8, 0x3FDC,
+ 0x3F84, 0x0650, 0x0984, 0x00D0, 0x3FD8,
+ 0x3F80, 0x0628, 0x099C, 0x00E8, 0x3FD4,
+ 0x3F7C, 0x0600, 0x09B8, 0x0100, 0x3FCC,
+ 0x3F78, 0x05D8, 0x09D0, 0x0118, 0x3FC8,
+ 0x3F74, 0x05B0, 0x09E4, 0x0134, 0x3FC4,
+ 0x3F70, 0x0588, 0x09F8, 0x0150, 0x3FC0,
+ 0x3F70, 0x0560, 0x0A08, 0x016C, 0x3FBC,
+ 0x3F6C, 0x0538, 0x0A20, 0x0188, 0x3FB4,
+ 0x3F6C, 0x0510, 0x0A30, 0x01A4, 0x3FB0,
+ 0x3F6C, 0x04E8, 0x0A3C, 0x01C4, 0x3FAC,
+ 0x3F6C, 0x04C0, 0x0A48, 0x01E4, 0x3FA8,
+ 0x3F6C, 0x0498, 0x0A58, 0x0200, 0x3FA4,
+ 0x3F6C, 0x0470, 0x0A60, 0x0224, 0x3FA0,
+ 0x3F6C, 0x0448, 0x0A70, 0x0244, 0x3F98,
+ 0x3F70, 0x0420, 0x0A78, 0x0264, 0x3F94,
+ 0x3F70, 0x03F8, 0x0A80, 0x0288, 0x3F90,
+ 0x3F74, 0x03D4, 0x0A84, 0x02A8, 0x3F8C,
+ 0x3F74, 0x03AC, 0x0A8C, 0x02CC, 0x3F88,
+ 0x3F78, 0x0384, 0x0A90, 0x02F0, 0x3F84,
+ 0x3F7C, 0x0360, 0x0A90, 0x0314, 0x3F80,
+ 0x3F7C, 0x033C, 0x0A90, 0x033C, 0x3F7C
+};
+//=========================================
+// <num_taps> = 5
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_5tap_64p_183[165] = {
- 228, 1816, 1816, 228, 0,
- 216, 1792, 1836, 248, 16380,
- 200, 1772, 1860, 264, 16376,
- 184, 1748, 1884, 280, 16376,
- 168, 1728, 1904, 300, 16372,
- 156, 1704, 1928, 316, 16368,
- 144, 1680, 1948, 336, 16364,
- 128, 1656, 1968, 356, 16364,
- 116, 1632, 1988, 376, 16360,
- 104, 1604, 2008, 396, 16356,
- 96, 1580, 2024, 416, 16356,
- 84, 1556, 2044, 440, 16352,
- 72, 1528, 2060, 460, 16348,
- 64, 1504, 2076, 484, 16348,
- 52, 1476, 2092, 504, 16344,
- 44, 1448, 2104, 528, 16344,
- 36, 1424, 2120, 552, 16340,
- 28, 1396, 2132, 576, 16340,
- 20, 1368, 2144, 600, 16340,
- 12, 1340, 2156, 624, 16336,
- 4, 1312, 2168, 652, 16336,
- 0, 1284, 2180, 676, 16336,
- 16376, 1256, 2188, 700, 16332,
- 16372, 1228, 2196, 728, 16332,
- 16368, 1200, 2204, 752, 16332,
- 16364, 1172, 2212, 780, 16332,
- 16356, 1144, 2216, 808, 16332,
- 16352, 1116, 2220, 836, 16332,
- 16352, 1084, 2224, 860, 16332,
- 16348, 1056, 2228, 888, 16336,
- 16344, 1028, 2232, 916, 16336,
- 16340, 1000, 2232, 944, 16336,
- 16340, 972, 2232, 972, 16340 };
+ 0x0168, 0x069C, 0x0698, 0x0164, 0x0000,
+ 0x0154, 0x068C, 0x06AC, 0x0174, 0x0000,
+ 0x0144, 0x0674, 0x06C0, 0x0188, 0x0000,
+ 0x0138, 0x0664, 0x06D0, 0x0198, 0x3FFC,
+ 0x0128, 0x0654, 0x06E0, 0x01A8, 0x3FFC,
+ 0x0118, 0x0640, 0x06F0, 0x01BC, 0x3FFC,
+ 0x010C, 0x0630, 0x0700, 0x01CC, 0x3FF8,
+ 0x00FC, 0x061C, 0x0710, 0x01E0, 0x3FF8,
+ 0x00F0, 0x060C, 0x071C, 0x01F0, 0x3FF8,
+ 0x00E4, 0x05F4, 0x072C, 0x0204, 0x3FF8,
+ 0x00D8, 0x05E4, 0x0738, 0x0218, 0x3FF4,
+ 0x00CC, 0x05D0, 0x0744, 0x022C, 0x3FF4,
+ 0x00C0, 0x05B8, 0x0754, 0x0240, 0x3FF4,
+ 0x00B4, 0x05A4, 0x0760, 0x0254, 0x3FF4,
+ 0x00A8, 0x0590, 0x076C, 0x0268, 0x3FF4,
+ 0x009C, 0x057C, 0x0778, 0x027C, 0x3FF4,
+ 0x0094, 0x0564, 0x0780, 0x0294, 0x3FF4,
+ 0x0088, 0x0550, 0x0788, 0x02A8, 0x3FF8,
+ 0x0080, 0x0538, 0x0794, 0x02BC, 0x3FF8,
+ 0x0074, 0x0524, 0x079C, 0x02D4, 0x3FF8,
+ 0x006C, 0x0510, 0x07A4, 0x02E8, 0x3FF8,
+ 0x0064, 0x04F4, 0x07AC, 0x0300, 0x3FFC,
+ 0x005C, 0x04E4, 0x07B0, 0x0314, 0x3FFC,
+ 0x0054, 0x04C8, 0x07B8, 0x032C, 0x0000,
+ 0x004C, 0x04B4, 0x07C0, 0x0340, 0x0000,
+ 0x0044, 0x04A0, 0x07C4, 0x0358, 0x0000,
+ 0x003C, 0x0488, 0x07C8, 0x0370, 0x0004,
+ 0x0038, 0x0470, 0x07CC, 0x0384, 0x0008,
+ 0x0030, 0x045C, 0x07D0, 0x039C, 0x0008,
+ 0x002C, 0x0444, 0x07D0, 0x03B4, 0x000C,
+ 0x0024, 0x042C, 0x07D4, 0x03CC, 0x0010,
+ 0x0020, 0x0414, 0x07D4, 0x03E0, 0x0018,
+ 0x001C, 0x03FC, 0x07D4, 0x03F8, 0x001C
+};
+//=========================================
+// <num_taps> = 6
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_6tap_64p_upscale[198] = {
- 0, 0, 4092, 0, 0, 0,
- 12, 16332, 4092, 52, 16368, 0,
- 24, 16280, 4088, 108, 16356, 0,
- 36, 16236, 4080, 168, 16340, 0,
- 44, 16188, 4064, 228, 16324, 0,
- 56, 16148, 4052, 292, 16308, 0,
- 64, 16108, 4032, 356, 16292, 4,
- 72, 16072, 4008, 424, 16276, 4,
- 80, 16036, 3980, 492, 16256, 4,
- 88, 16004, 3952, 564, 16240, 8,
- 96, 15972, 3920, 636, 16220, 8,
- 100, 15944, 3884, 712, 16204, 12,
- 108, 15916, 3844, 788, 16184, 16,
- 112, 15896, 3800, 864, 16164, 20,
- 116, 15872, 3756, 944, 16144, 20,
- 120, 15852, 3708, 1024, 16124, 24,
- 120, 15836, 3656, 1108, 16104, 28,
- 124, 15824, 3600, 1192, 16084, 32,
- 124, 15808, 3544, 1276, 16064, 36,
- 124, 15800, 3484, 1360, 16044, 40,
- 128, 15792, 3420, 1448, 16024, 44,
- 128, 15784, 3352, 1536, 16004, 48,
- 124, 15780, 3288, 1624, 15988, 52,
- 124, 15776, 3216, 1712, 15968, 56,
- 124, 15776, 3144, 1800, 15948, 64,
- 120, 15776, 3068, 1888, 15932, 68,
- 120, 15780, 2992, 1976, 15912, 72,
- 116, 15784, 2916, 2064, 15896, 76,
- 112, 15792, 2836, 2152, 15880, 80,
- 108, 15796, 2752, 2244, 15868, 84,
- 104, 15804, 2672, 2328, 15852, 88,
- 104, 15816, 2588, 2416, 15840, 92,
- 100, 15828, 2504, 2504, 15828, 100 };
+ 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000,
+ 0x000C, 0x3FD0, 0x0FFC, 0x0034, 0x3FF4, 0x0000,
+ 0x0018, 0x3F9C, 0x0FF8, 0x006C, 0x3FE8, 0x0000,
+ 0x0024, 0x3F6C, 0x0FF0, 0x00A8, 0x3FD8, 0x0000,
+ 0x002C, 0x3F44, 0x0FE4, 0x00E4, 0x3FC8, 0x0000,
+ 0x0038, 0x3F18, 0x0FD4, 0x0124, 0x3FB8, 0x0000,
+ 0x0040, 0x3EF0, 0x0FC0, 0x0164, 0x3FA8, 0x0004,
+ 0x0048, 0x3EC8, 0x0FAC, 0x01A8, 0x3F98, 0x0004,
+ 0x0050, 0x3EA8, 0x0F94, 0x01EC, 0x3F84, 0x0004,
+ 0x0058, 0x3E84, 0x0F74, 0x0234, 0x3F74, 0x0008,
+ 0x0060, 0x3E68, 0x0F54, 0x027C, 0x3F60, 0x0008,
+ 0x0064, 0x3E4C, 0x0F30, 0x02C8, 0x3F4C, 0x000C,
+ 0x006C, 0x3E30, 0x0F04, 0x0314, 0x3F3C, 0x0010,
+ 0x0070, 0x3E18, 0x0EDC, 0x0360, 0x3F28, 0x0014,
+ 0x0074, 0x3E04, 0x0EB0, 0x03B0, 0x3F14, 0x0014,
+ 0x0078, 0x3DF0, 0x0E80, 0x0400, 0x3F00, 0x0018,
+ 0x0078, 0x3DE0, 0x0E4C, 0x0454, 0x3EEC, 0x001C,
+ 0x007C, 0x3DD0, 0x0E14, 0x04A8, 0x3ED8, 0x0020,
+ 0x007C, 0x3DC4, 0x0DDC, 0x04FC, 0x3EC4, 0x0024,
+ 0x007C, 0x3DBC, 0x0DA0, 0x0550, 0x3EB0, 0x0028,
+ 0x0080, 0x3DB4, 0x0D5C, 0x05A8, 0x3E9C, 0x002C,
+ 0x0080, 0x3DAC, 0x0D1C, 0x0600, 0x3E88, 0x0030,
+ 0x007C, 0x3DA8, 0x0CDC, 0x0658, 0x3E74, 0x0034,
+ 0x007C, 0x3DA4, 0x0C94, 0x06B0, 0x3E64, 0x0038,
+ 0x007C, 0x3DA4, 0x0C48, 0x0708, 0x3E50, 0x0040,
+ 0x0078, 0x3DA4, 0x0C00, 0x0760, 0x3E40, 0x0044,
+ 0x0078, 0x3DA8, 0x0BB4, 0x07B8, 0x3E2C, 0x0048,
+ 0x0074, 0x3DAC, 0x0B68, 0x0810, 0x3E1C, 0x004C,
+ 0x0070, 0x3DB4, 0x0B18, 0x0868, 0x3E0C, 0x0050,
+ 0x006C, 0x3DBC, 0x0AC4, 0x08C4, 0x3DFC, 0x0054,
+ 0x0068, 0x3DC4, 0x0A74, 0x0918, 0x3DF0, 0x0058,
+ 0x0068, 0x3DCC, 0x0A20, 0x0970, 0x3DE0, 0x005C,
+ 0x0064, 0x3DD4, 0x09C8, 0x09C8, 0x3DD4, 0x0064
+};
-static const uint16_t filter_6tap_64p_117[198] = {
- 16168, 476, 3568, 476, 16168, 0,
- 16180, 428, 3564, 528, 16156, 0,
- 16192, 376, 3556, 584, 16144, 4,
- 16204, 328, 3548, 636, 16128, 4,
- 16216, 280, 3540, 692, 16116, 8,
- 16228, 232, 3524, 748, 16104, 12,
- 16240, 188, 3512, 808, 16092, 12,
- 16252, 148, 3492, 864, 16080, 16,
- 16264, 104, 3472, 924, 16068, 16,
- 16276, 64, 3452, 984, 16056, 20,
- 16284, 28, 3428, 1044, 16048, 24,
- 16296, 16376, 3400, 1108, 16036, 24,
- 16304, 16340, 3372, 1168, 16024, 28,
- 16316, 16304, 3340, 1232, 16016, 32,
- 16324, 16272, 3308, 1296, 16004, 32,
- 16332, 16244, 3272, 1360, 15996, 36,
- 16344, 16212, 3236, 1424, 15988, 36,
- 16352, 16188, 3200, 1488, 15980, 40,
- 16360, 16160, 3160, 1552, 15972, 40,
- 16368, 16136, 3116, 1616, 15964, 40,
- 16372, 16112, 3072, 1680, 15956, 44,
- 16380, 16092, 3028, 1744, 15952, 44,
- 0, 16072, 2980, 1808, 15948, 44,
- 8, 16052, 2932, 1872, 15944, 48,
- 12, 16036, 2880, 1936, 15940, 48,
- 16, 16020, 2828, 2000, 15936, 48,
- 20, 16008, 2776, 2064, 15936, 48,
- 24, 15996, 2724, 2128, 15936, 48,
- 28, 15984, 2668, 2192, 15936, 48,
- 32, 15972, 2612, 2252, 15940, 44,
- 36, 15964, 2552, 2316, 15940, 44,
- 40, 15956, 2496, 2376, 15944, 44,
- 40, 15952, 2436, 2436, 15952, 40 };
+//=========================================
+// <num_taps> = 6
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_6tap_64p_116[198] = {
+ 0x3F0C, 0x0240, 0x0D68, 0x0240, 0x3F0C, 0x0000,
+ 0x3F18, 0x0210, 0x0D64, 0x0274, 0x3F00, 0x0000,
+ 0x3F24, 0x01E0, 0x0D58, 0x02A8, 0x3EF8, 0x0004,
+ 0x3F2C, 0x01B0, 0x0D58, 0x02DC, 0x3EEC, 0x0004,
+ 0x3F38, 0x0180, 0x0D50, 0x0310, 0x3EE0, 0x0008,
+ 0x3F44, 0x0154, 0x0D40, 0x0348, 0x3ED8, 0x0008,
+ 0x3F50, 0x0128, 0x0D34, 0x037C, 0x3ECC, 0x000C,
+ 0x3F5C, 0x00FC, 0x0D20, 0x03B4, 0x3EC4, 0x0010,
+ 0x3F64, 0x00D4, 0x0D14, 0x03EC, 0x3EB8, 0x0010,
+ 0x3F70, 0x00AC, 0x0CFC, 0x0424, 0x3EB0, 0x0014,
+ 0x3F78, 0x0084, 0x0CE8, 0x0460, 0x3EA8, 0x0014,
+ 0x3F84, 0x0060, 0x0CCC, 0x0498, 0x3EA0, 0x0018,
+ 0x3F90, 0x003C, 0x0CB4, 0x04D0, 0x3E98, 0x0018,
+ 0x3F98, 0x0018, 0x0C9C, 0x050C, 0x3E90, 0x0018,
+ 0x3FA0, 0x3FFC, 0x0C78, 0x0548, 0x3E88, 0x001C,
+ 0x3FAC, 0x3FDC, 0x0C54, 0x0584, 0x3E84, 0x001C,
+ 0x3FB4, 0x3FBC, 0x0C3C, 0x05BC, 0x3E7C, 0x001C,
+ 0x3FBC, 0x3FA0, 0x0C14, 0x05F8, 0x3E78, 0x0020,
+ 0x3FC4, 0x3F84, 0x0BF0, 0x0634, 0x3E74, 0x0020,
+ 0x3FCC, 0x3F68, 0x0BCC, 0x0670, 0x3E70, 0x0020,
+ 0x3FD4, 0x3F50, 0x0BA4, 0x06AC, 0x3E6C, 0x0020,
+ 0x3FDC, 0x3F38, 0x0B78, 0x06E8, 0x3E6C, 0x0020,
+ 0x3FE0, 0x3F24, 0x0B50, 0x0724, 0x3E68, 0x0020,
+ 0x3FE8, 0x3F0C, 0x0B24, 0x0760, 0x3E68, 0x0020,
+ 0x3FF0, 0x3EFC, 0x0AF4, 0x0798, 0x3E68, 0x0020,
+ 0x3FF4, 0x3EE8, 0x0AC8, 0x07D4, 0x3E68, 0x0020,
+ 0x3FFC, 0x3ED8, 0x0A94, 0x0810, 0x3E6C, 0x001C,
+ 0x0000, 0x3EC8, 0x0A64, 0x0848, 0x3E70, 0x001C,
+ 0x0000, 0x3EB8, 0x0A38, 0x0880, 0x3E74, 0x001C,
+ 0x0004, 0x3EAC, 0x0A04, 0x08BC, 0x3E78, 0x0018,
+ 0x0008, 0x3EA4, 0x09D0, 0x08F4, 0x3E7C, 0x0014,
+ 0x000C, 0x3E98, 0x0998, 0x092C, 0x3E84, 0x0014,
+ 0x0010, 0x3E90, 0x0964, 0x0960, 0x3E8C, 0x0010
+};
-static const uint16_t filter_6tap_64p_150[198] = {
- 16148, 920, 2724, 920, 16148, 0,
- 16152, 880, 2724, 956, 16148, 0,
- 16152, 844, 2720, 996, 16144, 0,
- 16156, 804, 2716, 1032, 16144, 0,
- 16156, 768, 2712, 1072, 16144, 0,
- 16160, 732, 2708, 1112, 16144, 16380,
- 16164, 696, 2700, 1152, 16144, 16380,
- 16168, 660, 2692, 1192, 16148, 16380,
- 16172, 628, 2684, 1232, 16148, 16380,
- 16176, 592, 2672, 1272, 16152, 16376,
- 16180, 560, 2660, 1312, 16152, 16376,
- 16184, 524, 2648, 1348, 16156, 16376,
- 16192, 492, 2632, 1388, 16160, 16372,
- 16196, 460, 2616, 1428, 16164, 16372,
- 16200, 432, 2600, 1468, 16168, 16368,
- 16204, 400, 2584, 1508, 16176, 16364,
- 16212, 368, 2564, 1548, 16180, 16364,
- 16216, 340, 2544, 1588, 16188, 16360,
- 16220, 312, 2524, 1628, 16196, 16356,
- 16228, 284, 2504, 1668, 16204, 16356,
- 16232, 256, 2480, 1704, 16212, 16352,
- 16240, 232, 2456, 1744, 16224, 16348,
- 16244, 204, 2432, 1780, 16232, 16344,
- 16248, 180, 2408, 1820, 16244, 16340,
- 16256, 156, 2380, 1856, 16256, 16336,
- 16260, 132, 2352, 1896, 16268, 16332,
- 16268, 108, 2324, 1932, 16280, 16328,
- 16272, 88, 2296, 1968, 16292, 16324,
- 16276, 64, 2268, 2004, 16308, 16320,
- 16284, 44, 2236, 2036, 16324, 16312,
- 16288, 24, 2204, 2072, 16340, 16308,
- 16292, 8, 2172, 2108, 16356, 16304,
- 16300, 16372, 2140, 2140, 16372, 16300 };
+//=========================================
+// <num_taps> = 6
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_6tap_64p_149[198] = {
+ 0x3F14, 0x0394, 0x0AB0, 0x0394, 0x3F14, 0x0000,
+ 0x3F18, 0x036C, 0x0AB0, 0x03B8, 0x3F14, 0x0000,
+ 0x3F18, 0x0348, 0x0AAC, 0x03E0, 0x3F14, 0x0000,
+ 0x3F1C, 0x0320, 0x0AAC, 0x0408, 0x3F10, 0x0000,
+ 0x3F20, 0x02FC, 0x0AA8, 0x042C, 0x3F10, 0x0000,
+ 0x3F24, 0x02D8, 0x0AA0, 0x0454, 0x3F10, 0x0000,
+ 0x3F28, 0x02B4, 0x0A98, 0x047C, 0x3F10, 0x0000,
+ 0x3F28, 0x0290, 0x0A90, 0x04A4, 0x3F14, 0x0000,
+ 0x3F30, 0x026C, 0x0A84, 0x04CC, 0x3F14, 0x0000,
+ 0x3F34, 0x024C, 0x0A7C, 0x04F4, 0x3F14, 0x3FFC,
+ 0x3F38, 0x0228, 0x0A70, 0x051C, 0x3F18, 0x3FFC,
+ 0x3F3C, 0x0208, 0x0A64, 0x0544, 0x3F1C, 0x3FF8,
+ 0x3F40, 0x01E8, 0x0A54, 0x056C, 0x3F20, 0x3FF8,
+ 0x3F44, 0x01C8, 0x0A48, 0x0594, 0x3F24, 0x3FF4,
+ 0x3F4C, 0x01A8, 0x0A34, 0x05BC, 0x3F28, 0x3FF4,
+ 0x3F50, 0x0188, 0x0A28, 0x05E4, 0x3F2C, 0x3FF0,
+ 0x3F54, 0x016C, 0x0A10, 0x060C, 0x3F34, 0x3FF0,
+ 0x3F5C, 0x014C, 0x09FC, 0x0634, 0x3F3C, 0x3FEC,
+ 0x3F60, 0x0130, 0x09EC, 0x065C, 0x3F40, 0x3FE8,
+ 0x3F68, 0x0114, 0x09D0, 0x0684, 0x3F48, 0x3FE8,
+ 0x3F6C, 0x00F8, 0x09B8, 0x06AC, 0x3F54, 0x3FE4,
+ 0x3F74, 0x00E0, 0x09A0, 0x06D0, 0x3F5C, 0x3FE0,
+ 0x3F78, 0x00C4, 0x098C, 0x06F8, 0x3F64, 0x3FDC,
+ 0x3F7C, 0x00AC, 0x0970, 0x0720, 0x3F70, 0x3FD8,
+ 0x3F84, 0x0094, 0x0954, 0x0744, 0x3F7C, 0x3FD4,
+ 0x3F88, 0x007C, 0x093C, 0x0768, 0x3F88, 0x3FD0,
+ 0x3F90, 0x0064, 0x091C, 0x0790, 0x3F94, 0x3FCC,
+ 0x3F94, 0x0050, 0x08FC, 0x07B4, 0x3FA4, 0x3FC8,
+ 0x3F98, 0x003C, 0x08E0, 0x07D8, 0x3FB0, 0x3FC4,
+ 0x3FA0, 0x0024, 0x08C0, 0x07FC, 0x3FC0, 0x3FC0,
+ 0x3FA4, 0x0014, 0x08A4, 0x081C, 0x3FD0, 0x3FB8,
+ 0x3FAC, 0x0000, 0x0880, 0x0840, 0x3FE0, 0x3FB4,
+ 0x3FB0, 0x3FF0, 0x0860, 0x0860, 0x3FF0, 0x3FB0
+};
+//=========================================
+// <num_taps> = 6
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_6tap_64p_183[198] = {
- 16296, 1032, 2196, 1032, 16296, 0,
- 16292, 1004, 2200, 1060, 16304, 16380,
- 16288, 976, 2200, 1088, 16308, 16380,
- 16284, 952, 2196, 1116, 16312, 16376,
- 16284, 924, 2196, 1144, 16320, 16376,
- 16280, 900, 2192, 1172, 16324, 16372,
- 16276, 872, 2192, 1200, 16332, 16368,
- 16276, 848, 2188, 1228, 16340, 16368,
- 16272, 820, 2180, 1256, 16348, 16364,
- 16272, 796, 2176, 1280, 16356, 16360,
- 16268, 768, 2168, 1308, 16364, 16360,
- 16268, 744, 2164, 1336, 16372, 16356,
- 16268, 716, 2156, 1364, 16380, 16352,
- 16264, 692, 2148, 1392, 4, 16352,
- 16264, 668, 2136, 1420, 16, 16348,
- 16264, 644, 2128, 1448, 28, 16344,
- 16264, 620, 2116, 1472, 36, 16340,
- 16264, 596, 2108, 1500, 48, 16340,
- 16268, 572, 2096, 1524, 60, 16336,
- 16268, 548, 2080, 1552, 72, 16332,
- 16268, 524, 2068, 1576, 88, 16328,
- 16268, 504, 2056, 1604, 100, 16324,
- 16272, 480, 2040, 1628, 112, 16324,
- 16272, 456, 2024, 1652, 128, 16320,
- 16272, 436, 2008, 1680, 144, 16316,
- 16276, 416, 1992, 1704, 156, 16312,
- 16276, 392, 1976, 1724, 172, 16308,
- 16280, 372, 1956, 1748, 188, 16308,
- 16280, 352, 1940, 1772, 204, 16304,
- 16284, 332, 1920, 1796, 224, 16300,
- 16288, 312, 1900, 1816, 240, 16296,
- 16288, 296, 1880, 1840, 256, 16296,
- 16292, 276, 1860, 1860, 276, 16292 };
+ 0x002C, 0x0420, 0x076C, 0x041C, 0x002C, 0x0000,
+ 0x0028, 0x040C, 0x0768, 0x0430, 0x0034, 0x0000,
+ 0x0020, 0x03F8, 0x0768, 0x0448, 0x003C, 0x3FFC,
+ 0x0018, 0x03E4, 0x0768, 0x045C, 0x0044, 0x3FFC,
+ 0x0014, 0x03D0, 0x0768, 0x0470, 0x004C, 0x3FF8,
+ 0x000C, 0x03BC, 0x0764, 0x0484, 0x0058, 0x3FF8,
+ 0x0008, 0x03A4, 0x0764, 0x049C, 0x0060, 0x3FF4,
+ 0x0004, 0x0390, 0x0760, 0x04B0, 0x0068, 0x3FF4,
+ 0x0000, 0x037C, 0x0760, 0x04C4, 0x0070, 0x3FF0,
+ 0x3FFC, 0x0364, 0x075C, 0x04D8, 0x007C, 0x3FF0,
+ 0x3FF8, 0x0350, 0x0758, 0x04F0, 0x0084, 0x3FEC,
+ 0x3FF4, 0x033C, 0x0750, 0x0504, 0x0090, 0x3FEC,
+ 0x3FF0, 0x0328, 0x074C, 0x0518, 0x009C, 0x3FE8,
+ 0x3FEC, 0x0314, 0x0744, 0x052C, 0x00A8, 0x3FE8,
+ 0x3FE8, 0x0304, 0x0740, 0x0540, 0x00B0, 0x3FE4,
+ 0x3FE4, 0x02EC, 0x073C, 0x0554, 0x00BC, 0x3FE4,
+ 0x3FE0, 0x02DC, 0x0734, 0x0568, 0x00C8, 0x3FE0,
+ 0x3FE0, 0x02C4, 0x072C, 0x057C, 0x00D4, 0x3FE0,
+ 0x3FDC, 0x02B4, 0x0724, 0x058C, 0x00E4, 0x3FDC,
+ 0x3FDC, 0x02A0, 0x0718, 0x05A0, 0x00F0, 0x3FDC,
+ 0x3FD8, 0x028C, 0x0714, 0x05B4, 0x00FC, 0x3FD8,
+ 0x3FD8, 0x0278, 0x0704, 0x05C8, 0x010C, 0x3FD8,
+ 0x3FD4, 0x0264, 0x0700, 0x05D8, 0x0118, 0x3FD8,
+ 0x3FD4, 0x0254, 0x06F0, 0x05EC, 0x0128, 0x3FD4,
+ 0x3FD0, 0x0244, 0x06E8, 0x05FC, 0x0134, 0x3FD4,
+ 0x3FD0, 0x0230, 0x06DC, 0x060C, 0x0144, 0x3FD4,
+ 0x3FD0, 0x021C, 0x06D0, 0x0620, 0x0154, 0x3FD0,
+ 0x3FD0, 0x0208, 0x06C4, 0x0630, 0x0164, 0x3FD0,
+ 0x3FD0, 0x01F8, 0x06B8, 0x0640, 0x0170, 0x3FD0,
+ 0x3FCC, 0x01E8, 0x06AC, 0x0650, 0x0180, 0x3FD0,
+ 0x3FCC, 0x01D8, 0x069C, 0x0660, 0x0190, 0x3FD0,
+ 0x3FCC, 0x01C4, 0x068C, 0x0670, 0x01A4, 0x3FD0,
+ 0x3FCC, 0x01B8, 0x0680, 0x067C, 0x01B4, 0x3FCC
+};
+//=========================================
+// <num_taps> = 7
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_7tap_64p_upscale[231] = {
- 176, 15760, 2488, 2488, 15760, 176, 0,
- 172, 15772, 2404, 2572, 15752, 180, 16380,
- 168, 15784, 2324, 2656, 15740, 184, 16380,
- 164, 15800, 2240, 2736, 15732, 188, 16376,
- 160, 15812, 2152, 2816, 15728, 192, 16376,
- 152, 15828, 2068, 2896, 15724, 192, 16376,
- 148, 15848, 1984, 2972, 15720, 196, 16372,
- 140, 15864, 1896, 3048, 15720, 196, 16372,
- 136, 15884, 1812, 3124, 15720, 196, 16368,
- 128, 15900, 1724, 3196, 15720, 196, 16368,
- 120, 15920, 1640, 3268, 15724, 196, 16368,
- 116, 15940, 1552, 3336, 15732, 196, 16364,
- 108, 15964, 1468, 3400, 15740, 196, 16364,
- 104, 15984, 1384, 3464, 15748, 192, 16364,
- 96, 16004, 1300, 3524, 15760, 188, 16364,
- 88, 16028, 1216, 3584, 15776, 184, 16364,
- 84, 16048, 1132, 3640, 15792, 180, 16360,
- 76, 16072, 1048, 3692, 15812, 176, 16360,
- 68, 16092, 968, 3744, 15832, 168, 16360,
- 64, 16116, 888, 3788, 15856, 160, 16360,
- 56, 16140, 812, 3832, 15884, 152, 16360,
- 52, 16160, 732, 3876, 15912, 144, 16360,
- 44, 16184, 656, 3912, 15944, 136, 16364,
- 40, 16204, 584, 3944, 15976, 124, 16364,
- 32, 16228, 512, 3976, 16012, 116, 16364,
- 28, 16248, 440, 4004, 16048, 104, 16364,
- 24, 16268, 372, 4028, 16092, 88, 16368,
- 20, 16288, 304, 4048, 16132, 76, 16368,
- 12, 16308, 240, 4064, 16180, 60, 16372,
- 8, 16328, 176, 4076, 16228, 48, 16372,
- 4, 16348, 112, 4088, 16276, 32, 16376,
- 0, 16364, 56, 4092, 16328, 16, 16380,
- 0, 0, 0, 4096, 0, 0, 0 };
+ 0x00B0, 0x3D98, 0x09BC, 0x09B8, 0x3D94, 0x00B0, 0x0000,
+ 0x00AC, 0x3DA0, 0x0968, 0x0A10, 0x3D88, 0x00B4, 0x0000,
+ 0x00A8, 0x3DAC, 0x0914, 0x0A60, 0x3D80, 0x00B8, 0x0000,
+ 0x00A4, 0x3DB8, 0x08C0, 0x0AB4, 0x3D78, 0x00BC, 0x3FFC,
+ 0x00A0, 0x3DC8, 0x0868, 0x0B00, 0x3D74, 0x00C0, 0x3FFC,
+ 0x0098, 0x3DD8, 0x0818, 0x0B54, 0x3D6C, 0x00C0, 0x3FF8,
+ 0x0094, 0x3DE8, 0x07C0, 0x0B9C, 0x3D6C, 0x00C4, 0x3FF8,
+ 0x008C, 0x3DFC, 0x0768, 0x0BEC, 0x3D68, 0x00C4, 0x3FF8,
+ 0x0088, 0x3E0C, 0x0714, 0x0C38, 0x3D68, 0x00C4, 0x3FF4,
+ 0x0080, 0x3E20, 0x06BC, 0x0C80, 0x3D6C, 0x00C4, 0x3FF4,
+ 0x0078, 0x3E34, 0x0668, 0x0CC4, 0x3D70, 0x00C4, 0x3FF4,
+ 0x0074, 0x3E48, 0x0610, 0x0D08, 0x3D78, 0x00C4, 0x3FF0,
+ 0x006C, 0x3E5C, 0x05BC, 0x0D48, 0x3D80, 0x00C4, 0x3FF0,
+ 0x0068, 0x3E74, 0x0568, 0x0D84, 0x3D88, 0x00C0, 0x3FF0,
+ 0x0060, 0x3E88, 0x0514, 0x0DC8, 0x3D94, 0x00BC, 0x3FEC,
+ 0x0058, 0x3E9C, 0x04C0, 0x0E04, 0x3DA4, 0x00B8, 0x3FEC,
+ 0x0054, 0x3EB4, 0x046C, 0x0E38, 0x3DB4, 0x00B4, 0x3FEC,
+ 0x004C, 0x3ECC, 0x0418, 0x0E6C, 0x3DC8, 0x00B0, 0x3FEC,
+ 0x0044, 0x3EE0, 0x03C8, 0x0EA4, 0x3DDC, 0x00A8, 0x3FEC,
+ 0x0040, 0x3EF8, 0x0378, 0x0ED0, 0x3DF4, 0x00A0, 0x3FEC,
+ 0x0038, 0x3F0C, 0x032C, 0x0EFC, 0x3E10, 0x0098, 0x3FEC,
+ 0x0034, 0x3F24, 0x02DC, 0x0F24, 0x3E2C, 0x0090, 0x3FEC,
+ 0x002C, 0x3F38, 0x0294, 0x0F4C, 0x3E48, 0x0088, 0x3FEC,
+ 0x0028, 0x3F50, 0x0248, 0x0F68, 0x3E6C, 0x007C, 0x3FF0,
+ 0x0020, 0x3F64, 0x0200, 0x0F88, 0x3E90, 0x0074, 0x3FF0,
+ 0x001C, 0x3F7C, 0x01B8, 0x0FA4, 0x3EB4, 0x0068, 0x3FF0,
+ 0x0018, 0x3F90, 0x0174, 0x0FBC, 0x3EDC, 0x0058, 0x3FF4,
+ 0x0014, 0x3FA4, 0x0130, 0x0FD0, 0x3F08, 0x004C, 0x3FF4,
+ 0x000C, 0x3FB8, 0x00F0, 0x0FE4, 0x3F34, 0x003C, 0x3FF8,
+ 0x0008, 0x3FCC, 0x00B0, 0x0FF0, 0x3F64, 0x0030, 0x3FF8,
+ 0x0004, 0x3FDC, 0x0070, 0x0FFC, 0x3F98, 0x0020, 0x3FFC,
+ 0x0000, 0x3FF0, 0x0038, 0x0FFC, 0x3FCC, 0x0010, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000
+};
-static const uint16_t filter_7tap_64p_117[231] = {
- 92, 15868, 2464, 2464, 15868, 92, 0,
- 96, 15864, 2404, 2528, 15876, 88, 0,
- 100, 15860, 2344, 2584, 15884, 84, 0,
- 104, 15856, 2280, 2644, 15892, 76, 0,
- 108, 15852, 2216, 2700, 15904, 72, 0,
- 108, 15852, 2152, 2756, 15916, 64, 0,
- 112, 15852, 2088, 2812, 15932, 60, 0,
- 112, 15852, 2024, 2864, 15948, 52, 0,
- 112, 15856, 1960, 2916, 15964, 44, 0,
- 116, 15860, 1892, 2964, 15984, 36, 0,
- 116, 15864, 1828, 3016, 16004, 24, 4,
- 116, 15868, 1760, 3060, 16024, 16, 4,
- 116, 15876, 1696, 3108, 16048, 8, 8,
- 116, 15884, 1628, 3152, 16072, 16380, 8,
- 112, 15892, 1564, 3192, 16100, 16372, 8,
- 112, 15900, 1496, 3232, 16124, 16360, 12,
- 112, 15908, 1428, 3268, 16156, 16348, 12,
- 108, 15920, 1364, 3304, 16188, 16336, 16,
- 108, 15928, 1300, 3340, 16220, 16324, 20,
- 104, 15940, 1232, 3372, 16252, 16312, 20,
- 104, 15952, 1168, 3400, 16288, 16300, 24,
- 100, 15964, 1104, 3428, 16328, 16284, 28,
- 96, 15980, 1040, 3452, 16364, 16272, 28,
- 96, 15992, 976, 3476, 20, 16256, 32,
- 92, 16004, 916, 3496, 64, 16244, 36,
- 88, 16020, 856, 3516, 108, 16228, 40,
- 84, 16032, 792, 3532, 152, 16216, 44,
- 80, 16048, 732, 3544, 200, 16200, 48,
- 80, 16064, 676, 3556, 248, 16184, 48,
- 76, 16080, 616, 3564, 296, 16168, 52,
- 72, 16092, 560, 3568, 344, 16156, 56,
- 68, 16108, 504, 3572, 396, 16140, 60,
- 64, 16124, 452, 3576, 452, 16124, 64 };
+//=========================================
+// <num_taps> = 7
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_7tap_64p_116[231] = {
+ 0x0020, 0x3E58, 0x0988, 0x0988, 0x3E58, 0x0020, 0x0000,
+ 0x0024, 0x3E4C, 0x0954, 0x09C0, 0x3E64, 0x0018, 0x0000,
+ 0x002C, 0x3E44, 0x091C, 0x09F4, 0x3E70, 0x0010, 0x0000,
+ 0x0030, 0x3E3C, 0x08E8, 0x0A24, 0x3E80, 0x0008, 0x0000,
+ 0x0034, 0x3E34, 0x08AC, 0x0A5C, 0x3E90, 0x0000, 0x0000,
+ 0x003C, 0x3E30, 0x0870, 0x0A84, 0x3EA0, 0x3FFC, 0x0004,
+ 0x0040, 0x3E28, 0x0838, 0x0AB4, 0x3EB4, 0x3FF4, 0x0004,
+ 0x0044, 0x3E24, 0x07FC, 0x0AE4, 0x3EC8, 0x3FEC, 0x0004,
+ 0x0048, 0x3E24, 0x07C4, 0x0B08, 0x3EDC, 0x3FE4, 0x0008,
+ 0x0048, 0x3E20, 0x0788, 0x0B3C, 0x3EF4, 0x3FD8, 0x0008,
+ 0x004C, 0x3E20, 0x074C, 0x0B60, 0x3F0C, 0x3FD0, 0x000C,
+ 0x0050, 0x3E20, 0x0710, 0x0B8C, 0x3F24, 0x3FC4, 0x000C,
+ 0x0050, 0x3E20, 0x06D4, 0x0BB0, 0x3F40, 0x3FBC, 0x0010,
+ 0x0054, 0x3E24, 0x0698, 0x0BD4, 0x3F5C, 0x3FB0, 0x0010,
+ 0x0054, 0x3E24, 0x065C, 0x0BFC, 0x3F78, 0x3FA4, 0x0014,
+ 0x0054, 0x3E28, 0x0624, 0x0C1C, 0x3F98, 0x3F98, 0x0014,
+ 0x0058, 0x3E2C, 0x05E4, 0x0C3C, 0x3FB8, 0x3F8C, 0x0018,
+ 0x0058, 0x3E34, 0x05A8, 0x0C58, 0x3FD8, 0x3F80, 0x001C,
+ 0x0058, 0x3E38, 0x0570, 0x0C78, 0x3FF8, 0x3F74, 0x001C,
+ 0x0058, 0x3E40, 0x0534, 0x0C94, 0x0018, 0x3F68, 0x0020,
+ 0x0058, 0x3E48, 0x04F4, 0x0CAC, 0x0040, 0x3F5C, 0x0024,
+ 0x0058, 0x3E50, 0x04BC, 0x0CC4, 0x0064, 0x3F50, 0x0024,
+ 0x0054, 0x3E58, 0x0484, 0x0CD8, 0x008C, 0x3F44, 0x0028,
+ 0x0054, 0x3E60, 0x0448, 0x0CEC, 0x00B4, 0x3F38, 0x002C,
+ 0x0054, 0x3E68, 0x0410, 0x0CFC, 0x00E0, 0x3F28, 0x0030,
+ 0x0054, 0x3E74, 0x03D4, 0x0D0C, 0x010C, 0x3F1C, 0x0030,
+ 0x0050, 0x3E7C, 0x03A0, 0x0D18, 0x0138, 0x3F10, 0x0034,
+ 0x0050, 0x3E88, 0x0364, 0x0D24, 0x0164, 0x3F04, 0x0038,
+ 0x004C, 0x3E94, 0x0330, 0x0D30, 0x0194, 0x3EF4, 0x0038,
+ 0x004C, 0x3EA0, 0x02F8, 0x0D34, 0x01C4, 0x3EE8, 0x003C,
+ 0x0048, 0x3EAC, 0x02C0, 0x0D3C, 0x01F4, 0x3EDC, 0x0040,
+ 0x0048, 0x3EB8, 0x0290, 0x0D3C, 0x0224, 0x3ED0, 0x0040,
+ 0x0044, 0x3EC4, 0x0258, 0x0D40, 0x0258, 0x3EC4, 0x0044
+};
-static const uint16_t filter_7tap_64p_150[231] = {
- 16224, 16380, 2208, 2208, 16380, 16224, 0,
- 16232, 16360, 2172, 2236, 16, 16216, 0,
- 16236, 16340, 2140, 2268, 40, 16212, 0,
- 16244, 16324, 2104, 2296, 60, 16204, 4,
- 16252, 16304, 2072, 2324, 84, 16196, 4,
- 16256, 16288, 2036, 2352, 108, 16192, 4,
- 16264, 16268, 2000, 2380, 132, 16184, 8,
- 16272, 16252, 1960, 2408, 160, 16176, 8,
- 16276, 16240, 1924, 2432, 184, 16172, 8,
- 16284, 16224, 1888, 2456, 212, 16164, 8,
- 16288, 16212, 1848, 2480, 240, 16160, 12,
- 16296, 16196, 1812, 2500, 268, 16152, 12,
- 16300, 16184, 1772, 2524, 296, 16144, 12,
- 16308, 16172, 1736, 2544, 324, 16140, 12,
- 16312, 16164, 1696, 2564, 356, 16136, 12,
- 16320, 16152, 1656, 2584, 388, 16128, 12,
- 16324, 16144, 1616, 2600, 416, 16124, 12,
- 16328, 16136, 1576, 2616, 448, 16116, 12,
- 16332, 16128, 1536, 2632, 480, 16112, 12,
- 16340, 16120, 1496, 2648, 516, 16108, 12,
- 16344, 16112, 1456, 2660, 548, 16104, 12,
- 16348, 16104, 1416, 2672, 580, 16100, 12,
- 16352, 16100, 1376, 2684, 616, 16096, 12,
- 16356, 16096, 1336, 2696, 652, 16092, 12,
- 16360, 16092, 1296, 2704, 688, 16088, 12,
- 16364, 16088, 1256, 2712, 720, 16084, 12,
- 16368, 16084, 1220, 2720, 760, 16084, 8,
- 16368, 16080, 1180, 2724, 796, 16080, 8,
- 16372, 16080, 1140, 2732, 832, 16080, 8,
- 16376, 16076, 1100, 2732, 868, 16076, 4,
- 16380, 16076, 1060, 2736, 908, 16076, 4,
- 16380, 16076, 1020, 2740, 944, 16076, 0,
- 0, 16076, 984, 2740, 984, 16076, 0 };
+//=========================================
+// <num_taps> = 7
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_7tap_64p_149[231] = {
+ 0x3F68, 0x3FEC, 0x08A8, 0x08AC, 0x3FF0, 0x3F68, 0x0000,
+ 0x3F70, 0x3FDC, 0x0888, 0x08CC, 0x0000, 0x3F60, 0x0000,
+ 0x3F74, 0x3FC8, 0x0868, 0x08F0, 0x0014, 0x3F58, 0x0000,
+ 0x3F7C, 0x3FB4, 0x0844, 0x0908, 0x002C, 0x3F54, 0x0004,
+ 0x3F84, 0x3FA4, 0x0820, 0x0924, 0x0044, 0x3F4C, 0x0004,
+ 0x3F88, 0x3F90, 0x0800, 0x0944, 0x005C, 0x3F44, 0x0004,
+ 0x3F90, 0x3F80, 0x07D8, 0x095C, 0x0074, 0x3F40, 0x0008,
+ 0x3F98, 0x3F70, 0x07B0, 0x097C, 0x008C, 0x3F38, 0x0008,
+ 0x3F9C, 0x3F60, 0x0790, 0x0994, 0x00A8, 0x3F30, 0x0008,
+ 0x3FA4, 0x3F54, 0x0764, 0x09B0, 0x00C4, 0x3F28, 0x0008,
+ 0x3FA8, 0x3F48, 0x0740, 0x09C4, 0x00DC, 0x3F24, 0x000C,
+ 0x3FB0, 0x3F38, 0x0718, 0x09DC, 0x00FC, 0x3F1C, 0x000C,
+ 0x3FB4, 0x3F2C, 0x06F0, 0x09F4, 0x0118, 0x3F18, 0x000C,
+ 0x3FBC, 0x3F24, 0x06C8, 0x0A08, 0x0134, 0x3F10, 0x000C,
+ 0x3FC0, 0x3F18, 0x06A0, 0x0A1C, 0x0154, 0x3F08, 0x0010,
+ 0x3FC8, 0x3F10, 0x0678, 0x0A2C, 0x0170, 0x3F04, 0x0010,
+ 0x3FCC, 0x3F04, 0x0650, 0x0A40, 0x0190, 0x3F00, 0x0010,
+ 0x3FD0, 0x3EFC, 0x0628, 0x0A54, 0x01B0, 0x3EF8, 0x0010,
+ 0x3FD4, 0x3EF4, 0x0600, 0x0A64, 0x01D0, 0x3EF4, 0x0010,
+ 0x3FDC, 0x3EEC, 0x05D8, 0x0A6C, 0x01F4, 0x3EF0, 0x0010,
+ 0x3FE0, 0x3EE8, 0x05B0, 0x0A7C, 0x0214, 0x3EE8, 0x0010,
+ 0x3FE4, 0x3EE0, 0x0588, 0x0A88, 0x0238, 0x3EE4, 0x0010,
+ 0x3FE8, 0x3EDC, 0x055C, 0x0A98, 0x0258, 0x3EE0, 0x0010,
+ 0x3FEC, 0x3ED8, 0x0534, 0x0AA0, 0x027C, 0x3EDC, 0x0010,
+ 0x3FF0, 0x3ED4, 0x050C, 0x0AAC, 0x02A0, 0x3ED8, 0x000C,
+ 0x3FF4, 0x3ED0, 0x04E4, 0x0AB4, 0x02C4, 0x3ED4, 0x000C,
+ 0x3FF4, 0x3ECC, 0x04C0, 0x0ABC, 0x02E8, 0x3ED0, 0x000C,
+ 0x3FF8, 0x3ECC, 0x0494, 0x0AC0, 0x030C, 0x3ED0, 0x000C,
+ 0x3FFC, 0x3EC8, 0x046C, 0x0AC8, 0x0334, 0x3ECC, 0x0008,
+ 0x0000, 0x3EC8, 0x0444, 0x0AC8, 0x0358, 0x3ECC, 0x0008,
+ 0x0000, 0x3EC8, 0x041C, 0x0ACC, 0x0380, 0x3EC8, 0x0008,
+ 0x0000, 0x3EC8, 0x03F4, 0x0AD0, 0x03A8, 0x3EC8, 0x0004,
+ 0x0004, 0x3EC8, 0x03CC, 0x0AD0, 0x03CC, 0x3EC8, 0x0004
+};
+//=========================================
+// <num_taps> = 7
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_7tap_64p_183[231] = {
- 16216, 324, 1884, 1884, 324, 16216, 0,
- 16220, 304, 1864, 1904, 344, 16216, 0,
- 16224, 284, 1844, 1924, 364, 16216, 0,
- 16224, 264, 1824, 1944, 384, 16212, 16380,
- 16228, 248, 1804, 1960, 408, 16212, 16380,
- 16228, 228, 1784, 1976, 428, 16208, 16380,
- 16232, 212, 1760, 1996, 452, 16208, 16380,
- 16236, 192, 1740, 2012, 472, 16208, 16376,
- 16240, 176, 1716, 2028, 496, 16208, 16376,
- 16240, 160, 1696, 2040, 516, 16208, 16376,
- 16244, 144, 1672, 2056, 540, 16208, 16376,
- 16248, 128, 1648, 2068, 564, 16208, 16372,
- 16252, 112, 1624, 2084, 588, 16208, 16372,
- 16256, 96, 1600, 2096, 612, 16208, 16368,
- 16256, 84, 1576, 2108, 636, 16208, 16368,
- 16260, 68, 1552, 2120, 660, 16208, 16368,
- 16264, 56, 1524, 2132, 684, 16212, 16364,
- 16268, 40, 1500, 2140, 712, 16212, 16364,
- 16272, 28, 1476, 2152, 736, 16216, 16360,
- 16276, 16, 1448, 2160, 760, 16216, 16356,
- 16280, 4, 1424, 2168, 788, 16220, 16356,
- 16284, 16376, 1396, 2176, 812, 16224, 16352,
- 16288, 16368, 1372, 2184, 840, 16224, 16352,
- 16292, 16356, 1344, 2188, 864, 16228, 16348,
- 16292, 16344, 1320, 2196, 892, 16232, 16344,
- 16296, 16336, 1292, 2200, 916, 16236, 16344,
- 16300, 16324, 1264, 2204, 944, 16240, 16340,
- 16304, 16316, 1240, 2208, 972, 16248, 16336,
- 16308, 16308, 1212, 2212, 996, 16252, 16332,
- 16312, 16300, 1184, 2216, 1024, 16256, 16332,
- 16316, 16292, 1160, 2216, 1052, 16264, 16328,
- 16316, 16284, 1132, 2216, 1076, 16268, 16324,
- 16320, 16276, 1104, 2216, 1104, 16276, 16320 };
+ 0x3FA4, 0x01E8, 0x0674, 0x0674, 0x01E8, 0x3FA4, 0x0000,
+ 0x3FA4, 0x01D4, 0x0668, 0x0684, 0x01F8, 0x3FA4, 0x0000,
+ 0x3FA4, 0x01C4, 0x0658, 0x0690, 0x0208, 0x3FA8, 0x0000,
+ 0x3FA0, 0x01B4, 0x064C, 0x06A0, 0x021C, 0x3FA8, 0x3FFC,
+ 0x3FA0, 0x01A4, 0x063C, 0x06AC, 0x022C, 0x3FAC, 0x3FFC,
+ 0x3FA0, 0x0194, 0x0630, 0x06B4, 0x0240, 0x3FAC, 0x3FFC,
+ 0x3FA0, 0x0184, 0x0620, 0x06C4, 0x0250, 0x3FB0, 0x3FF8,
+ 0x3FA0, 0x0174, 0x0614, 0x06CC, 0x0264, 0x3FB0, 0x3FF8,
+ 0x3FA0, 0x0164, 0x0604, 0x06D8, 0x0278, 0x3FB4, 0x3FF4,
+ 0x3FA0, 0x0154, 0x05F4, 0x06E4, 0x0288, 0x3FB8, 0x3FF4,
+ 0x3FA0, 0x0148, 0x05E4, 0x06EC, 0x029C, 0x3FBC, 0x3FF0,
+ 0x3FA0, 0x0138, 0x05D4, 0x06F4, 0x02B0, 0x3FC0, 0x3FF0,
+ 0x3FA0, 0x0128, 0x05C4, 0x0704, 0x02C4, 0x3FC0, 0x3FEC,
+ 0x3FA0, 0x011C, 0x05B4, 0x0708, 0x02D8, 0x3FC4, 0x3FEC,
+ 0x3FA4, 0x010C, 0x05A4, 0x0714, 0x02E8, 0x3FC8, 0x3FE8,
+ 0x3FA4, 0x0100, 0x0590, 0x0718, 0x02FC, 0x3FD0, 0x3FE8,
+ 0x3FA4, 0x00F0, 0x0580, 0x0724, 0x0310, 0x3FD4, 0x3FE4,
+ 0x3FA4, 0x00E4, 0x056C, 0x072C, 0x0324, 0x3FD8, 0x3FE4,
+ 0x3FA8, 0x00D8, 0x055C, 0x0730, 0x0338, 0x3FDC, 0x3FE0,
+ 0x3FA8, 0x00CC, 0x0548, 0x0738, 0x034C, 0x3FE4, 0x3FDC,
+ 0x3FA8, 0x00BC, 0x0538, 0x0740, 0x0360, 0x3FE8, 0x3FDC,
+ 0x3FAC, 0x00B0, 0x0528, 0x0744, 0x0374, 0x3FEC, 0x3FD8,
+ 0x3FAC, 0x00A4, 0x0514, 0x0748, 0x0388, 0x3FF4, 0x3FD8,
+ 0x3FB0, 0x0098, 0x0500, 0x074C, 0x039C, 0x3FFC, 0x3FD4,
+ 0x3FB0, 0x0090, 0x04EC, 0x0750, 0x03B0, 0x0000, 0x3FD4,
+ 0x3FB0, 0x0084, 0x04DC, 0x0758, 0x03C4, 0x0004, 0x3FD0,
+ 0x3FB4, 0x0078, 0x04CC, 0x0758, 0x03D8, 0x000C, 0x3FCC,
+ 0x3FB4, 0x006C, 0x04B8, 0x075C, 0x03EC, 0x0014, 0x3FCC,
+ 0x3FB8, 0x0064, 0x04A0, 0x0760, 0x0400, 0x001C, 0x3FC8,
+ 0x3FB8, 0x0058, 0x0490, 0x0760, 0x0414, 0x0024, 0x3FC8,
+ 0x3FBC, 0x0050, 0x047C, 0x0760, 0x0428, 0x002C, 0x3FC4,
+ 0x3FBC, 0x0048, 0x0464, 0x0764, 0x043C, 0x0034, 0x3FC4,
+ 0x3FC0, 0x003C, 0x0454, 0x0764, 0x0450, 0x003C, 0x3FC0
+};
+//=========================================
+// <num_taps> = 8
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_8tap_64p_upscale[264] = {
- 0, 0, 0, 4096, 0, 0, 0, 0,
- 16376, 20, 16328, 4092, 56, 16364, 4, 0,
- 16372, 36, 16272, 4088, 116, 16340, 12, 0,
- 16364, 56, 16220, 4080, 180, 16320, 20, 0,
- 16360, 76, 16172, 4064, 244, 16296, 24, 16380,
- 16356, 92, 16124, 4048, 312, 16276, 32, 16380,
- 16352, 108, 16080, 4032, 380, 16252, 40, 16380,
- 16344, 124, 16036, 4008, 452, 16228, 48, 16380,
- 16340, 136, 15996, 3980, 524, 16204, 56, 16380,
- 16340, 152, 15956, 3952, 600, 16180, 64, 16376,
- 16336, 164, 15920, 3920, 672, 16156, 76, 16376,
- 16332, 176, 15888, 3884, 752, 16132, 84, 16376,
- 16328, 188, 15860, 3844, 828, 16104, 92, 16372,
- 16328, 200, 15828, 3800, 908, 16080, 100, 16372,
- 16324, 208, 15804, 3756, 992, 16056, 108, 16372,
- 16324, 216, 15780, 3708, 1072, 16032, 120, 16368,
- 16320, 224, 15760, 3656, 1156, 16008, 128, 16368,
- 16320, 232, 15740, 3604, 1240, 15984, 136, 16364,
- 16320, 240, 15724, 3548, 1324, 15960, 144, 16364,
- 16320, 244, 15708, 3488, 1412, 15936, 152, 16360,
- 16320, 248, 15696, 3428, 1496, 15912, 160, 16360,
- 16320, 252, 15688, 3364, 1584, 15892, 172, 16356,
- 16320, 256, 15680, 3296, 1672, 15868, 180, 16352,
- 16320, 256, 15672, 3228, 1756, 15848, 188, 16352,
- 16320, 256, 15668, 3156, 1844, 15828, 192, 16348,
- 16320, 260, 15668, 3084, 1932, 15808, 200, 16348,
- 16320, 256, 15668, 3012, 2020, 15792, 208, 16344,
- 16324, 256, 15668, 2936, 2108, 15772, 216, 16344,
- 16324, 256, 15672, 2856, 2192, 15756, 220, 16340,
- 16324, 252, 15676, 2776, 2280, 15740, 228, 16336,
- 16328, 252, 15684, 2696, 2364, 15728, 232, 16336,
- 16328, 248, 15692, 2616, 2448, 15716, 240, 16332,
- 16332, 244, 15704, 2532, 2532, 15704, 244, 16332 };
+ 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x3FFC, 0x0014, 0x3FC8, 0x1000, 0x0038, 0x3FEC, 0x0004, 0x0000,
+ 0x3FF4, 0x0024, 0x3F94, 0x0FFC, 0x0074, 0x3FD8, 0x000C, 0x0000,
+ 0x3FF0, 0x0038, 0x3F60, 0x0FEC, 0x00B4, 0x3FC4, 0x0014, 0x0000,
+ 0x3FEC, 0x004C, 0x3F2C, 0x0FE4, 0x00F4, 0x3FAC, 0x0018, 0x0000,
+ 0x3FE4, 0x005C, 0x3F00, 0x0FD4, 0x0138, 0x3F94, 0x0020, 0x0000,
+ 0x3FE0, 0x006C, 0x3ED0, 0x0FC4, 0x017C, 0x3F7C, 0x0028, 0x0000,
+ 0x3FDC, 0x007C, 0x3EA8, 0x0FA4, 0x01C4, 0x3F68, 0x0030, 0x0000,
+ 0x3FD8, 0x0088, 0x3E80, 0x0F90, 0x020C, 0x3F50, 0x0038, 0x3FFC,
+ 0x3FD4, 0x0098, 0x3E58, 0x0F70, 0x0258, 0x3F38, 0x0040, 0x3FFC,
+ 0x3FD0, 0x00A4, 0x3E34, 0x0F54, 0x02A0, 0x3F1C, 0x004C, 0x3FFC,
+ 0x3FD0, 0x00B0, 0x3E14, 0x0F28, 0x02F0, 0x3F04, 0x0054, 0x3FFC,
+ 0x3FCC, 0x00BC, 0x3DF4, 0x0F08, 0x033C, 0x3EEC, 0x005C, 0x3FF8,
+ 0x3FC8, 0x00C8, 0x3DD8, 0x0EDC, 0x038C, 0x3ED4, 0x0064, 0x3FF8,
+ 0x3FC8, 0x00D0, 0x3DC0, 0x0EAC, 0x03E0, 0x3EBC, 0x006C, 0x3FF4,
+ 0x3FC4, 0x00D8, 0x3DA8, 0x0E7C, 0x0430, 0x3EA4, 0x0078, 0x3FF4,
+ 0x3FC4, 0x00E0, 0x3D94, 0x0E48, 0x0484, 0x3E8C, 0x0080, 0x3FF0,
+ 0x3FC4, 0x00E8, 0x3D80, 0x0E10, 0x04D8, 0x3E74, 0x0088, 0x3FF0,
+ 0x3FC4, 0x00F0, 0x3D70, 0x0DD8, 0x052C, 0x3E5C, 0x0090, 0x3FEC,
+ 0x3FC0, 0x00F4, 0x3D60, 0x0DA0, 0x0584, 0x3E44, 0x0098, 0x3FEC,
+ 0x3FC0, 0x00F8, 0x3D54, 0x0D68, 0x05D8, 0x3E2C, 0x00A0, 0x3FE8,
+ 0x3FC0, 0x00FC, 0x3D48, 0x0D20, 0x0630, 0x3E18, 0x00AC, 0x3FE8,
+ 0x3FC0, 0x0100, 0x3D40, 0x0CE0, 0x0688, 0x3E00, 0x00B4, 0x3FE4,
+ 0x3FC4, 0x0100, 0x3D3C, 0x0C98, 0x06DC, 0x3DEC, 0x00BC, 0x3FE4,
+ 0x3FC4, 0x0100, 0x3D38, 0x0C58, 0x0734, 0x3DD8, 0x00C0, 0x3FE0,
+ 0x3FC4, 0x0104, 0x3D38, 0x0C0C, 0x078C, 0x3DC4, 0x00C8, 0x3FDC,
+ 0x3FC4, 0x0100, 0x3D38, 0x0BC4, 0x07E4, 0x3DB0, 0x00D0, 0x3FDC,
+ 0x3FC4, 0x0100, 0x3D38, 0x0B78, 0x083C, 0x3DA0, 0x00D8, 0x3FD8,
+ 0x3FC8, 0x0100, 0x3D3C, 0x0B28, 0x0890, 0x3D90, 0x00DC, 0x3FD8,
+ 0x3FC8, 0x00FC, 0x3D40, 0x0ADC, 0x08E8, 0x3D80, 0x00E4, 0x3FD4,
+ 0x3FCC, 0x00FC, 0x3D48, 0x0A84, 0x093C, 0x3D74, 0x00E8, 0x3FD4,
+ 0x3FCC, 0x00F8, 0x3D50, 0x0A38, 0x0990, 0x3D64, 0x00F0, 0x3FD0,
+ 0x3FD0, 0x00F4, 0x3D58, 0x09E0, 0x09E4, 0x3D5C, 0x00F4, 0x3FD0
+};
-static const uint16_t filter_8tap_64p_117[264] = {
- 116, 16100, 428, 3564, 428, 16100, 116, 0,
- 112, 16116, 376, 3564, 484, 16084, 120, 16380,
- 104, 16136, 324, 3560, 540, 16064, 124, 16380,
- 100, 16152, 272, 3556, 600, 16048, 128, 16380,
- 96, 16168, 220, 3548, 656, 16032, 136, 16376,
- 88, 16188, 172, 3540, 716, 16016, 140, 16376,
- 84, 16204, 124, 3528, 780, 16000, 144, 16376,
- 80, 16220, 76, 3512, 840, 15984, 148, 16372,
- 76, 16236, 32, 3496, 904, 15968, 152, 16372,
- 68, 16252, 16376, 3480, 968, 15952, 156, 16372,
- 64, 16268, 16332, 3456, 1032, 15936, 160, 16372,
- 60, 16284, 16292, 3432, 1096, 15920, 164, 16368,
- 56, 16300, 16252, 3408, 1164, 15908, 164, 16368,
- 48, 16316, 16216, 3380, 1228, 15892, 168, 16368,
- 44, 16332, 16180, 3348, 1296, 15880, 168, 16368,
- 40, 16348, 16148, 3316, 1364, 15868, 172, 16364,
- 36, 16360, 16116, 3284, 1428, 15856, 172, 16364,
- 32, 16376, 16084, 3248, 1496, 15848, 176, 16364,
- 28, 4, 16052, 3208, 1564, 15836, 176, 16364,
- 24, 16, 16028, 3168, 1632, 15828, 176, 16364,
- 20, 28, 16000, 3124, 1700, 15820, 176, 16364,
- 16, 40, 15976, 3080, 1768, 15812, 176, 16364,
- 12, 52, 15952, 3036, 1836, 15808, 176, 16364,
- 8, 64, 15932, 2988, 1904, 15800, 176, 16364,
- 4, 76, 15912, 2940, 1972, 15800, 172, 16364,
- 4, 84, 15892, 2888, 2040, 15796, 172, 16364,
- 0, 96, 15876, 2836, 2104, 15792, 168, 16364,
- 16380, 104, 15864, 2780, 2172, 15792, 164, 16364,
- 16380, 112, 15848, 2724, 2236, 15792, 160, 16364,
- 16376, 120, 15836, 2668, 2300, 15796, 156, 16368,
- 16376, 128, 15828, 2608, 2364, 15800, 152, 16368,
- 16372, 136, 15816, 2548, 2428, 15804, 148, 16368,
- 16372, 140, 15812, 2488, 2488, 15812, 140, 16372 };
+//=========================================
+// <num_taps> = 8
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_8tap_64p_116[264] = {
+ 0x0080, 0x3E90, 0x0268, 0x0D14, 0x0264, 0x3E90, 0x0080, 0x0000,
+ 0x007C, 0x3E9C, 0x0238, 0x0D14, 0x0298, 0x3E84, 0x0080, 0x0000,
+ 0x0078, 0x3EAC, 0x0200, 0x0D10, 0x02D0, 0x3E78, 0x0084, 0x0000,
+ 0x0078, 0x3EB8, 0x01D0, 0x0D0C, 0x0304, 0x3E6C, 0x0084, 0x0000,
+ 0x0074, 0x3EC8, 0x01A0, 0x0D00, 0x033C, 0x3E60, 0x0088, 0x0000,
+ 0x0070, 0x3ED4, 0x0170, 0x0D00, 0x0374, 0x3E54, 0x0088, 0x3FFC,
+ 0x006C, 0x3EE4, 0x0140, 0x0CF8, 0x03AC, 0x3E48, 0x0088, 0x3FFC,
+ 0x006C, 0x3EF0, 0x0114, 0x0CE8, 0x03E4, 0x3E3C, 0x008C, 0x3FFC,
+ 0x0068, 0x3F00, 0x00E8, 0x0CD8, 0x041C, 0x3E34, 0x008C, 0x3FFC,
+ 0x0064, 0x3F10, 0x00BC, 0x0CCC, 0x0454, 0x3E28, 0x008C, 0x3FFC,
+ 0x0060, 0x3F1C, 0x0090, 0x0CBC, 0x0490, 0x3E20, 0x008C, 0x3FFC,
+ 0x005C, 0x3F2C, 0x0068, 0x0CA4, 0x04CC, 0x3E18, 0x008C, 0x3FFC,
+ 0x0058, 0x3F38, 0x0040, 0x0C94, 0x0504, 0x3E10, 0x008C, 0x3FFC,
+ 0x0054, 0x3F48, 0x001C, 0x0C7C, 0x0540, 0x3E08, 0x0088, 0x3FFC,
+ 0x0050, 0x3F54, 0x3FF8, 0x0C60, 0x057C, 0x3E04, 0x0088, 0x3FFC,
+ 0x004C, 0x3F64, 0x3FD4, 0x0C44, 0x05B8, 0x3DFC, 0x0088, 0x3FFC,
+ 0x0048, 0x3F70, 0x3FB4, 0x0C28, 0x05F4, 0x3DF8, 0x0084, 0x3FFC,
+ 0x0044, 0x3F80, 0x3F90, 0x0C0C, 0x0630, 0x3DF4, 0x0080, 0x3FFC,
+ 0x0040, 0x3F8C, 0x3F70, 0x0BE8, 0x066C, 0x3DF4, 0x0080, 0x3FFC,
+ 0x003C, 0x3F9C, 0x3F50, 0x0BC8, 0x06A8, 0x3DF0, 0x007C, 0x3FFC,
+ 0x0038, 0x3FA8, 0x3F34, 0x0BA0, 0x06E4, 0x3DF0, 0x0078, 0x0000,
+ 0x0034, 0x3FB4, 0x3F18, 0x0B80, 0x071C, 0x3DF0, 0x0074, 0x0000,
+ 0x0030, 0x3FC0, 0x3EFC, 0x0B5C, 0x0758, 0x3DF0, 0x0070, 0x0000,
+ 0x002C, 0x3FCC, 0x3EE4, 0x0B34, 0x0794, 0x3DF4, 0x0068, 0x0000,
+ 0x002C, 0x3FDC, 0x3ECC, 0x0B08, 0x07CC, 0x3DF4, 0x0064, 0x0000,
+ 0x0028, 0x3FE4, 0x3EB4, 0x0AE0, 0x0808, 0x3DF8, 0x0060, 0x0000,
+ 0x0024, 0x3FF0, 0x3EA0, 0x0AB0, 0x0840, 0x3E00, 0x0058, 0x0004,
+ 0x0020, 0x3FFC, 0x3E90, 0x0A84, 0x0878, 0x3E04, 0x0050, 0x0004,
+ 0x001C, 0x0004, 0x3E7C, 0x0A54, 0x08B0, 0x3E0C, 0x004C, 0x0008,
+ 0x0018, 0x000C, 0x3E68, 0x0A28, 0x08E8, 0x3E18, 0x0044, 0x0008,
+ 0x0018, 0x0018, 0x3E54, 0x09F4, 0x0920, 0x3E20, 0x003C, 0x000C,
+ 0x0014, 0x0020, 0x3E48, 0x09C0, 0x0954, 0x3E2C, 0x0034, 0x0010,
+ 0x0010, 0x002C, 0x3E3C, 0x098C, 0x0988, 0x3E38, 0x002C, 0x0010
+};
-static const uint16_t filter_8tap_64p_150[264] = {
- 16380, 16020, 1032, 2756, 1032, 16020, 16380, 0,
- 0, 16020, 992, 2756, 1068, 16024, 16376, 0,
- 4, 16020, 952, 2752, 1108, 16024, 16372, 0,
- 8, 16020, 916, 2748, 1148, 16028, 16368, 0,
- 12, 16020, 876, 2744, 1184, 16032, 16364, 4,
- 16, 16020, 840, 2740, 1224, 16036, 16356, 4,
- 20, 16024, 800, 2732, 1264, 16040, 16352, 4,
- 20, 16024, 764, 2724, 1304, 16044, 16348, 8,
- 24, 16028, 728, 2716, 1344, 16052, 16340, 8,
- 28, 16028, 692, 2704, 1380, 16056, 16336, 12,
- 28, 16032, 656, 2696, 1420, 16064, 16328, 12,
- 32, 16036, 620, 2684, 1460, 16072, 16324, 12,
- 36, 16040, 584, 2668, 1500, 16080, 16316, 16,
- 36, 16044, 548, 2656, 1536, 16088, 16308, 16,
- 36, 16048, 516, 2640, 1576, 16096, 16304, 20,
- 40, 16052, 480, 2624, 1612, 16108, 16296, 20,
- 40, 16060, 448, 2608, 1652, 16120, 16288, 20,
- 44, 16064, 416, 2588, 1692, 16132, 16280, 24,
- 44, 16068, 384, 2568, 1728, 16144, 16276, 24,
- 44, 16076, 352, 2548, 1764, 16156, 16268, 28,
- 44, 16080, 320, 2528, 1804, 16168, 16260, 28,
- 44, 16088, 292, 2508, 1840, 16184, 16252, 28,
- 44, 16096, 264, 2484, 1876, 16200, 16244, 32,
- 48, 16100, 232, 2460, 1912, 16216, 16236, 32,
- 48, 16108, 204, 2436, 1948, 16232, 16228, 32,
- 48, 16116, 176, 2412, 1980, 16248, 16220, 36,
- 48, 16124, 152, 2384, 2016, 16264, 16216, 36,
- 44, 16128, 124, 2356, 2052, 16284, 16208, 36,
- 44, 16136, 100, 2328, 2084, 16304, 16200, 40,
- 44, 16144, 72, 2300, 2116, 16324, 16192, 40,
- 44, 16152, 48, 2272, 2148, 16344, 16184, 40,
- 44, 16160, 24, 2244, 2180, 16364, 16176, 40,
- 44, 16168, 4, 2212, 2212, 4, 16168, 44 };
+//=========================================
+// <num_taps> = 8
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_8tap_64p_149[264] = {
+ 0x0008, 0x3E8C, 0x03F8, 0x0AE8, 0x03F8, 0x3E8C, 0x0008, 0x0000,
+ 0x000C, 0x3E8C, 0x03D0, 0x0AE8, 0x0420, 0x3E90, 0x0000, 0x0000,
+ 0x000C, 0x3E8C, 0x03AC, 0x0AE8, 0x0444, 0x3E90, 0x0000, 0x0000,
+ 0x0010, 0x3E90, 0x0384, 0x0AE0, 0x046C, 0x3E94, 0x3FFC, 0x0000,
+ 0x0014, 0x3E90, 0x035C, 0x0ADC, 0x0494, 0x3E94, 0x3FF8, 0x0004,
+ 0x0018, 0x3E90, 0x0334, 0x0AD8, 0x04BC, 0x3E98, 0x3FF4, 0x0004,
+ 0x001C, 0x3E94, 0x0310, 0x0AD0, 0x04E4, 0x3E9C, 0x3FEC, 0x0004,
+ 0x0020, 0x3E98, 0x02E8, 0x0AC4, 0x050C, 0x3EA0, 0x3FE8, 0x0008,
+ 0x0020, 0x3E98, 0x02C4, 0x0AC0, 0x0534, 0x3EA4, 0x3FE4, 0x0008,
+ 0x0024, 0x3E9C, 0x02A0, 0x0AB4, 0x055C, 0x3EAC, 0x3FDC, 0x0008,
+ 0x0024, 0x3EA0, 0x027C, 0x0AA8, 0x0584, 0x3EB0, 0x3FD8, 0x000C,
+ 0x0028, 0x3EA4, 0x0258, 0x0A9C, 0x05AC, 0x3EB8, 0x3FD0, 0x000C,
+ 0x0028, 0x3EA8, 0x0234, 0x0A90, 0x05D4, 0x3EC0, 0x3FC8, 0x0010,
+ 0x002C, 0x3EAC, 0x0210, 0x0A80, 0x05FC, 0x3EC8, 0x3FC4, 0x0010,
+ 0x002C, 0x3EB4, 0x01F0, 0x0A70, 0x0624, 0x3ED0, 0x3FBC, 0x0010,
+ 0x002C, 0x3EB8, 0x01CC, 0x0A60, 0x064C, 0x3EDC, 0x3FB4, 0x0014,
+ 0x0030, 0x3EBC, 0x01A8, 0x0A50, 0x0674, 0x3EE4, 0x3FB0, 0x0014,
+ 0x0030, 0x3EC4, 0x0188, 0x0A38, 0x069C, 0x3EF0, 0x3FA8, 0x0018,
+ 0x0030, 0x3ECC, 0x0168, 0x0A28, 0x06C0, 0x3EFC, 0x3FA0, 0x0018,
+ 0x0030, 0x3ED0, 0x0148, 0x0A14, 0x06E8, 0x3F08, 0x3F98, 0x001C,
+ 0x0030, 0x3ED8, 0x012C, 0x0A00, 0x070C, 0x3F14, 0x3F90, 0x001C,
+ 0x0034, 0x3EE0, 0x0108, 0x09E4, 0x0734, 0x3F24, 0x3F8C, 0x001C,
+ 0x0034, 0x3EE4, 0x00EC, 0x09CC, 0x0758, 0x3F34, 0x3F84, 0x0020,
+ 0x0034, 0x3EEC, 0x00D0, 0x09B8, 0x077C, 0x3F40, 0x3F7C, 0x0020,
+ 0x0034, 0x3EF4, 0x00B4, 0x0998, 0x07A4, 0x3F50, 0x3F74, 0x0024,
+ 0x0030, 0x3EFC, 0x0098, 0x0980, 0x07C8, 0x3F64, 0x3F6C, 0x0024,
+ 0x0030, 0x3F04, 0x0080, 0x0968, 0x07E8, 0x3F74, 0x3F64, 0x0024,
+ 0x0030, 0x3F0C, 0x0060, 0x094C, 0x080C, 0x3F88, 0x3F5C, 0x0028,
+ 0x0030, 0x3F14, 0x0048, 0x0930, 0x0830, 0x3F98, 0x3F54, 0x0028,
+ 0x0030, 0x3F1C, 0x0030, 0x0914, 0x0850, 0x3FAC, 0x3F4C, 0x0028,
+ 0x0030, 0x3F24, 0x0018, 0x08F0, 0x0874, 0x3FC0, 0x3F44, 0x002C,
+ 0x002C, 0x3F2C, 0x0000, 0x08D4, 0x0894, 0x3FD8, 0x3F3C, 0x002C,
+ 0x002C, 0x3F34, 0x3FEC, 0x08B4, 0x08B4, 0x3FEC, 0x3F34, 0x002C
+};
+//=========================================
+// <num_taps> = 8
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_8tap_64p_183[264] = {
- 16264, 16264, 1164, 2244, 1164, 16264, 16264, 0,
- 16268, 16256, 1136, 2240, 1188, 16272, 16260, 0,
- 16272, 16248, 1108, 2240, 1216, 16280, 16256, 0,
- 16276, 16240, 1080, 2236, 1240, 16292, 16252, 0,
- 16280, 16232, 1056, 2236, 1268, 16300, 16248, 0,
- 16284, 16224, 1028, 2232, 1292, 16312, 16244, 0,
- 16288, 16216, 1000, 2228, 1320, 16324, 16240, 0,
- 16292, 16212, 976, 2224, 1344, 16336, 16236, 0,
- 16296, 16204, 948, 2220, 1372, 16348, 16232, 0,
- 16300, 16200, 920, 2212, 1396, 16360, 16228, 4,
- 16304, 16196, 896, 2204, 1424, 16372, 16224, 4,
- 16308, 16188, 868, 2200, 1448, 0, 16220, 4,
- 16312, 16184, 844, 2192, 1472, 12, 16216, 4,
- 16316, 16180, 816, 2184, 1500, 28, 16212, 4,
- 16320, 16176, 792, 2172, 1524, 40, 16208, 4,
- 16324, 16172, 764, 2164, 1548, 56, 16204, 0,
- 16328, 16172, 740, 2156, 1572, 72, 16200, 0,
- 16328, 16168, 712, 2144, 1596, 88, 16196, 0,
- 16332, 16164, 688, 2132, 1620, 100, 16192, 0,
- 16336, 16164, 664, 2120, 1644, 120, 16192, 0,
- 16340, 16160, 640, 2108, 1668, 136, 16188, 0,
- 16344, 16160, 616, 2096, 1688, 152, 16184, 0,
- 16344, 16160, 592, 2080, 1712, 168, 16180, 0,
- 16348, 16156, 568, 2068, 1736, 188, 16176, 16380,
- 16352, 16156, 544, 2052, 1756, 204, 16176, 16380,
- 16352, 16156, 520, 2036, 1780, 224, 16172, 16380,
- 16356, 16156, 496, 2024, 1800, 244, 16172, 16380,
- 16360, 16156, 472, 2008, 1820, 260, 16168, 16376,
- 16360, 16156, 452, 1988, 1840, 280, 16164, 16376,
- 16364, 16156, 428, 1972, 1860, 300, 16164, 16376,
- 16364, 16156, 408, 1956, 1880, 320, 16164, 16372,
- 16368, 16160, 384, 1936, 1900, 344, 16160, 16372,
- 16368, 16160, 364, 1920, 1920, 364, 16160, 16368 };
+ 0x3F88, 0x0048, 0x047C, 0x0768, 0x047C, 0x0048, 0x3F88, 0x0000,
+ 0x3F88, 0x003C, 0x0468, 0x076C, 0x0490, 0x0054, 0x3F84, 0x0000,
+ 0x3F8C, 0x0034, 0x0454, 0x0768, 0x04A4, 0x005C, 0x3F84, 0x0000,
+ 0x3F8C, 0x0028, 0x0444, 0x076C, 0x04B4, 0x0068, 0x3F80, 0x0000,
+ 0x3F90, 0x0020, 0x042C, 0x0768, 0x04C8, 0x0074, 0x3F80, 0x0000,
+ 0x3F90, 0x0018, 0x041C, 0x0764, 0x04DC, 0x0080, 0x3F7C, 0x0000,
+ 0x3F94, 0x0010, 0x0408, 0x075C, 0x04F0, 0x008C, 0x3F7C, 0x0000,
+ 0x3F94, 0x0004, 0x03F8, 0x0760, 0x0500, 0x0098, 0x3F7C, 0x3FFC,
+ 0x3F98, 0x0000, 0x03E0, 0x075C, 0x0514, 0x00A4, 0x3F78, 0x3FFC,
+ 0x3F9C, 0x3FF8, 0x03CC, 0x0754, 0x0528, 0x00B0, 0x3F78, 0x3FFC,
+ 0x3F9C, 0x3FF0, 0x03B8, 0x0754, 0x0538, 0x00BC, 0x3F78, 0x3FFC,
+ 0x3FA0, 0x3FE8, 0x03A4, 0x0750, 0x054C, 0x00CC, 0x3F74, 0x3FF8,
+ 0x3FA4, 0x3FE0, 0x0390, 0x074C, 0x055C, 0x00D8, 0x3F74, 0x3FF8,
+ 0x3FA4, 0x3FDC, 0x037C, 0x0744, 0x0570, 0x00E4, 0x3F74, 0x3FF8,
+ 0x3FA8, 0x3FD4, 0x0368, 0x0740, 0x0580, 0x00F4, 0x3F74, 0x3FF4,
+ 0x3FA8, 0x3FCC, 0x0354, 0x073C, 0x0590, 0x0104, 0x3F74, 0x3FF4,
+ 0x3FAC, 0x3FC8, 0x0340, 0x0730, 0x05A4, 0x0110, 0x3F74, 0x3FF4,
+ 0x3FB0, 0x3FC0, 0x0330, 0x0728, 0x05B4, 0x0120, 0x3F74, 0x3FF0,
+ 0x3FB0, 0x3FBC, 0x031C, 0x0724, 0x05C4, 0x0130, 0x3F70, 0x3FF0,
+ 0x3FB4, 0x3FB4, 0x0308, 0x0720, 0x05D4, 0x013C, 0x3F70, 0x3FF0,
+ 0x3FB8, 0x3FB0, 0x02F4, 0x0714, 0x05E4, 0x014C, 0x3F74, 0x3FEC,
+ 0x3FB8, 0x3FAC, 0x02E0, 0x0708, 0x05F8, 0x015C, 0x3F74, 0x3FEC,
+ 0x3FBC, 0x3FA8, 0x02CC, 0x0704, 0x0604, 0x016C, 0x3F74, 0x3FE8,
+ 0x3FC0, 0x3FA0, 0x02BC, 0x06F8, 0x0614, 0x017C, 0x3F74, 0x3FE8,
+ 0x3FC0, 0x3F9C, 0x02A8, 0x06F4, 0x0624, 0x018C, 0x3F74, 0x3FE4,
+ 0x3FC4, 0x3F98, 0x0294, 0x06E8, 0x0634, 0x019C, 0x3F74, 0x3FE4,
+ 0x3FC8, 0x3F94, 0x0284, 0x06D8, 0x0644, 0x01AC, 0x3F78, 0x3FE0,
+ 0x3FC8, 0x3F90, 0x0270, 0x06D4, 0x0650, 0x01BC, 0x3F78, 0x3FE0,
+ 0x3FCC, 0x3F8C, 0x025C, 0x06C8, 0x0660, 0x01D0, 0x3F78, 0x3FDC,
+ 0x3FCC, 0x3F8C, 0x024C, 0x06B8, 0x066C, 0x01E0, 0x3F7C, 0x3FDC,
+ 0x3FD0, 0x3F88, 0x0238, 0x06B0, 0x067C, 0x01F0, 0x3F7C, 0x3FD8,
+ 0x3FD4, 0x3F84, 0x0228, 0x069C, 0x0688, 0x0204, 0x3F80, 0x3FD8,
+ 0x3FD4, 0x3F84, 0x0214, 0x0694, 0x0694, 0x0214, 0x3F84, 0x3FD4
+};
const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio)
{
if (ratio.value < dc_fixpt_one.value)
return filter_3tap_16p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_3tap_16p_117;
+ return filter_3tap_16p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_3tap_16p_150;
+ return filter_3tap_16p_149;
else
return filter_3tap_16p_183;
}
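The same bucketing is repeated by every selector below: ratios under 1.0 take the upscale table, under 4/3 the 1.16666 table, under 5/3 the 1.49999 table, and anything larger the 1.83332 table. A hedged usage sketch, reusing the dc_fixpt_from_fraction helper already used above:

	/* A 1.25x downscale lies in [1, 4/3), so the 1.16666 table is chosen. */
	const uint16_t *coeffs =
		get_filter_3tap_16p(dc_fixpt_from_fraction(5, 4));
	/* coeffs == filter_3tap_16p_116 */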
@@ -1029,9 +1355,9 @@ const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_3tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_3tap_64p_117;
+ return filter_3tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_3tap_64p_150;
+ return filter_3tap_64p_149;
else
return filter_3tap_64p_183;
}
@@ -1041,9 +1367,9 @@ const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_4tap_16p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_4tap_16p_117;
+ return filter_4tap_16p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_4tap_16p_150;
+ return filter_4tap_16p_149;
else
return filter_4tap_16p_183;
}
@@ -1053,9 +1379,9 @@ const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_4tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_4tap_64p_117;
+ return filter_4tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_4tap_64p_150;
+ return filter_4tap_64p_149;
else
return filter_4tap_64p_183;
}
@@ -1065,9 +1391,9 @@ const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_5tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_5tap_64p_117;
+ return filter_5tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_5tap_64p_150;
+ return filter_5tap_64p_149;
else
return filter_5tap_64p_183;
}
@@ -1077,9 +1403,9 @@ const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_6tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_6tap_64p_117;
+ return filter_6tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_6tap_64p_150;
+ return filter_6tap_64p_149;
else
return filter_6tap_64p_183;
}
@@ -1089,9 +1415,9 @@ const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_7tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_7tap_64p_117;
+ return filter_7tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_7tap_64p_150;
+ return filter_7tap_64p_149;
else
return filter_7tap_64p_183;
}
@@ -1101,9 +1427,9 @@ const uint16_t *get_filter_8tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_8tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_8tap_64p_117;
+ return filter_8tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_8tap_64p_150;
+ return filter_8tap_64p_149;
else
return filter_8tap_64p_183;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c
new file mode 100644
index 000000000000..bb0e1b80ec3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 225955ec6d39..bc109d4fc6e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -27,25 +27,55 @@
#include "dc.h"
#include "dc_dmub_srv.h"
#include "../../dmub/inc/dmub_srv.h"
-#include "dmub_fw_state.h"
+#include "../../dmub/inc/dmub_gpint_cmd.h"
#include "core_types.h"
-#include "ipp.h"
#define MAX_PIPES 6
/**
* Get PSR state from firmware.
*/
-static void dmub_get_psr_state(uint32_t *psr_state)
+static void dmub_psr_get_state(struct dmub_psr *dmub, uint32_t *psr_state)
{
- // Not yet implemented
- // Trigger GPINT interrupt from firmware
+ struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
+
+ // Send gpint command and wait for ack
+ dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
+
+ dmub_srv_get_gpint_response(srv, psr_state);
+}
+
+/**
+ * Set PSR version.
+ */
+static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *stream)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+
+ cmd.psr_set_version.header.type = DMUB_CMD__PSR;
+ cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION;
+
+ if (stream->psr_version == 0x0) // Unsupported
+ return false;
+ else if (stream->psr_version == 0x1)
+ cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_1;
+ else if (stream->psr_version == 0x2)
+ cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_2;
+ else // unknown version: bail out instead of sending an uninitialized value
+ return false;
+
+ cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
}
/**
* Enable/Disable PSR.
*/
-static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable)
+static void dmub_psr_enable(struct dmub_psr *dmub, bool enable)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
@@ -67,13 +97,13 @@ static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable)
/**
* Set PSR level.
*/
-static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level)
+static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
{
union dmub_rb_cmd cmd;
uint32_t psr_state = 0;
struct dc_context *dc = dmub->ctx;
- dmub_get_psr_state(&psr_state);
+ dmub_psr_get_state(dmub, &psr_state);
if (psr_state == 0)
return;
@@ -91,7 +121,7 @@ static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level)
/**
* Setup PSR by programming phy registers and sending psr hw context values to firmware.
*/
-static bool dmub_setup_psr(struct dmub_psr *dmub,
+static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
struct dc_link *link,
struct psr_context *psr_context)
{
@@ -101,21 +131,22 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
= &cmd.psr_copy_settings.psr_copy_settings_data;
struct pipe_ctx *pipe_ctx = NULL;
struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
+ int i = 0;
- for (int i = 0; i < MAX_PIPES; i++) {
- if (res_ctx &&
- res_ctx->pipe_ctx[i].stream &&
- res_ctx->pipe_ctx[i].stream->link &&
- res_ctx->pipe_ctx[i].stream->link == link &&
- res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx->pipe_ctx[i].stream &&
+ res_ctx->pipe_ctx[i].stream->link == link &&
+ res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
pipe_ctx = &res_ctx->pipe_ctx[i];
break;
}
}
- if (!pipe_ctx ||
- !&pipe_ctx->plane_res ||
- !&pipe_ctx->stream_res)
+ if (!pipe_ctx)
+ return false;
+
+ // First, set the psr version
+ if (!dmub_psr_set_version(dmub, pipe_ctx->stream))
return false;
// Program DP DPHY fast training registers
@@ -138,10 +169,6 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
copy_settings_data->mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
- if (pipe_ctx->plane_res.hubp)
- copy_settings_data->hubp_inst = pipe_ctx->plane_res.hubp->inst;
- else
- copy_settings_data->hubp_inst = 0;
if (pipe_ctx->plane_res.dpp)
copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst;
else
@@ -157,18 +184,9 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
// Misc
copy_settings_data->psr_level = psr_context->psr_level.u32all;
- copy_settings_data->hyst_frames = psr_context->timehyst_frames;
- copy_settings_data->hyst_lines = psr_context->hyst_lines;
- copy_settings_data->phy_type = psr_context->phyType;
- copy_settings_data->aux_repeat = psr_context->aux_repeats;
- copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations;
- copy_settings_data->skip_wait_for_pll_lock = psr_context->skipPsrWaitForPllLock;
+ copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations;
copy_settings_data->frame_delay = psr_context->frame_delay;
- copy_settings_data->smu_phy_id = psr_context->smuPhyId;
- copy_settings_data->num_of_controllers = psr_context->numberOfControllers;
copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq;
- copy_settings_data->phy_num = psr_context->frame_delay & 0x7;
- copy_settings_data->link_rate = psr_context->frame_delay & 0xF;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
@@ -178,10 +196,10 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
}
static const struct dmub_psr_funcs psr_funcs = {
- .set_psr_enable = dmub_set_psr_enable,
- .setup_psr = dmub_setup_psr,
- .get_psr_state = dmub_get_psr_state,
- .set_psr_level = dmub_set_psr_level,
+ .psr_copy_settings = dmub_psr_copy_settings,
+ .psr_enable = dmub_psr_enable,
+ .psr_get_state = dmub_psr_get_state,
+ .psr_set_level = dmub_psr_set_level,
};
/**
@@ -215,6 +233,6 @@ struct dmub_psr *dmub_psr_create(struct dc_context *ctx)
*/
void dmub_psr_destroy(struct dmub_psr **dmub)
{
- kfree(dmub);
+ kfree(*dmub);
*dmub = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
index 229958de3035..f404fecd6410 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -27,6 +27,7 @@
#define _DMUB_PSR_H_
#include "os_types.h"
+#include "dc_link.h"
struct dmub_psr {
struct dc_context *ctx;
@@ -34,14 +35,14 @@ struct dmub_psr {
};
struct dmub_psr_funcs {
- void (*set_psr_enable)(struct dmub_psr *dmub, bool enable);
- bool (*setup_psr)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
- void (*get_psr_state)(uint32_t *psr_state);
- void (*set_psr_level)(struct dmub_psr *dmub, uint16_t psr_level);
+ bool (*psr_copy_settings)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
+ void (*psr_enable)(struct dmub_psr *dmub, bool enable);
+ void (*psr_get_state)(struct dmub_psr *dmub, uint32_t *psr_state);
+ void (*psr_set_level)(struct dmub_psr *dmub, uint16_t psr_level);
};
struct dmub_psr *dmub_psr_create(struct dc_context *ctx);
void dmub_psr_destroy(struct dmub_psr **dmub);
-#endif /* _DCE_DMUB_H_ */
+#endif /* _DMUB_PSR_H_ */
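With the rename, every hook now receives the dmub_psr instance, including psr_get_state, which previously took only an out-pointer. A caller-side sketch; dmub, link and psr_context are assumed to have been set up elsewhere:

	uint32_t psr_state = 0;

	if (dmub->funcs->psr_copy_settings(dmub, link, &psr_context))
		dmub->funcs->psr_enable(dmub, true);

	dmub->funcs->psr_get_state(dmub, &psr_state);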
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 5b689273ff44..0976e378659f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -71,6 +71,8 @@
#define PANEL_POWER_UP_TIMEOUT 300
#define PANEL_POWER_DOWN_TIMEOUT 500
#define HPD_CHECK_INTERVAL 10
+#define OLED_POST_T7_DELAY 100
+#define OLED_PRE_T11_DELAY 150
#define CTX \
hws->ctx
@@ -696,8 +698,10 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
}
/*todo: cloned in stream enc, fix*/
-static bool is_panel_backlight_on(struct dce_hwseq *hws)
+bool dce110_is_panel_backlight_on(struct dc_link *link)
{
+ struct dc_context *ctx = link->ctx;
+ struct dce_hwseq *hws = ctx->dc->hwseq;
uint32_t value;
REG_GET(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, &value);
@@ -705,11 +709,12 @@ static bool is_panel_backlight_on(struct dce_hwseq *hws)
return value;
}
-static bool is_panel_powered_on(struct dce_hwseq *hws)
+bool dce110_is_panel_powered_on(struct dc_link *link)
{
+ struct dc_context *ctx = link->ctx;
+ struct dce_hwseq *hws = ctx->dc->hwseq;
uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
-
REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
REG_GET_2(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
@@ -816,7 +821,7 @@ void dce110_edp_power_control(
return;
}
- if (power_up != is_panel_powered_on(hwseq)) {
+ if (power_up != hwseq->funcs.is_panel_powered_on(link)) {
/* Send VBIOS command to prompt eDP panel power */
if (power_up) {
unsigned long long current_ts = dm_get_timestamp(ctx);
@@ -896,7 +901,7 @@ void dce110_edp_backlight_control(
return;
}
- if (enable && is_panel_backlight_on(hws)) {
+ if (enable && hws->funcs.is_panel_backlight_on(link)) {
DC_LOG_HW_RESUME_S3(
"%s: panel already powered up. Do nothing.\n",
__func__);
@@ -936,9 +941,21 @@ void dce110_edp_backlight_control(
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
edp_receiver_ready_T7(link);
link_transmitter_control(ctx->dc_bios, &cntl);
+
+ if (enable && link->dpcd_sink_ext_caps.bits.oled)
+ msleep(OLED_POST_T7_DELAY);
+
+ if (link->dpcd_sink_ext_caps.bits.oled ||
+ link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
+ link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)
+ dc_link_backlight_enable_aux(link, enable);
+
/*edp 1.2*/
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF)
edp_receiver_ready_T9(link);
+
+ if (!enable && link->dpcd_sink_ext_caps.bits.oled)
+ msleep(OLED_PRE_T11_DELAY);
}
void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
@@ -2576,17 +2593,6 @@ static void dce110_apply_ctx_for_surface(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (stream == pipe_ctx->stream) {
- if (!pipe_ctx->top_pipe &&
- (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
- dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
- }
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream != stream)
continue;
@@ -2607,20 +2613,16 @@ static void dce110_apply_ctx_for_surface(
}
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-
- if ((stream == pipe_ctx->stream) &&
- (!pipe_ctx->top_pipe) &&
- (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
- dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
- }
-
if (dc->fbc_compressor)
enable_fbc(dc, context);
}
+static void dce110_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context)
+{
+}
+
static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
@@ -2722,6 +2724,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.init_hw = init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
+ .post_unlock_program_front_end = dce110_post_unlock_program_front_end,
.update_plane_addr = update_plane_addr,
.update_pending_status = dce110_update_pending_status,
.enable_accelerated_mode = dce110_enable_accelerated_mode,
@@ -2736,6 +2739,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dce110_power_down_fe,
.pipe_control_lock = dce_pipe_control_lock,
+ .interdependent_update_lock = NULL,
.prepare_bandwidth = dce110_prepare_bandwidth,
.optimize_bandwidth = dce110_optimize_bandwidth,
.set_drr = set_drr,
@@ -2763,6 +2767,8 @@ static const struct hwseq_private_funcs dce110_private_funcs = {
.disable_stream_gating = NULL,
.enable_stream_gating = NULL,
.edp_backlight_control = dce110_edp_backlight_control,
+ .is_panel_backlight_on = dce110_is_panel_backlight_on,
+ .is_panel_powered_on = dce110_is_panel_powered_on,
};
void dce110_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index 26a9c14a58b1..34be166e8ff0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -85,5 +85,9 @@ void dce110_edp_wait_for_hpd_ready(
struct dc_link *link,
bool power_up);
+bool dce110_is_panel_backlight_on(struct dc_link *link);
+
+bool dce110_is_panel_powered_on(struct dc_link *link);
+
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index bbd6e01b3eca..47a39eb9400b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -316,6 +316,7 @@ bool cm_helper_translate_curve_to_hw_format(
struct pwl_result_data *rgb_resulted;
struct pwl_result_data *rgb;
struct pwl_result_data *rgb_plus_1;
+ struct pwl_result_data *rgb_minus_1;
int32_t region_start, region_end;
int32_t i;
@@ -465,9 +466,20 @@ bool cm_helper_translate_curve_to_hw_format(
rgb = rgb_resulted;
rgb_plus_1 = rgb_resulted + 1;
+ rgb_minus_1 = rgb;
i = 1;
while (i != hw_points + 1) {
+
+ if (i >= hw_points - 1) {
+ if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = dc_fixpt_add(rgb->red, rgb_minus_1->delta_red);
+ if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = dc_fixpt_add(rgb->green, rgb_minus_1->delta_green);
+ if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = dc_fixpt_add(rgb->blue, rgb_minus_1->delta_blue);
+ }
+
rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
@@ -482,6 +494,7 @@ bool cm_helper_translate_curve_to_hw_format(
}
++rgb_plus_1;
+ rgb_minus_1 = rgb;
++rgb;
++i;
}
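The clamp added above keeps the programmed curve monotonic near its end: when one of the last two points dips below its predecessor, it is rebuilt from the previous segment's slope, so no delta_* goes negative. A hedged numeric sketch:

	/* e.g. red values ..., 100, 104, 102 with the previous delta = 4:
	 * since 102 < 104 the point is rewritten as 104 + 4 = 108, and the
	 * final delta_red becomes 108 - 104 = 4 instead of -2.
	 */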
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index f36a0d8cedfe..deccab0228d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -128,8 +128,8 @@ bool hubbub1_verify_allow_pstate_change_high(
 * a pstate change takes around 100us on Linux; it is currently
 * unknown why it takes that long there
*/
- static unsigned int pstate_wait_timeout_us = 200;
- static unsigned int pstate_wait_expected_timeout_us = 40;
+ const unsigned int pstate_wait_timeout_us = 200;
+ const unsigned int pstate_wait_expected_timeout_us = 40;
static unsigned int max_sampled_pstate_wait_us; /* data collection */
static bool forced_pstate_allow; /* help with revert wa */
@@ -147,8 +147,9 @@ bool hubbub1_verify_allow_pstate_change_high(
forced_pstate_allow = false;
}
- /* RV2:
- * dchubbubdebugind, at: 0xB
+ /* The following table only applies to DCN1 and DCN2;
+ * for newer DCNs, consult the HW IP folks to read the RTL.
+ * HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
* description
* 0: Pipe0 Plane0 Allow Pstate Change
* 1: Pipe0 Plane1 Allow Pstate Change
@@ -181,64 +182,6 @@ bool hubbub1_verify_allow_pstate_change_high(
* 28: WB0 Allow Pstate Change
* 29: WB1 Allow Pstate Change
* 30: Arbiter's allow_pstate_change
- * 31: SOC pstate change request"
- */
- /*DCN2.x:
- HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
- 0: Pipe0 Plane0 Allow P-state Change
- 1: Pipe0 Plane1 Allow P-state Change
- 2: Pipe0 Cursor0 Allow P-state Change
- 3: Pipe0 Cursor1 Allow P-state Change
- 4: Pipe1 Plane0 Allow P-state Change
- 5: Pipe1 Plane1 Allow P-state Change
- 6: Pipe1 Cursor0 Allow P-state Change
- 7: Pipe1 Cursor1 Allow P-state Change
- 8: Pipe2 Plane0 Allow P-state Change
- 9: Pipe2 Plane1 Allow P-state Change
- 10: Pipe2 Cursor0 Allow P-state Change
- 11: Pipe2 Cursor1 Allow P-state Change
- 12: Pipe3 Plane0 Allow P-state Change
- 13: Pipe3 Plane1 Allow P-state Change
- 14: Pipe3 Cursor0 Allow P-state Change
- 15: Pipe3 Cursor1 Allow P-state Change
- 16: Pipe4 Plane0 Allow P-state Change
- 17: Pipe4 Plane1 Allow P-state Change
- 18: Pipe4 Cursor0 Allow P-state Change
- 19: Pipe4 Cursor1 Allow P-state Change
- 20: Pipe5 Plane0 Allow P-state Change
- 21: Pipe5 Plane1 Allow P-state Change
- 22: Pipe5 Cursor0 Allow P-state Change
- 23: Pipe5 Cursor1 Allow P-state Change
- 24: Pipe6 Plane0 Allow P-state Change
- 25: Pipe6 Plane1 Allow P-state Change
- 26: Pipe6 Cursor0 Allow P-state Change
- 27: Pipe6 Cursor1 Allow P-state Change
- 28: WB0 Allow P-state Change
- 29: WB1 Allow P-state Change
- 30: Arbiter`s Allow P-state Change
- 31: SOC P-state Change request
- */
- /* RV1:
- * dchubbubdebugind, at: 0x7
- * description "3-0: Pipe0 cursor0 QOS
- * 7-4: Pipe1 cursor0 QOS
- * 11-8: Pipe2 cursor0 QOS
- * 15-12: Pipe3 cursor0 QOS
- * 16: Pipe0 Plane0 Allow Pstate Change
- * 17: Pipe1 Plane0 Allow Pstate Change
- * 18: Pipe2 Plane0 Allow Pstate Change
- * 19: Pipe3 Plane0 Allow Pstate Change
- * 20: Pipe0 Plane1 Allow Pstate Change
- * 21: Pipe1 Plane1 Allow Pstate Change
- * 22: Pipe2 Plane1 Allow Pstate Change
- * 23: Pipe3 Plane1 Allow Pstate Change
- * 24: Pipe0 cursor0 Allow Pstate Change
- * 25: Pipe1 cursor0 Allow Pstate Change
- * 26: Pipe2 cursor0 Allow Pstate Change
- * 27: Pipe3 cursor0 Allow Pstate Change
- * 28: WB0 Allow Pstate Change
- * 29: WB1 Allow Pstate Change
- * 30: Arbiter's allow_pstate_change
* 31: SOC pstate change request
*/
@@ -300,7 +243,7 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
}
-void hubbub1_program_urgent_watermarks(
+bool hubbub1_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -308,6 +251,7 @@ void hubbub1_program_urgent_watermarks(
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
/* Repeat for water mark set A, B, C and D. */
/* clock state A */
@@ -321,7 +265,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) {
hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
@@ -331,7 +276,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.pte_meta_urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->a.pte_meta_urgent_ns < hubbub1->watermarks.a.pte_meta_urgent_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
@@ -344,7 +290,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) {
hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
@@ -354,7 +301,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.pte_meta_urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->b.pte_meta_urgent_ns < hubbub1->watermarks.b.pte_meta_urgent_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
@@ -367,7 +315,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) {
hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
@@ -377,7 +326,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.pte_meta_urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->c.pte_meta_urgent_ns < hubbub1->watermarks.c.pte_meta_urgent_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
@@ -390,7 +340,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) {
hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
@@ -400,10 +351,13 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.pte_meta_urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->d.pte_meta_urgent_ns < hubbub1->watermarks.d.pte_meta_urgent_ns)
+ wm_pending = true;
+
+ return wm_pending;
}
-void hubbub1_program_stutter_watermarks(
+bool hubbub1_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -411,6 +365,7 @@ void hubbub1_program_stutter_watermarks(
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
/* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
@@ -425,7 +380,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
@@ -439,7 +396,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
@@ -454,7 +413,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
@@ -468,7 +429,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
@@ -483,7 +446,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
@@ -497,7 +462,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
@@ -512,7 +479,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
@@ -526,11 +495,14 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
+ return wm_pending;
}
-void hubbub1_program_pstate_watermarks(
+bool hubbub1_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -538,6 +510,7 @@ void hubbub1_program_pstate_watermarks(
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
/* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
@@ -552,7 +525,9 @@ void hubbub1_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
@@ -567,7 +542,9 @@ void hubbub1_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
@@ -582,7 +559,9 @@ void hubbub1_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
@@ -597,23 +576,33 @@ void hubbub1_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
+
+ return wm_pending;
}
-void hubbub1_program_watermarks(
+bool hubbub1_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
+ bool wm_pending = false;
/*
* Need to clamp to max of the register values (i.e. no wrap)
* for dcn1, all wm registers are 21-bit wide
*/
- hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+ if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
@@ -627,6 +616,7 @@ void hubbub1_program_watermarks(
DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1,
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
#endif
+ return wm_pending;
}
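The bool conversion follows one pattern throughout: raising a watermark is always programmed, while a lower value is skipped and reported as pending unless safe_to_lower is set; hubbub1_program_watermarks then ORs the three results together. A minimal sketch of the pattern, with hypothetical names:

	static bool program_one_wm(uint32_t *cached_ns, uint32_t requested_ns,
			bool safe_to_lower)
	{
		if (safe_to_lower || requested_ns > *cached_ns) {
			*cached_ns = requested_ns;
			/* ...REG_SET the corresponding watermark register here... */
			return false;			/* applied, nothing pending */
		}
		return requested_ns < *cached_ns;	/* lower value deferred */
	}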
void hubbub1_update_dchub(
@@ -840,8 +830,8 @@ static void hubbub1_det_request_size(
hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);
- swath_bytes_horz_wc = height * blk256_height * bpe;
- swath_bytes_vert_wc = width * blk256_width * bpe;
+ swath_bytes_horz_wc = width * blk256_height * bpe;
+ swath_bytes_vert_wc = height * blk256_width * bpe;
*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
false : /* full 256B request */
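The two-line swath fix above swaps the operands: a horizontally scanned swath spans the full surface width and is blk256_height rows tall, so its byte size scales with width, and the vertical case mirrors it. A compilable sketch of the corrected math and the resulting 128B-vs-256B request decision, with hypothetical surface and buffer sizes (the real block dimensions come from hubbub1_get_blk256_size()):

#include <stdbool.h>
#include <stdio.h>

/* Full 256B requests need room to double-buffer two swaths. */
static bool use_128b_requests(unsigned int swath_bytes,
                              unsigned int detile_buf_size)
{
        return 2 * swath_bytes > detile_buf_size;
}

int main(void)
{
        /* Hypothetical 1920x1080 32bpp surface; 8x8 256-byte blocks. */
        unsigned int width = 1920, height = 1080, bpe = 4;
        unsigned int blk256_width = 8, blk256_height = 8;
        unsigned int detile_buf_size = 164 * 1024;

        /*
         * A horizontal swath spans the surface width and is blk256_height
         * rows tall, so its size scales with width (the corrected term);
         * the vertical case mirrors it.
         */
        unsigned int swath_bytes_horz_wc = width * blk256_height * bpe;
        unsigned int swath_bytes_vert_wc = height * blk256_width * bpe;

        printf("horz swath %u B -> %s\n", swath_bytes_horz_wc,
               use_128b_requests(swath_bytes_horz_wc, detile_buf_size) ?
               "128B requests" : "full 256B requests");
        printf("vert swath %u B\n", swath_bytes_vert_wc);
        return 0;
}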
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index af57751253de..343a537172c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -308,7 +308,7 @@ bool hubbub1_verify_allow_pstate_change_high(
void hubbub1_wm_change_req_wa(struct hubbub *hubbub);
-void hubbub1_program_watermarks(
+bool hubbub1_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -331,17 +331,17 @@ void hubbub1_construct(struct hubbub *hubbub,
const struct dcn_hubbub_shift *hubbub_shift,
const struct dcn_hubbub_mask *hubbub_mask);
-void hubbub1_program_urgent_watermarks(
+bool hubbub1_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
-void hubbub1_program_stutter_watermarks(
+bool hubbub1_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
-void hubbub1_program_pstate_watermarks(
+bool hubbub1_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 1008ac8a0f2a..9cc3314966bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -48,8 +48,8 @@
#include "dc_link_dp.h"
#include "dccg.h"
#include "clk_mgr.h"
-
-
+#include "link_hwss.h"
+#include "dpcd_defs.h"
#include "dsc.h"
#define DC_LOGGER_INIT(logger)
@@ -82,7 +82,7 @@ void print_microsec(struct dc_context *dc_ctx,
us_x10 % frac);
}
-static void dcn10_lock_all_pipes(struct dc *dc,
+void dcn10_lock_all_pipes(struct dc *dc,
struct dc_state *context,
bool lock)
{
@@ -93,6 +93,7 @@ static void dcn10_lock_all_pipes(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe_ctx = &context->res_ctx.pipe_ctx[i];
tg = pipe_ctx->stream_res.tg;
+
/*
* Only lock the top pipe's tg to prevent redundant
* (un)locking. Also skip if pipe is disabled.
@@ -103,9 +104,9 @@ static void dcn10_lock_all_pipes(struct dc *dc,
continue;
if (lock)
- tg->funcs->lock(tg);
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
else
- tg->funcs->unlock(tg);
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
}
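dcn10_lock_all_pipes is un-static'd here and rerouted through dc->hwss.pipe_control_lock so the same routine can serve as the interdependent_update_lock hook registered later in this patch. A toy-type sketch of the traversal; only tree roots are touched, since MPC children share the root's timing generator:

#include <stdbool.h>

struct pipe {
        struct pipe *top_pipe; /* non-NULL for MPC-split children */
        bool enabled;
        bool locked;
};

static void lock_all_pipes(struct pipe *pipes, int count, bool lock)
{
        for (int i = 0; i < count; i++) {
                struct pipe *p = &pipes[i];

                /*
                 * Children share the root pipe's timing generator, so
                 * locking them again would be redundant; disabled pipes
                 * are skipped too.
                 */
                if (p->top_pipe || !p->enabled)
                        continue;
                p->locked = lock; /* stands in for pipe_control_lock() */
        }
}

int main(void)
{
        struct pipe pipes[2] = { { .enabled = true }, { .enabled = true } };

        pipes[1].top_pipe = &pipes[0]; /* child of pipe 0 */
        lock_all_pipes(pipes, 2, true);
        return pipes[0].locked && !pipes[1].locked ? 0 : 1;
}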
@@ -900,6 +901,10 @@ static void dcn10_reset_back_end_for_pipe(
* parent pipe.
*/
if (pipe_ctx->top_pipe == NULL) {
+
+ if (pipe_ctx->stream_res.abm)
+ pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
@@ -1263,7 +1268,8 @@ void dcn10_init_hw(struct dc *dc)
}
//Enable ability to power gate / don't force power on permanently
- hws->funcs.enable_power_gating_plane(hws, true);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(hws, true);
return;
}
@@ -1317,6 +1323,31 @@ void dcn10_init_hw(struct dc *dc)
if (hws->funcs.dsc_pg_control != NULL)
hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
+ /* we want to turn off all dp displays before doing detection */
+ if (dc->config.power_down_display_on_boot) {
+ uint8_t dpcd_power_state = '\0';
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
+ continue;
+
+ /*
+ * core_link_read_dpcd() will invoke dm_helpers_dp_read_dpcd(),
+ * which needs to read dpcd info with the help of aconnector.
+ * If aconnector (dc->links[i]->priv) is NULL, then dpcd status
+ * cannot be read.
+ */
+ if (dc->links[i]->priv) {
+ /* if any of the displays are lit up turn them off */
+ status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+ if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
+ dp_receiver_power_ctrl(dc->links[i], false);
+ }
+ }
+ }
+
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
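The boot-time block above walks every DP link, reads the sink's DPCD power register through the connector back-pointer, and powers down any receiver still in D0 before detection starts. A standalone sketch with the DPCD I/O stubbed out; DP_SET_POWER (0x600) and DP_POWER_STATE_D0 are the standard DPCD definitions, the structs are toy stand-ins:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DP_SET_POWER      0x600 /* DPCD power-state register */
#define DP_POWER_STATE_D0 0x1   /* sink fully awake */

struct link {
        bool is_dp;
        bool has_priv;      /* stands in for the aconnector back-pointer */
        uint8_t dpcd_power; /* stubbed DPCD contents */
};

/* Stub for core_link_read_dpcd(); always succeeds here. */
static bool read_dpcd(struct link *l, int addr, uint8_t *out)
{
        (void)addr;
        *out = l->dpcd_power;
        return true;
}

static void power_down_lit_dp_sinks(struct link *links, int n)
{
        for (int i = 0; i < n; i++) {
                uint8_t state;

                if (!links[i].is_dp || !links[i].has_priv)
                        continue; /* can't read DPCD without the connector */
                if (read_dpcd(&links[i], DP_SET_POWER, &state) &&
                    state == DP_POWER_STATE_D0)
                        printf("link %d: powering receiver down\n", i);
        }
}

int main(void)
{
        struct link links[2] = {
                { .is_dp = true, .has_priv = true, .dpcd_power = DP_POWER_STATE_D0 },
                { .is_dp = true, .has_priv = false },
        };

        power_down_lit_dp_sinks(links, 2);
        return 0;
}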
@@ -1325,6 +1356,9 @@ void dcn10_init_hw(struct dc *dc)
*/
if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
hws->funcs.init_pipes(dc, dc->current_state);
+ if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
+ !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
for (i = 0; i < res_pool->audio_count; i++) {
@@ -1355,8 +1389,8 @@ void dcn10_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
-
- hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
@@ -1576,7 +1610,7 @@ void dcn10_pipe_control_lock(
/* use TG master update lock to lock everything on the TG
* therefore only top pipe need to lock
*/
- if (pipe->top_pipe)
+ if (!pipe || pipe->top_pipe)
return;
if (dc->debug.sanity_checks)
@@ -2090,6 +2124,10 @@ void dcn10_get_hdr_visual_confirm_color(
if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
/* HDR10, ARGB2101010 - set border color to red */
color->color_r_cr = color_value;
+ } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
+ /* FreeSync 2 ARGB2101010 - set border color to pink */
+ color->color_r_cr = color_value;
+ color->color_b_cb = color_value;
}
break;
case PIXEL_FORMAT_FP16:
@@ -2512,12 +2550,17 @@ void dcn10_apply_ctx_for_surface(
int i;
struct timing_generator *tg;
uint32_t underflow_check_delay_us;
- bool removed_pipe[4] = { false };
bool interdependent_update = false;
struct pipe_ctx *top_pipe_to_program =
dcn10_find_top_pipe_for_stream(dc, context, stream);
DC_LOGGER_INIT(dc->ctx->logger);
+ // Clear pipe_ctx update flags
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ pipe_ctx->update_flags.raw = 0;
+ }
+
if (!top_pipe_to_program)
return;
@@ -2531,11 +2574,6 @@ void dcn10_apply_ctx_for_surface(
if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
- if (interdependent_update)
- dcn10_lock_all_pipes(dc, context, true);
- else
- dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
-
if (underflow_check_delay_us != 0xFFFFFFFF)
udelay(underflow_check_delay_us);
@@ -2552,18 +2590,6 @@ void dcn10_apply_ctx_for_surface(
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
- /*
- * Powergate reused pipes that are not powergated
- * fairly hacky right now, using opp_id as indicator
- * TODO: After move dc_post to dc_update, this will
- * be removed.
- */
- if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
- if (old_pipe_ctx->stream_res.tg == tg &&
- old_pipe_ctx->plane_res.hubp &&
- old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID)
- dc->hwss.disable_plane(dc, old_pipe_ctx);
- }
if ((!pipe_ctx->plane_state ||
pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
@@ -2571,7 +2597,7 @@ void dcn10_apply_ctx_for_surface(
old_pipe_ctx->stream_res.tg == tg) {
hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);
- removed_pipe[i] = true;
+ pipe_ctx->update_flags.bits.disable = 1;
DC_LOG_DC("Reset mpcc for pipe %d\n",
old_pipe_ctx->pipe_idx);
@@ -2597,21 +2623,35 @@ void dcn10_apply_ctx_for_surface(
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs);
}
+}
- if (interdependent_update)
- dcn10_lock_all_pipes(dc, context, false);
- else
- dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
+void dcn10_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
- if (num_planes == 0)
- false_optc_underflow_wa(dc, stream, tg);
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->top_pipe &&
+ !pipe_ctx->prev_odm_pipe &&
+ pipe_ctx->stream) {
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ if (context->stream_status[i].plane_count == 0)
+ false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
+ }
+ }
for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (removed_pipe[i])
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (removed_pipe[i]) {
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
dc->hwss.optimize_bandwidth(dc, context);
break;
}
@@ -2656,7 +2696,7 @@ void dcn10_prepare_bandwidth(
false);
}
- hubbub->funcs->program_watermarks(hubbub,
+ dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
@@ -2693,6 +2733,7 @@ void dcn10_optimize_bandwidth(
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
+
dcn10_stereo_hw_frame_pack_wa(dc, context);
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
@@ -2884,6 +2925,8 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
bool flip_pending;
+ struct dc *dc;
if (plane_state == NULL)
return;
+ dc = plane_state->ctx->dc;
@@ -2901,6 +2943,19 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
plane_state->status.is_right_eye =
!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
}
+
+ if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
+ struct dce_hwseq *hwseq = dc->hwseq;
+ struct timing_generator *tg = dc->res_pool->timing_generators[0];
+ unsigned int cur_frame = tg->funcs->get_frame_count(tg);
+
+ if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
+ }
+ }
}
void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 4d20f6586bb5..16a50e05ffbf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -70,11 +70,18 @@ void dcn10_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context);
void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_lock_all_pipes(
+ struct dc *dc,
+ struct dc_state *context,
+ bool lock);
void dcn10_apply_ctx_for_surface(
struct dc *dc,
const struct dc_stream_state *stream,
int num_planes,
struct dc_state *context);
+void dcn10_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context);
void dcn10_hubp_pg_control(
struct dce_hwseq *hws,
unsigned int hubp_inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index e7e5352ec424..dd02d3983695 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -32,6 +32,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.init_hw = dcn10_init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
+ .post_unlock_program_front_end = dcn10_post_unlock_program_front_end,
.update_plane_addr = dcn10_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
@@ -49,6 +50,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn10_disable_plane,
.pipe_control_lock = dcn10_pipe_control_lock,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
.prepare_bandwidth = dcn10_prepare_bandwidth,
.optimize_bandwidth = dcn10_optimize_bandwidth,
.set_drr = dcn10_set_drr,
@@ -85,6 +87,8 @@ static const struct hwseq_private_funcs dcn10_private_funcs = {
.reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
.enable_stream_timing = dcn10_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
+ .is_panel_backlight_on = dce110_is_panel_backlight_on,
+ .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = NULL,
.enable_stream_gating = NULL,
.setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 1a37c90e9d43..d3617d6785a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -782,6 +782,11 @@ bool dcn10_link_encoder_validate_output_with_stream(
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
bool is_valid;
+ // If SCDC (340-600 MHz) is disabled, fall back to the HDMI 1.4 pixel clock limit
+ if (stream->sink->edid_caps.panel_patch.skip_scdc_overwrite &&
+ enc10->base.features.max_hdmi_pixel_clock > 300000)
+ enc10->base.features.max_hdmi_pixel_clock = 300000;
+
switch (stream->signal) {
case SIGNAL_TYPE_DVI_SINGLE_LINK:
case SIGNAL_TYPE_DVI_DUAL_LINK:
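The clamp added above caps the encoder's advertised HDMI pixel clock at 300 MHz when the sink's EDID quirk (skip_scdc_overwrite) says the SCDC handshake must be avoided, since the 340-600 MHz range cannot be signalled without it. A small sketch with a toy features struct:

#include <stdbool.h>
#include <stdio.h>

struct enc_features {
        unsigned int max_hdmi_pixel_clock; /* kHz */
};

static void clamp_for_no_scdc(struct enc_features *f, bool skip_scdc)
{
        /*
         * Without SCDC there is no scrambling/clock-ratio handshake, so
         * anything above the HDMI 1.4 limit (300 MHz) can't be signalled.
         */
        if (skip_scdc && f->max_hdmi_pixel_clock > 300000)
                f->max_hdmi_pixel_clock = 300000;
}

int main(void)
{
        struct enc_features f = { .max_hdmi_pixel_clock = 600000 };

        clamp_for_no_scdc(&f, true);
        printf("%u kHz\n", f.max_hdmi_pixel_clock); /* 300000 */
        return 0;
}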
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index eb13589b9a81..762109174fb8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -62,11 +62,11 @@
SRI(DP_DPHY_FAST_TRAINING, DP, id), \
SRI(DP_SEC_CNTL1, DP, id), \
SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
- SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
#define LE_DCN10_REG_LIST(id)\
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
LE_DCN_COMMON_REG_LIST(id)
struct dcn10_link_enc_aux_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index a9a43b397db9..63acb8ff7462 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -299,7 +299,6 @@ void optc1_set_vtg_params(struct timing_generator *optc,
uint32_t asic_blank_end;
uint32_t v_init;
uint32_t v_fp2 = 0;
- int32_t vertical_line_start;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -316,9 +315,8 @@ void optc1_set_vtg_params(struct timing_generator *optc,
patched_crtc_timing.v_border_top;
/* if VSTARTUP is before VSYNC, FP2 is the offset, otherwise 0 */
- vertical_line_start = asic_blank_end - optc1->vstartup_start + 1;
- if (vertical_line_start < 0)
- v_fp2 = -vertical_line_start;
+ if (optc1->vstartup_start > asic_blank_end)
+ v_fp2 = optc1->vstartup_start - asic_blank_end;
/* Interlace */
if (REG(OTG_INTERLACE_CONTROL)) {
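The optc1_set_vtg_params rewrite above drops the signed intermediate: instead of computing asic_blank_end - vstartup_start + 1 and negating on underflow, it branches first and subtracts in unsigned math (note the computed offset also grows by one line relative to the old expression). A minimal sketch with hypothetical line numbers:

#include <stdio.h>

static unsigned int compute_v_fp2(unsigned int vstartup_start,
                                  unsigned int asic_blank_end)
{
        /*
         * FP2 absorbs the overshoot when VSTARTUP falls past blank end;
         * branching first keeps the subtraction safely unsigned.
         */
        if (vstartup_start > asic_blank_end)
                return vstartup_start - asic_blank_end;
        return 0;
}

int main(void)
{
        printf("%u\n", compute_v_fp2(40, 35)); /* 5 */
        printf("%u\n", compute_v_fp2(30, 35)); /* 0 */
        return 0;
}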
@@ -1195,7 +1193,7 @@ static void optc1_enable_stereo(struct timing_generator *optc,
REG_UPDATE_3(OTG_STEREO_CONTROL,
OTG_STEREO_EN, stereo_en,
OTG_STEREO_SYNC_OUTPUT_LINE_NUM, 0,
- OTG_STEREO_SYNC_OUTPUT_POLARITY, 0);
+ OTG_STEREO_SYNC_OUTPUT_POLARITY, flags->RIGHT_EYE_POLARITY == 0 ? 0 : 1);
if (flags->PROGRAM_POLARITY)
REG_UPDATE(OTG_STEREO_CONTROL,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 3b71898e859e..261bdc3a8218 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -570,7 +570,7 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = true,
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
@@ -598,7 +598,7 @@ static const struct dc_debug_options debug_defaults_drv = {
};
static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = true,
.clock_trace = true,
@@ -1233,7 +1233,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont
return DC_OK;
}
-static enum dc_status dcn10_get_default_swizzle_mode(struct dc_plane_state *plane_state)
+static enum dc_status dcn10_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
enum dc_status result = DC_OK;
@@ -1295,7 +1295,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = {
.validate_plane = dcn10_validate_plane,
.validate_global = dcn10_validate_global,
.add_stream_to_ctx = dcn10_add_stream_to_ctx,
- .get_default_swizzle_mode = dcn10_get_default_swizzle_mode,
+ .patch_unknown_plane_state = dcn10_patch_unknown_plane_state,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 376c4264d295..7eba9333c328 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -1667,5 +1667,6 @@ void dcn10_stream_encoder_construct(
enc1->regs = regs;
enc1->se_shift = se_shift;
enc1->se_mask = se_mask;
+ enc1->base.stream_enc_inst = eng_id - ENGINE_ID_DIGA;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
index 50bffbfdd394..62cc2651e00c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -70,6 +70,8 @@ void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 0);
}
+
+ dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
void dccg2_get_dccg_ref_freq(struct dccg *dccg,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index 13e057d7ee93..42bba7c9548b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -369,84 +369,6 @@ void dpp2_set_cursor_attributes(
}
}
-#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19))
-
-bool dpp2_get_optimal_number_of_taps(
- struct dpp *dpp,
- struct scaler_data *scl_data,
- const struct scaling_taps *in_taps)
-{
- /* Some ASICs does not support FP16 scaling, so we reject modes require this*/
- if (scl_data->viewport.width != scl_data->h_active &&
- scl_data->viewport.height != scl_data->v_active &&
- dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
- scl_data->format == PIXEL_FORMAT_FP16)
- return false;
-
- if (scl_data->viewport.width > scl_data->h_active &&
- dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
- scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
- return false;
-
- /* TODO: add lb check */
-
- /* No support for programming ratio of 8, drop to 7.99999.. */
- if (scl_data->ratios.horz.value == (8ll << 32))
- scl_data->ratios.horz.value--;
- if (scl_data->ratios.vert.value == (8ll << 32))
- scl_data->ratios.vert.value--;
- if (scl_data->ratios.horz_c.value == (8ll << 32))
- scl_data->ratios.horz_c.value--;
- if (scl_data->ratios.vert_c.value == (8ll << 32))
- scl_data->ratios.vert_c.value--;
-
- /* Set default taps if none are provided */
- if (in_taps->h_taps == 0) {
- if (dc_fixpt_ceil(scl_data->ratios.horz) > 4)
- scl_data->taps.h_taps = 8;
- else
- scl_data->taps.h_taps = 4;
- } else
- scl_data->taps.h_taps = in_taps->h_taps;
- if (in_taps->v_taps == 0) {
- if (dc_fixpt_ceil(scl_data->ratios.vert) > 4)
- scl_data->taps.v_taps = 8;
- else
- scl_data->taps.v_taps = 4;
- } else
- scl_data->taps.v_taps = in_taps->v_taps;
- if (in_taps->v_taps_c == 0) {
- if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 4)
- scl_data->taps.v_taps_c = 4;
- else
- scl_data->taps.v_taps_c = 2;
- } else
- scl_data->taps.v_taps_c = in_taps->v_taps_c;
- if (in_taps->h_taps_c == 0) {
- if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 4)
- scl_data->taps.h_taps_c = 4;
- else
- scl_data->taps.h_taps_c = 2;
- } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
- /* Only 1 and even h_taps_c are supported by hw */
- scl_data->taps.h_taps_c = in_taps->h_taps_c - 1;
- else
- scl_data->taps.h_taps_c = in_taps->h_taps_c;
-
- if (!dpp->ctx->dc->debug.always_scale) {
- if (IDENTITY_RATIO(scl_data->ratios.horz))
- scl_data->taps.h_taps = 1;
- if (IDENTITY_RATIO(scl_data->ratios.vert))
- scl_data->taps.v_taps = 1;
- if (IDENTITY_RATIO(scl_data->ratios.horz_c))
- scl_data->taps.h_taps_c = 1;
- if (IDENTITY_RATIO(scl_data->ratios.vert_c))
- scl_data->taps.v_taps_c = 1;
- }
-
- return true;
-}
-
void oppn20_dummy_program_regamma_pwl(
struct dpp *dpp,
const struct pwl_params *params,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 6bdfee20b6a7..1b1ae9ce2799 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -369,6 +369,7 @@ static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_
dsc_reg_vals->pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable;
dsc_reg_vals->pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth;
dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1;
+ dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0;
// TODO: in addition to validating slice height (pic height must be divisible by slice height),
// see what happens when the same condition doesn't apply for slice_width/pic_width.
@@ -531,7 +532,6 @@ static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, cons
reg_vals->pps.rc_buf_thresh[i] = reg_vals->pps.rc_buf_thresh[i] >> 6;
reg_vals->rc_buffer_model_size = dsc_params->rc_buffer_model_size;
- reg_vals->ich_reset_at_eol = reg_vals->num_slices_h == 1 ? 0 : 0xf;
}
static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals)
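Moving ich_reset_at_eol into dsc_prepare_config also changes the rule: the old derivation keyed only on num_slices_h, so an ODM-combined stream whose per-OPP slice count had already been divided down to 1 never reset the ICH at end-of-line even though the full line is sliced. A tiny sketch of the new rule:

#include <stdbool.h>
#include <stdio.h>

static unsigned int ich_reset_at_eol(bool is_odm, int num_slices_h)
{
        /*
         * The ICH must reset at end-of-line whenever a line is split
         * across slices -- including the ODM case, where each OPP may
         * see only one slice even though the full line has several.
         */
        return (is_odm || num_slices_h > 1) ? 0xF : 0;
}

int main(void)
{
        printf("0x%X\n", ich_reset_at_eol(true, 1));  /* 0xF: missed by old rule */
        printf("0x%X\n", ich_reset_at_eol(false, 1)); /* 0x0 */
        return 0;
}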
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index 9235f7d29454..c0b21d7450d4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -562,19 +562,23 @@ void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub,
}
}
-static void hubbub2_program_watermarks(
+static bool hubbub2_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ bool wm_pending = false;
/*
* Need to clamp to max of the register values (i.e. no wrap)
* for dcn1, all wm registers are 21-bit wide
*/
- hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+ if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
/*
* There's a special case when going from p-state support to p-state unsupported
@@ -592,6 +596,7 @@ static void hubbub2_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 180);
hubbub->funcs->allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ return wm_pending;
}
static const struct hubbub_funcs hubbub2_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index cfbbaffa8654..233318260da4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -307,7 +307,8 @@ void dcn20_init_blank(
COLOR_DEPTH_UNDEFINED,
&black_color,
otg_active_width,
- otg_active_height);
+ otg_active_height,
+ 0);
if (num_opps == 2) {
bottom_opp->funcs->opp_set_disp_pattern_generator(
@@ -317,7 +318,8 @@ void dcn20_init_blank(
COLOR_DEPTH_UNDEFINED,
&black_color,
otg_active_width,
- otg_active_height);
+ otg_active_height,
+ 0);
}
hws->funcs.wait_for_blank_complete(opp);
@@ -572,7 +574,6 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
dpp->funcs->dpp_dppclk_control(dpp, false, false);
hubp->power_gated = true;
- dc->optimized_required = false; /* We're powering off, no need to optimize */
hws->funcs.plane_atomic_power_down(dc,
pipe_ctx->plane_res.dpp,
@@ -646,6 +647,9 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
+ if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
+ dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
+
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
&stream->timing,
@@ -975,7 +979,8 @@ void dcn20_blank_pixel_data(
stream->timing.display_color_depth,
&black_color,
width,
- height);
+ height,
+ 0);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_set_disp_pattern_generator(
@@ -986,7 +991,8 @@ void dcn20_blank_pixel_data(
stream->timing.display_color_depth,
&black_color,
width,
- height);
+ height,
+ 0);
}
if (!blank)
@@ -1089,29 +1095,6 @@ void dcn20_enable_plane(
// }
}
-
-void dcn20_pipe_control_lock_global(
- struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock)
-{
- if (lock) {
- pipe->stream_res.tg->funcs->lock_doublebuffer_enable(
- pipe->stream_res.tg);
- pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
- } else {
- pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
- pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
- CRTC_STATE_VACTIVE);
- pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
- CRTC_STATE_VBLANK);
- pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
- CRTC_STATE_VACTIVE);
- pipe->stream_res.tg->funcs->lock_doublebuffer_disable(
- pipe->stream_res.tg);
- }
-}
-
void dcn20_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
@@ -1122,7 +1105,7 @@ void dcn20_pipe_control_lock(
/* use TG master update lock to lock everything on the TG
* therefore only top pipe need to lock
*/
- if (pipe->top_pipe)
+ if (!pipe || pipe->top_pipe)
return;
if (pipe->plane_state != NULL)
@@ -1537,48 +1520,32 @@ static void dcn20_program_pipe(
}
}
-static bool does_pipe_need_lock(struct pipe_ctx *pipe)
-{
- if ((pipe->plane_state && pipe->plane_state->update_flags.raw)
- || pipe->update_flags.raw)
- return true;
- if (pipe->bottom_pipe)
- return does_pipe_need_lock(pipe->bottom_pipe);
-
- return false;
-}
-
void dcn20_program_front_end_for_ctx(
struct dc *dc,
struct dc_state *context)
{
- const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
int i;
struct dce_hwseq *hws = dc->hwseq;
- bool pipe_locked[MAX_PIPES] = {false};
DC_LOGGER_INIT(dc->ctx->logger);
- /* Carry over GSL groups in case the context is changing. */
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (context->res_ctx.pipe_ctx[i].stream == dc->current_state->res_ctx.pipe_ctx[i].stream)
- context->res_ctx.pipe_ctx[i].stream_res.gsl_group =
- dc->current_state->res_ctx.pipe_ctx[i].stream_res.gsl_group;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
+ ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+ if (dc->hwss.program_triplebuffer != NULL &&
+ !dc->debug.disable_tri_buf) {
+ /*turn off triple buffer for full update*/
+ dc->hwss.program_triplebuffer(
+ dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ }
+ }
+ }
/* Set pipe update flags and lock pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++)
dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
&context->res_ctx.pipe_ctx[i]);
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (!context->res_ctx.pipe_ctx[i].top_pipe &&
- does_pipe_need_lock(&context->res_ctx.pipe_ctx[i])) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
- dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
- if (!pipe_ctx->update_flags.bits.enable)
- dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], true);
- pipe_locked[i] = true;
- }
/* OTG blank before disabling all front ends */
for (i = 0; i < dc->res_pool->pipe_count; i++)
@@ -1616,17 +1583,17 @@ void dcn20_program_front_end_for_ctx(
hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
}
}
+}
- /* Unlock all locked pipes */
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (pipe_locked[i]) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+void dcn20_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
+ struct dce_hwseq *hwseq = dc->hwseq;
- if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
- dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
- if (!pipe_ctx->update_flags.bits.enable)
- dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], false);
- }
+ DC_LOGGER_INIT(dc->ctx->logger);
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
@@ -1652,11 +1619,26 @@ void dcn20_program_front_end_for_ctx(
}
/* WA to apply WM setting*/
- if (dc->hwseq->wa.DEGVIDCN21)
+ if (hwseq->wa.DEGVIDCN21)
dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
-}
+ /* WA for stutter underflow during MPO transitions when adding 2nd plane */
+ if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {
+
+ if (dc->current_state->stream_status[0].plane_count == 1 &&
+ context->stream_status[0].plane_count > 1) {
+
+ struct timing_generator *tg = dc->res_pool->timing_generators[0];
+
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);
+
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame = tg->funcs->get_frame_count(tg);
+ }
+ }
+}
+
void dcn20_prepare_bandwidth(
struct dc *dc,
struct dc_state *context)
@@ -1669,7 +1651,7 @@ void dcn20_prepare_bandwidth(
false);
/* program dchubbub watermarks */
- hubbub->funcs->program_watermarks(hubbub,
+ dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
@@ -2053,6 +2035,10 @@ static void dcn20_reset_back_end_for_pipe(
* parent pipe.
*/
if (pipe_ctx->top_pipe == NULL) {
+
+ if (pipe_ctx->stream_res.abm)
+ pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 02c9be5ebd47..63ce763f148e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -35,6 +35,9 @@ bool dcn20_set_shaper_3dlut(
void dcn20_program_front_end_for_ctx(
struct dc *dc,
struct dc_state *context);
+void dcn20_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context);
void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
bool dcn20_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
@@ -58,10 +61,6 @@ void dcn20_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
bool lock);
-void dcn20_pipe_control_lock_global(
- struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock);
void dcn20_prepare_bandwidth(
struct dc *dc,
struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index d51e02fdab4d..1e73357eda34 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -33,6 +33,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
@@ -50,7 +51,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
.pipe_control_lock = dcn20_pipe_control_lock,
- .pipe_control_lock_global = dcn20_pipe_control_lock_global,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
.prepare_bandwidth = dcn20_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
@@ -96,6 +97,8 @@ static const struct hwseq_private_funcs dcn20_private_funcs = {
.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
.enable_stream_timing = dcn20_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
+ .is_panel_backlight_on = dce110_is_panel_backlight_on,
+ .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = dcn20_disable_stream_gating,
.enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
@@ -108,7 +111,6 @@ static const struct hwseq_private_funcs dcn20_private_funcs = {
.enable_power_gating_plane = dcn20_enable_power_gating_plane,
.dpp_pg_control = dcn20_dpp_pg_control,
.hubp_pg_control = dcn20_hubp_pg_control,
- .dsc_pg_control = NULL,
.update_odm = dcn20_update_odm,
.dsc_pg_control = dcn20_dsc_pg_control,
.get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
index 3fccd5eeecbb..7bcee5894d2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
@@ -36,26 +36,6 @@
#define BASE(seg) \
BASE_INNER(seg)
-#define SR(reg_name)\
- .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
- mm ## reg_name
-
-#define SRI(reg_name, block, id)\
- .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- mm ## block ## id ## _ ## reg_name
-
-#define SRI2(reg_name, block, id)\
- .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
- mm ## reg_name
-
-#define SRII(reg_name, block, id)\
- .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- mm ## block ## id ## _ ## reg_name
-
-#define SF(reg_name, field_name, post_fix)\
- .field_name = reg_name ## __ ## field_name ## post_fix
-
-
#define MCIF_WB_COMMON_REG_LIST_DCN2_0(inst) \
SRI(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst),\
SRI(MCIF_WB_BUFMGR_CUR_LINE_R, MCIF_WB, inst),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
index 023cc71fad0f..138321e151eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
@@ -45,7 +45,8 @@ void opp2_set_disp_pattern_generator(
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
- int height)
+ int height,
+ int offset)
{
struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
enum test_pattern_color_format bit_depth;
@@ -92,6 +93,11 @@ void opp2_set_disp_pattern_generator(
DPG_ACTIVE_WIDTH, width,
DPG_ACTIVE_HEIGHT, height);
+ /* set DPG offset */
+ REG_SET_2(DPG_OFFSET_SEGMENT, 0,
+ DPG_X_OFFSET, offset,
+ DPG_SEGMENT_WIDTH, 0);
+
switch (test_pattern) {
case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
index 4093bec172c1..64c5b429c79a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
@@ -36,6 +36,7 @@
#define OPP_DPG_REG_LIST(id) \
SRI(DPG_CONTROL, DPG, id), \
SRI(DPG_DIMENSIONS, DPG, id), \
+ SRI(DPG_OFFSET_SEGMENT, DPG, id), \
SRI(DPG_COLOUR_B_CB, DPG, id), \
SRI(DPG_COLOUR_G_Y, DPG, id), \
SRI(DPG_COLOUR_R_CR, DPG, id), \
@@ -53,6 +54,7 @@
uint32_t FMT_422_CONTROL; \
uint32_t DPG_CONTROL; \
uint32_t DPG_DIMENSIONS; \
+ uint32_t DPG_OFFSET_SEGMENT; \
uint32_t DPG_COLOUR_B_CB; \
uint32_t DPG_COLOUR_G_Y; \
uint32_t DPG_COLOUR_R_CR; \
@@ -68,6 +70,8 @@
OPP_SF(DPG0_DPG_CONTROL, DPG_HRES, mask_sh), \
OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_WIDTH, mask_sh), \
OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_HEIGHT, mask_sh), \
+ OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_X_OFFSET, mask_sh), \
+ OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_SEGMENT_WIDTH, mask_sh), \
OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR0_R_CR, mask_sh), \
OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR1_R_CR, mask_sh), \
OPP_SF(DPG0_DPG_COLOUR_B_CB, DPG_COLOUR0_B_CB, mask_sh), \
@@ -97,6 +101,8 @@
type DPG_HRES; \
type DPG_ACTIVE_WIDTH; \
type DPG_ACTIVE_HEIGHT; \
+ type DPG_X_OFFSET; \
+ type DPG_SEGMENT_WIDTH; \
type DPG_COLOUR0_R_CR; \
type DPG_COLOUR1_R_CR; \
type DPG_COLOUR0_B_CB; \
@@ -144,7 +150,8 @@ void opp2_set_disp_pattern_generator(
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
- int height);
+ int height,
+ int offset);
bool opp2_dpg_is_blanked(struct output_pixel_processor *opp);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 85f90f3e24cb..a67395208991 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -153,6 +153,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
.xfc_supported = true,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
+ .number_of_cursors = 1,
};
struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
@@ -220,7 +221,8 @@ struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
.xfc_supported = true,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
- .ptoi_supported = 0
+ .ptoi_supported = 0,
+ .number_of_cursors = 1,
};
struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
@@ -335,6 +337,117 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
.use_urgent_burst_bw = 0
};
+struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
+ .clock_limits = {
+ {
+ .state = 0,
+ .dcfclk_mhz = 560.0,
+ .fabricclk_mhz = 560.0,
+ .dispclk_mhz = 513.0,
+ .dppclk_mhz = 513.0,
+ .phyclk_mhz = 540.0,
+ .socclk_mhz = 560.0,
+ .dscclk_mhz = 171.0,
+ .dram_speed_mts = 8960.0,
+ },
+ {
+ .state = 1,
+ .dcfclk_mhz = 694.0,
+ .fabricclk_mhz = 694.0,
+ .dispclk_mhz = 642.0,
+ .dppclk_mhz = 642.0,
+ .phyclk_mhz = 600.0,
+ .socclk_mhz = 694.0,
+ .dscclk_mhz = 214.0,
+ .dram_speed_mts = 11104.0,
+ },
+ {
+ .state = 2,
+ .dcfclk_mhz = 875.0,
+ .fabricclk_mhz = 875.0,
+ .dispclk_mhz = 734.0,
+ .dppclk_mhz = 734.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 875.0,
+ .dscclk_mhz = 245.0,
+ .dram_speed_mts = 14000.0,
+ },
+ {
+ .state = 3,
+ .dcfclk_mhz = 1000.0,
+ .fabricclk_mhz = 1000.0,
+ .dispclk_mhz = 1100.0,
+ .dppclk_mhz = 1100.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1000.0,
+ .dscclk_mhz = 367.0,
+ .dram_speed_mts = 16000.0,
+ },
+ {
+ .state = 4,
+ .dcfclk_mhz = 1200.0,
+ .fabricclk_mhz = 1200.0,
+ .dispclk_mhz = 1284.0,
+ .dppclk_mhz = 1284.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1200.0,
+ .dscclk_mhz = 428.0,
+ .dram_speed_mts = 16000.0,
+ },
+ /*Extra state, no dispclk ramping*/
+ {
+ .state = 5,
+ .dcfclk_mhz = 1200.0,
+ .fabricclk_mhz = 1200.0,
+ .dispclk_mhz = 1284.0,
+ .dppclk_mhz = 1284.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1200.0,
+ .dscclk_mhz = 428.0,
+ .dram_speed_mts = 16000.0,
+ },
+ },
+ .num_states = 5,
+ .sr_exit_time_us = 8.6,
+ .sr_enter_plus_exit_time_us = 10.9,
+ .urgent_latency_us = 4.0,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+ .max_avg_sdp_bw_use_normal_percent = 40.0,
+ .max_avg_dram_bw_use_normal_percent = 40.0,
+ .writeback_latency_us = 12.0,
+ .ideal_dram_bw_after_urgent_percent = 40.0,
+ .max_request_size_bytes = 256,
+ .dram_channel_width_bytes = 2,
+ .fabric_datapath_to_dcn_data_return_bytes = 64,
+ .dcn_downspread_percent = 0.5,
+ .downspread_percent = 0.38,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 131,
+ .urgent_out_of_order_return_per_channel_bytes = 256,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .num_chans = 8,
+ .vmm_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 404.0,
+ .dummy_pstate_latency_us = 5.0,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3850,
+ .xfc_bus_transport_time_us = 20,
+ .xfc_xbuf_latency_tolerance_us = 4,
+ .use_urgent_burst_bw = 0
+};
+
struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
@@ -928,7 +1041,7 @@ static const struct resource_caps res_cap_nv14 = {
};
static const struct dc_debug_options debug_defaults_drv = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
@@ -947,7 +1060,7 @@ static const struct dc_debug_options debug_defaults_drv = {
};
static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = true,
.clock_trace = true,
@@ -1143,6 +1256,7 @@ static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_pixel_clock = 600000,
.hdmi_ycbcr420_supported = true,
.dp_ycbcr420_supported = true,
+ .fec_supported = true,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
@@ -1557,7 +1671,7 @@ static void acquire_dsc(struct resource_context *res_ctx,
}
}
-static void release_dsc(struct resource_context *res_ctx,
+void dcn20_release_dsc(struct resource_context *res_ctx,
const struct resource_pool *pool,
struct display_stream_compressor **dsc)
{
@@ -1617,7 +1731,7 @@ static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,
pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream_res.dsc)
- release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
+ dcn20_release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
}
}
@@ -1861,22 +1975,6 @@ void dcn20_populate_dml_writeback_from_context(
}
-static int get_num_odm_heads(struct pipe_ctx *pipe)
-{
- int odm_head_count = 0;
- struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
- while (next_pipe) {
- odm_head_count++;
- next_pipe = next_pipe->next_odm_pipe;
- }
- pipe = pipe->prev_odm_pipe;
- while (pipe) {
- odm_head_count++;
- pipe = pipe->prev_odm_pipe;
- }
- return odm_head_count ? odm_head_count + 1 : 0;
-}
-
int dcn20_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes)
{
@@ -1956,8 +2054,8 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.dp_lanes = 4;
pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
- switch (get_num_odm_heads(&res_ctx->pipe_ctx[i])) {
- case 2:
+ switch (get_num_odm_splits(&res_ctx->pipe_ctx[i])) {
+ case 1:
pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1;
break;
default:
@@ -1965,9 +2063,14 @@ int dcn20_populate_dml_pipes_from_context(
}
pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state
- == res_ctx->pipe_ctx[i].plane_state)
- pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].top_pipe->pipe_idx;
- else if (res_ctx->pipe_ctx[i].prev_odm_pipe) {
+ == res_ctx->pipe_ctx[i].plane_state) {
+ struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].top_pipe;
+
+ while (first_pipe->top_pipe && first_pipe->top_pipe->plane_state
+ == res_ctx->pipe_ctx[i].plane_state)
+ first_pipe = first_pipe->top_pipe;
+ pipes[pipe_cnt].pipe.src.hsplit_grp = first_pipe->pipe_idx;
+ } else if (res_ctx->pipe_ctx[i].prev_odm_pipe) {
struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].prev_odm_pipe;
while (first_pipe->prev_odm_pipe)
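Replacing the local get_num_odm_heads() with the shared get_num_odm_splits() changes the unit being counted: heads = splits + 1, which is why the switch case moves from 2 to 1 for the same 2:1 combine. A sketch of the splits count over a toy ODM chain:

#include <stdio.h>

struct pipe {
        struct pipe *next_odm_pipe;
        struct pipe *prev_odm_pipe;
};

/* Counts the neighbours of @pipe: heads - 1, i.e. the number of splits. */
static int get_num_odm_splits(struct pipe *pipe)
{
        int splits = 0;

        for (struct pipe *p = pipe->next_odm_pipe; p; p = p->next_odm_pipe)
                splits++;
        for (struct pipe *p = pipe->prev_odm_pipe; p; p = p->prev_odm_pipe)
                splits++;
        return splits;
}

int main(void)
{
        struct pipe a = { 0 }, b = { 0 };

        a.next_odm_pipe = &b;
        b.prev_odm_pipe = &a;
        /* Two heads -> one split: the switch now matches on 1, not 2. */
        printf("%d\n", get_num_odm_splits(&a));
        return 0;
}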
@@ -2052,16 +2155,20 @@ int dcn20_populate_dml_pipes_from_context(
/* todo: default max for now, until there is logic reflecting this in dc*/
pipes[pipe_cnt].dout.output_bpc = 12;
/*
- * Use max cursor settings for calculations to minimize
+ * For graphic planes the cursor number is 1 and for NV12 it is 0, avoiding inflated
* bw calculations due to cursor on/off
*/
- pipes[pipe_cnt].pipe.src.num_cursors = 2;
+ if (res_ctx->pipe_ctx[i].plane_state &&
+ res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+ pipes[pipe_cnt].pipe.src.num_cursors = 0;
+ else
+ pipes[pipe_cnt].pipe.src.num_cursors = dc->dml.ip.number_of_cursors;
+
pipes[pipe_cnt].pipe.src.cur0_src_width = 256;
pipes[pipe_cnt].pipe.src.cur0_bpp = dm_cur_32bit;
- pipes[pipe_cnt].pipe.src.cur1_src_width = 256;
- pipes[pipe_cnt].pipe.src.cur1_bpp = dm_cur_32bit;
if (!res_ctx->pipe_ctx[i].plane_state) {
+ pipes[pipe_cnt].pipe.src.is_hsplit = pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
pipes[pipe_cnt].pipe.src.source_scan = dm_horz;
pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_linear;
pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile;
@@ -2087,19 +2194,21 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = 0; /*Lb only or Full scl*/
pipes[pipe_cnt].pipe.scale_taps.htaps = 1;
pipes[pipe_cnt].pipe.scale_taps.vtaps = 1;
- pipes[pipe_cnt].pipe.src.is_hsplit = 0;
- pipes[pipe_cnt].pipe.dest.odm_combine = 0;
pipes[pipe_cnt].pipe.dest.vtotal_min = v_total;
pipes[pipe_cnt].pipe.dest.vtotal_max = v_total;
+
+ if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_2to1) {
+ pipes[pipe_cnt].pipe.src.viewport_width /= 2;
+ pipes[pipe_cnt].pipe.dest.recout_width /= 2;
+ }
} else {
struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state;
struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data;
pipes[pipe_cnt].pipe.src.immediate_flip = pln->flip_immediate;
- pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe
- && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln)
- || (res_ctx->pipe_ctx[i].top_pipe
- && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln);
+ pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln)
+ || (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln)
+ || pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
|| pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
@@ -2124,18 +2233,22 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.dcc = pln->dcc.enable;
pipes[pipe_cnt].pipe.dest.recout_width = scl->recout.width;
pipes[pipe_cnt].pipe.dest.recout_height = scl->recout.height;
- pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width;
pipes[pipe_cnt].pipe.dest.full_recout_height = scl->recout.height;
- if (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln) {
- pipes[pipe_cnt].pipe.dest.full_recout_width +=
- res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.width;
- pipes[pipe_cnt].pipe.dest.full_recout_height +=
- res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.height;
- } else if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln) {
- pipes[pipe_cnt].pipe.dest.full_recout_width +=
- res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.width;
- pipes[pipe_cnt].pipe.dest.full_recout_height +=
- res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.height;
+ pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width;
+ if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_2to1)
+ pipes[pipe_cnt].pipe.dest.full_recout_width *= 2;
+ else {
+ struct pipe_ctx *split_pipe = res_ctx->pipe_ctx[i].bottom_pipe;
+
+ while (split_pipe && split_pipe->plane_state == pln) {
+ pipes[pipe_cnt].pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
+ split_pipe = split_pipe->bottom_pipe;
+ }
+ split_pipe = res_ctx->pipe_ctx[i].top_pipe;
+ while (split_pipe && split_pipe->plane_state == pln) {
+ pipes[pipe_cnt].pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
+ split_pipe = split_pipe->top_pipe;
+ }
}
pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16;
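The rewritten full_recout_width computation above supports MPC splits of arbitrary depth by walking the chain in both directions and summing every sibling that presents the same plane (for ODM 2:1 the width is simply doubled instead). A toy-type sketch of the walk:

#include <stdio.h>

struct pipe {
        struct pipe *top_pipe, *bottom_pipe;
        void *plane_state;
        int recout_width;
};

static int full_recout_width(struct pipe *p)
{
        int width = p->recout_width;
        struct pipe *s;

        /* Sum every split sibling, in both directions, showing the same plane. */
        for (s = p->bottom_pipe; s && s->plane_state == p->plane_state;
             s = s->bottom_pipe)
                width += s->recout_width;
        for (s = p->top_pipe; s && s->plane_state == p->plane_state;
             s = s->top_pipe)
                width += s->recout_width;
        return width;
}

int main(void)
{
        static int plane;
        struct pipe a = { .plane_state = &plane, .recout_width = 960 };
        struct pipe b = { .plane_state = &plane, .recout_width = 960 };

        a.bottom_pipe = &b;
        b.top_pipe = &a;
        printf("%d\n", full_recout_width(&a)); /* 1920 */
        return 0;
}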
@@ -2302,6 +2415,7 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
+ stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
@@ -2388,7 +2502,7 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
return secondary_pipe;
}
-void dcn20_merge_pipes_for_validate(
+static void dcn20_merge_pipes_for_validate(
struct dc *dc,
struct dc_state *context)
{
@@ -2413,7 +2527,7 @@ void dcn20_merge_pipes_for_validate(
odm_pipe->prev_odm_pipe = NULL;
odm_pipe->next_odm_pipe = NULL;
if (odm_pipe->stream_res.dsc)
- release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
+ dcn20_release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
/* Clear plane_res and stream_res */
memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res));
memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res));
@@ -2451,41 +2565,29 @@ int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
int vlevel,
- bool *split)
+ bool *split,
+ bool *merge)
{
int i, pipe_idx, vlevel_split;
+ int plane_count = 0;
bool force_split = false;
- bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
+ bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
- /* Single display loop, exits if there is more than one display */
+ if (context->stream_count > 1) {
+ if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
+ avoid_split = true;
+ } else if (dc->debug.force_single_disp_pipe_split)
+ force_split = true;
+
+ /* TODO: fix dc bugs and remove this split threshold thing */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- bool exit_loop = false;
-
- if (!pipe->stream || pipe->top_pipe)
- continue;
- if (dc->debug.force_single_disp_pipe_split) {
- if (!force_split)
- force_split = true;
- else {
- force_split = false;
- exit_loop = true;
- }
- }
- if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) {
- if (avoid_split)
- avoid_split = false;
- else {
- avoid_split = true;
- exit_loop = true;
- }
- }
- if (exit_loop)
- break;
+ if (pipe->stream && !pipe->prev_odm_pipe &&
+ (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
+ ++plane_count;
}
- /* TODO: fix dc bugs and remove this split threshold thing */
- if (context->stream_count > dc->res_pool->pipe_count / 2)
+ if (plane_count > dc->res_pool->pipe_count / 2)
avoid_split = true;
/* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */
@@ -2508,11 +2610,12 @@ int dcn20_validate_apply_pipe_split_flags(
/* Split loop sets which pipe should be split based on dml outputs and dc flags */
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ int pipe_plane = context->bw_ctx.dml.vba.pipe_plane[pipe_idx];
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] > 1)
+ if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_plane] > 1)
split[i] = true;
if ((pipe->stream->view_format ==
VIEW_3D_FORMAT_SIDE_BY_SIDE ||
@@ -2525,10 +2628,44 @@ int dcn20_validate_apply_pipe_split_flags(
split[i] = true;
if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
split[i] = true;
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = dm_odm_combine_mode_2to1;
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
+ }
+ context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] =
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane];
+
+ if (pipe->prev_odm_pipe && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] != dm_odm_combine_mode_disabled) {
+ /*Already split odm pipe tree, don't try to split again*/
+ split[i] = false;
+ split[pipe->prev_odm_pipe->pipe_idx] = false;
+ } else if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state
+ && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
+ /*Already split mpc tree, don't try to split again, assumes only 2x mpc combine*/
+ split[i] = false;
+ split[pipe->top_pipe->pipe_idx] = false;
+ } else if (pipe->prev_odm_pipe || (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)) {
+ if (split[i] == false) {
+ /*Exiting mpc/odm combine*/
+ merge[i] = true;
+ if (pipe->prev_odm_pipe) {
+ ASSERT(0); /*should not actually happen yet*/
+ merge[pipe->prev_odm_pipe->pipe_idx] = true;
+ } else
+ merge[pipe->top_pipe->pipe_idx] = true;
+ } else {
+ /*Transition from mpc combine to odm combine or vice versa*/
+ ASSERT(0); /*should not actually happen yet*/
+ split[i] = true;
+ merge[i] = true;
+ if (pipe->prev_odm_pipe) {
+ split[pipe->prev_odm_pipe->pipe_idx] = true;
+ merge[pipe->prev_odm_pipe->pipe_idx] = true;
+ } else {
+ split[pipe->top_pipe->pipe_idx] = true;
+ merge[pipe->top_pipe->pipe_idx] = true;
+ }
+ }
}
- context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] =
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
+
/* Adjust dppclk when split is forced, do not bother with dispclk */
if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
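The reworked split logic above counts logical planes rather than streams before applying the avoid-split threshold: a pipe contributes only if it has a stream, is not an ODM continuation, and is not an MPC child showing its parent's plane. A toy-type sketch of the count and the pipe_count / 2 test:

#include <stdio.h>

struct pipe {
        struct pipe *top_pipe;      /* MPC parent */
        struct pipe *prev_odm_pipe; /* ODM predecessor */
        void *stream;
        void *plane_state;
};

static int count_planes(struct pipe *pipes, int pipe_count)
{
        int planes = 0;

        for (int i = 0; i < pipe_count; i++) {
                struct pipe *p = &pipes[i];

                /*
                 * Count each plane once: skip ODM continuations and MPC
                 * children that share the parent's plane.
                 */
                if (p->stream && !p->prev_odm_pipe &&
                    (!p->top_pipe || p->top_pipe->plane_state != p->plane_state))
                        planes++;
        }
        return planes;
}

int main(void)
{
        static struct pipe pipes[4];
        static int stream, plane;

        pipes[0].stream = &stream; pipes[0].plane_state = &plane;
        pipes[1].stream = &stream; pipes[1].plane_state = &plane;
        pipes[1].top_pipe = &pipes[0]; /* MPC child showing the same plane */

        /* One logical plane on four pipes: under pipe_count / 2, so
         * splitting stays allowed. */
        printf("planes=%d avoid_split=%d\n", count_planes(pipes, 4),
               count_planes(pipes, 4) > 4 / 2);
        return 0;
}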
@@ -2570,7 +2707,7 @@ bool dcn20_fast_validate_bw(
if (vlevel > context->bw_ctx.dml.soc.num_states)
goto validate_fail;
- vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split);
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL);
/*initialize pipe_just_split_from to invalid idx*/
for (i = 0; i < MAX_PIPES; i++)
@@ -2790,6 +2927,9 @@ void dcn20_calculate_dlg_params(
!= dm_dram_clock_change_unsupported;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
+ if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
+ context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
+
/*
* An artifact of dml pipe split/odm is that pipes get merged back together for
* calculation. Therefore we need to only extract for first pipe in ascending index order
@@ -3027,7 +3167,7 @@ static struct dc_cap_funcs cap_funcs = {
};
-enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state)
+enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
enum dc_status result = DC_OK;
@@ -3053,7 +3193,7 @@ static struct resource_funcs dcn20_res_pool_funcs = {
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
- .get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
@@ -3291,6 +3431,9 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
uint32_t hw_internal_rev)
{
+ if (ASICREV_IS_NAVI14_M(hw_internal_rev))
+ return &dcn2_0_nv14_soc;
+
if (ASICREV_IS_NAVI12_P(hw_internal_rev))
return &dcn2_0_nv12_soc;
@@ -3772,6 +3915,15 @@ static bool dcn20_resource_construct(
dcn20_hw_sequencer_construct(dc);
+ // If NV12, set the PG function pointer to NULL. It's not that
+ // PG isn't supported for NV12; it's that we don't want to
+ // program the registers because that would cause more power
+ // to be consumed. We could have created dcn20_init_hw to get
+ // the same effect by checking the ASIC rev, but there was a
+ // request at some point not to check ASIC rev in the hw sequencer.
+
dc->caps.max_planes = pool->base.pipe_count;
for (i = 0; i < dc->caps.max_planes; ++i)
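
The split/merge bookkeeping in the hunk above is easiest to follow in isolation: already-split ODM/MPC trees are left alone, a cleared split flag on a combined pipe becomes a merge, and a set flag on a pipe combined the other way becomes a merge-plus-resplit transition. Below is a minimal, self-contained C sketch of that decision table; all names and types here are simplified stand-ins, not the driver's own structures.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins; the real loop walks struct pipe_ctx entries. */
struct fake_pipe {
	int top_idx;        /* MPC top pipe sharing our plane, or -1 */
	int prev_odm_idx;   /* previous ODM pipe, or -1 */
	bool odm_enabled;   /* DML wants ODM combine for this plane */
};

static void apply_split_merge(const struct fake_pipe *p, int i,
			      bool *split, bool *merge)
{
	if (p->prev_odm_idx >= 0 && p->odm_enabled) {
		/* ODM tree already split: don't split again */
		split[i] = false;
		split[p->prev_odm_idx] = false;
	} else if (p->top_idx >= 0 && !p->odm_enabled) {
		/* MPC tree already split (2x combine assumed) */
		split[i] = false;
		split[p->top_idx] = false;
	} else if (p->prev_odm_idx >= 0 || p->top_idx >= 0) {
		int other = p->prev_odm_idx >= 0 ? p->prev_odm_idx
						 : p->top_idx;
		if (!split[i]) {
			/* exiting mpc/odm combine: merge both halves
			 * (the driver still ASSERTs on the odm case) */
			merge[i] = true;
			merge[other] = true;
		} else {
			/* mpc <-> odm transition: merge, then re-split */
			split[i] = merge[i] = true;
			split[other] = merge[other] = true;
		}
	}
}

int main(void)
{
	bool split[2] = { false, false }, merge[2] = { false, false };
	/* pipe 1 was the second half of an ODM combine of pipe 0, and
	 * DML no longer wants ODM: both halves should be merged back. */
	const struct fake_pipe p1 = { .top_idx = -1, .prev_odm_idx = 0,
				      .odm_enabled = false };

	apply_split_merge(&p1, 1, split, merge);
	printf("split[1]=%d merge[1]=%d merge[0]=%d\n",
	       split[1], merge[1], merge[0]);
	return 0;
}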
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index f5893840b79b..9d5bff9455fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -119,14 +119,15 @@ void dcn20_set_mcif_arb_params(
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
-void dcn20_merge_pipes_for_validate(
- struct dc *dc,
- struct dc_state *context);
int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
int vlevel,
- bool *split);
+ bool *split,
+ bool *merge);
+void dcn20_release_dsc(struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct display_stream_compressor **dsc);
bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx);
void dcn20_split_stream_for_mpc(
struct resource_context *res_ctx,
@@ -159,7 +160,7 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream);
enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
-enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state);
+enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state);
void dcn20_patch_bounding_box(
struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index 9b70a1e7b962..99a7ef6ab878 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -616,5 +616,6 @@ void dcn20_stream_encoder_construct(
enc1->regs = regs;
enc1->se_shift = se_shift;
enc1->se_mask = se_mask;
+ enc1->base.stream_enc_inst = eng_id - ENGINE_ID_DIGA;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
index 02fafb013fc6..f1ef46e8da5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
@@ -34,13 +34,6 @@
#define BASE(seg) \
BASE_INNER(seg)
-#define SRI(reg_name, block, id)\
- .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- mm ## block ## id ## _ ## reg_name
-
-#define SF(reg_name, field_name, post_fix)\
- .field_name = reg_name ## __ ## field_name ## post_fix
-
#define DCN20_VMID_REG_LIST(id)\
SRI(CNTL, DCN_VM_CONTEXT, id),\
SRI(PAGE_TABLE_BASE_ADDR_HI32, DCN_VM_CONTEXT, id),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index f546260c15b7..5e2d14b897af 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -141,7 +141,7 @@ int hubbub21_init_dchub(struct hubbub *hubbub,
return NUM_VMID;
}
-void hubbub21_program_urgent_watermarks(
+bool hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -149,6 +149,7 @@ void hubbub21_program_urgent_watermarks(
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
/* Repeat for water mark set A, B, C and D. */
/* clock state A */
@@ -163,7 +164,8 @@ void hubbub21_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
+ wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
if (safe_to_lower || watermarks->a.frac_urg_bw_flip
@@ -172,7 +174,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
- }
+ } else if (watermarks->a.frac_urg_bw_flip
+ < hubbub1->watermarks.a.frac_urg_bw_flip)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.frac_urg_bw_nom
> hubbub1->watermarks.a.frac_urg_bw_nom) {
@@ -180,14 +184,18 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
- }
+ } else if (watermarks->a.frac_urg_bw_nom
+ < hubbub1->watermarks.a.frac_urg_bw_nom)
+ wm_pending = true;
+
if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) {
hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
- }
+ } else if (watermarks->a.urgent_latency_ns < hubbub1->watermarks.a.urgent_latency_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
@@ -201,7 +209,8 @@ void hubbub21_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
+ wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
if (safe_to_lower || watermarks->a.frac_urg_bw_flip
@@ -210,7 +219,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->a.frac_urg_bw_flip);
- }
+ } else if (watermarks->a.frac_urg_bw_flip
+ < hubbub1->watermarks.a.frac_urg_bw_flip)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.frac_urg_bw_nom
> hubbub1->watermarks.a.frac_urg_bw_nom) {
@@ -218,7 +229,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom);
- }
+ } else if (watermarks->a.frac_urg_bw_nom
+ < hubbub1->watermarks.a.frac_urg_bw_nom)
+ wm_pending = true;
if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) {
hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
@@ -226,7 +239,8 @@ void hubbub21_program_urgent_watermarks(
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
- }
+ } else if (watermarks->b.urgent_latency_ns < hubbub1->watermarks.b.urgent_latency_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
@@ -240,7 +254,8 @@ void hubbub21_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
+ wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
if (safe_to_lower || watermarks->a.frac_urg_bw_flip
@@ -249,7 +264,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->a.frac_urg_bw_flip);
- }
+ } else if (watermarks->a.frac_urg_bw_flip
+ < hubbub1->watermarks.a.frac_urg_bw_flip)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.frac_urg_bw_nom
> hubbub1->watermarks.a.frac_urg_bw_nom) {
@@ -257,7 +274,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom);
- }
+ } else if (watermarks->a.frac_urg_bw_nom
+ < hubbub1->watermarks.a.frac_urg_bw_nom)
+ wm_pending = true;
if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) {
hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
@@ -265,7 +284,8 @@ void hubbub21_program_urgent_watermarks(
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
- }
+ } else if (watermarks->c.urgent_latency_ns < hubbub1->watermarks.c.urgent_latency_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
@@ -279,7 +299,8 @@ void hubbub21_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
+ wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
if (safe_to_lower || watermarks->a.frac_urg_bw_flip
@@ -288,7 +309,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->a.frac_urg_bw_flip);
- }
+ } else if (watermarks->a.frac_urg_bw_flip
+ < hubbub1->watermarks.a.frac_urg_bw_flip)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.frac_urg_bw_nom
> hubbub1->watermarks.a.frac_urg_bw_nom) {
@@ -296,7 +319,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom);
- }
+ } else if (watermarks->a.frac_urg_bw_nom
+ < hubbub1->watermarks.a.frac_urg_bw_nom)
+ wm_pending = true;
if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) {
hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
@@ -304,10 +329,13 @@ void hubbub21_program_urgent_watermarks(
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
- }
+ } else if (watermarks->d.urgent_latency_ns < hubbub1->watermarks.d.urgent_latency_ns)
+ wm_pending = true;
+
+ return wm_pending;
}
-void hubbub21_program_stutter_watermarks(
+bool hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -315,6 +343,7 @@ void hubbub21_program_stutter_watermarks(
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
/* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
@@ -330,7 +359,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
@@ -345,7 +376,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
@@ -361,7 +394,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
@@ -376,7 +411,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
@@ -392,7 +429,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
@@ -407,7 +446,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
@@ -423,7 +464,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
@@ -438,10 +481,14 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
+
+ return wm_pending;
}
-void hubbub21_program_pstate_watermarks(
+bool hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -450,6 +497,8 @@ void hubbub21_program_pstate_watermarks(
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
+
/* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
> hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
@@ -464,7 +513,9 @@ void hubbub21_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
@@ -480,7 +531,9 @@ void hubbub21_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
@@ -496,7 +549,9 @@ void hubbub21_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
@@ -512,20 +567,30 @@ void hubbub21_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
+
+ return wm_pending;
}
-void hubbub21_program_watermarks(
+bool hubbub21_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ bool wm_pending = false;
+
+ if (hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
- hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+ if (hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
/*
* The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
@@ -549,6 +614,8 @@ void hubbub21_program_watermarks(
DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);
hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+
+ return wm_pending;
}
void hubbub21_wm_read_state(struct hubbub *hubbub,
@@ -635,6 +702,7 @@ static const struct hubbub_funcs hubbub21_funcs = {
.wm_read_state = hubbub21_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
.program_watermarks = hubbub21_program_watermarks,
+ .allow_self_refresh_control = hubbub1_allow_self_refresh_control,
.apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa,
};
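
Every watermark setter converted in this file follows the same three-way pattern: program the register when raising (or when safe_to_lower allows lowering), and otherwise report a pending lower so the caller can retry once lowering becomes safe. A minimal sketch of that pattern, with a stand-in print in place of REG_SET and a plain variable in place of the cached hubbub1 watermark state:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for REG_SET(); the real code programs a DCHUBBUB register. */
static void program_reg(const char *name, unsigned int value)
{
	printf("program %s = %u\n", name, value);
}

/*
 * Returns true when a requested lower value could not be applied yet,
 * mirroring the wm_pending return added to hubbub21_program_*_watermarks().
 */
static bool program_one_watermark(unsigned int *cached, unsigned int wanted,
				  bool safe_to_lower, const char *name)
{
	if (safe_to_lower || wanted > *cached) {
		*cached = wanted;
		program_reg(name, wanted);
		return false;
	}
	return wanted < *cached;   /* lower requested but unsafe: pending */
}

int main(void)
{
	unsigned int urgent_ns = 500;
	bool pending;

	/* raising is always applied immediately */
	pending = program_one_watermark(&urgent_ns, 700, false, "URGENT_A");
	/* lowering is deferred until safe_to_lower */
	pending |= program_one_watermark(&urgent_ns, 400, false, "URGENT_A");
	printf("wm_pending=%d\n", pending);   /* 1: retry needed */

	pending = program_one_watermark(&urgent_ns, 400, true, "URGENT_A");
	printf("wm_pending=%d\n", pending);   /* 0: lowered */
	return 0;
}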
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
index c4840dfb1fa5..ef3ef28509ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
@@ -113,22 +113,22 @@
void dcn21_dchvm_init(struct hubbub *hubbub);
int hubbub21_init_dchub(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config);
-void hubbub21_program_watermarks(
+bool hubbub21_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
-void hubbub21_program_urgent_watermarks(
+bool hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
-void hubbub21_program_stutter_watermarks(
+bool hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
-void hubbub21_program_pstate_watermarks(
+bool hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index cf09b9335728..d285ba622d61 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -79,32 +79,47 @@ void apply_DEDCN21_142_wa_for_hostvm_deadline(
struct _vcs_dpi_display_dlg_regs_st *dlg_attr)
{
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
- uint32_t cur_value;
+ uint32_t refcyc_per_vm_group_vblank;
+ uint32_t refcyc_per_vm_req_vblank;
+ uint32_t refcyc_per_vm_group_flip;
+ uint32_t refcyc_per_vm_req_flip;
+ const uint32_t uninitialized_hw_default = 0;
- REG_GET(VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, &cur_value);
- if (cur_value > dlg_attr->refcyc_per_vm_group_vblank)
+ REG_GET(VBLANK_PARAMETERS_5,
+ REFCYC_PER_VM_GROUP_VBLANK, &refcyc_per_vm_group_vblank);
+
+ if (refcyc_per_vm_group_vblank == uninitialized_hw_default ||
+ refcyc_per_vm_group_vblank > dlg_attr->refcyc_per_vm_group_vblank)
REG_SET(VBLANK_PARAMETERS_5, 0,
REFCYC_PER_VM_GROUP_VBLANK, dlg_attr->refcyc_per_vm_group_vblank);
REG_GET(VBLANK_PARAMETERS_6,
- REFCYC_PER_VM_REQ_VBLANK,
- &cur_value);
- if (cur_value > dlg_attr->refcyc_per_vm_req_vblank)
+ REFCYC_PER_VM_REQ_VBLANK, &refcyc_per_vm_req_vblank);
+
+ if (refcyc_per_vm_req_vblank == uninitialized_hw_default ||
+ refcyc_per_vm_req_vblank > dlg_attr->refcyc_per_vm_req_vblank)
REG_SET(VBLANK_PARAMETERS_6, 0,
REFCYC_PER_VM_REQ_VBLANK, dlg_attr->refcyc_per_vm_req_vblank);
- REG_GET(FLIP_PARAMETERS_3, REFCYC_PER_VM_GROUP_FLIP, &cur_value);
- if (cur_value > dlg_attr->refcyc_per_vm_group_flip)
+ REG_GET(FLIP_PARAMETERS_3,
+ REFCYC_PER_VM_GROUP_FLIP, &refcyc_per_vm_group_flip);
+
+ if (refcyc_per_vm_group_flip == uninitialized_hw_default ||
+ refcyc_per_vm_group_flip > dlg_attr->refcyc_per_vm_group_flip)
REG_SET(FLIP_PARAMETERS_3, 0,
REFCYC_PER_VM_GROUP_FLIP, dlg_attr->refcyc_per_vm_group_flip);
- REG_GET(FLIP_PARAMETERS_4, REFCYC_PER_VM_REQ_FLIP, &cur_value);
- if (cur_value > dlg_attr->refcyc_per_vm_req_flip)
+ REG_GET(FLIP_PARAMETERS_4,
+ REFCYC_PER_VM_REQ_FLIP, &refcyc_per_vm_req_flip);
+
+ if (refcyc_per_vm_req_flip == uninitialized_hw_default ||
+ refcyc_per_vm_req_flip > dlg_attr->refcyc_per_vm_req_flip)
REG_SET(FLIP_PARAMETERS_4, 0,
REFCYC_PER_VM_REQ_FLIP, dlg_attr->refcyc_per_vm_req_flip);
REG_SET(FLIP_PARAMETERS_5, 0,
REFCYC_PER_PTE_GROUP_FLIP_C, dlg_attr->refcyc_per_pte_group_flip_c);
+
REG_SET(FLIP_PARAMETERS_6, 0,
REFCYC_PER_META_CHUNK_FLIP_C, dlg_attr->refcyc_per_meta_chunk_flip_c);
}
@@ -325,13 +340,9 @@ void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
{
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
- PHYSICAL_ADDRESS_LOC mc_vm_apt_default;
PHYSICAL_ADDRESS_LOC mc_vm_apt_low;
PHYSICAL_ADDRESS_LOC mc_vm_apt_high;
- // The format of default addr is 48:12 of the 48 bit addr
- mc_vm_apt_default.quad_part = apt->sys_default.quad_part >> 12;
-
// The format of high/low are 48:18 of the 48 bit addr
mc_vm_apt_low.quad_part = apt->sys_low.quad_part >> 18;
mc_vm_apt_high.quad_part = apt->sys_high.quad_part >> 18;
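
The reworked DEDCN21_142 workaround above only ever programs a deadline register downward, except when the register still holds its uninitialized power-on default of zero. A hedged, stand-alone sketch of that update rule, with REG_GET/REG_SET replaced by plain values:

#include <stdint.h>
#include <stdio.h>

/*
 * Mirror of the update rule in apply_DEDCN21_142_wa_for_hostvm_deadline():
 * take the new value when the current one is the uninitialized HW default
 * (0) or when the new value is a tighter (smaller) deadline.
 */
static uint32_t maybe_lower_deadline(uint32_t current, uint32_t wanted)
{
	const uint32_t uninitialized_hw_default = 0;

	if (current == uninitialized_hw_default || current > wanted)
		return wanted;     /* would REG_SET the new deadline */
	return current;            /* keep the tighter value in place */
}

int main(void)
{
	printf("%u\n", maybe_lower_deadline(0, 120));    /* 120: first init */
	printf("%u\n", maybe_lower_deadline(200, 120));  /* 120: lowered */
	printf("%u\n", maybe_lower_deadline(100, 120));  /* 100: kept */
	return 0;
}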
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index 081ad8e43d58..ada65b1a7eb1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -112,3 +112,25 @@ void dcn21_optimize_pwr_state(
true);
}
+/* If the user hotplugs an HDMI monitor while the monitor is off,
+ * the OS will do a mode set (with output timing) but keep the output off.
+ * In this case DAL will ask vbios to power up the pll in the PHY.
+ * If the user unplugs the monitor (while we are in monitor off) or the
+ * system attempts to enter modern standby (in which we disable the PLL),
+ * the PHY will hang on the next mode set attempt:
+ * if a PLL enable is followed by a PLL disable (without executing lane
+ * enable/disable), RDPCS_PHY_DP_MPLLB_STATE remains 1,
+ * which indicates that the PLL disable attempt didn't actually go through.
+ * As a workaround, insert a PHY lane enable/disable before the PLL disable.
+ */
+void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
+{
+ if (!pipe_ctx->stream->dpms_off)
+ return;
+
+ pipe_ctx->stream->dpms_off = false;
+ core_link_enable_stream(context, pipe_ctx);
+ core_link_disable_stream(pipe_ctx);
+ pipe_ctx->stream->dpms_off = true;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
index 182736096123..26bf24d3b59f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
@@ -44,4 +44,7 @@ void dcn21_optimize_pwr_state(
const struct dc *dc,
struct dc_state *context);
+void dcn21_PLAT_58856_wa(struct dc_state *context,
+ struct pipe_ctx *pipe_ctx);
+
#endif /* __DC_HWSS_DCN21_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index 4861aa5c59ae..b9ff9767e08f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -34,6 +34,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
@@ -51,7 +52,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
.pipe_control_lock = dcn20_pipe_control_lock,
- .pipe_control_lock_global = dcn20_pipe_control_lock_global,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
.prepare_bandwidth = dcn20_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
@@ -104,6 +105,8 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
.enable_stream_timing = dcn20_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
+ .is_panel_backlight_on = dce110_is_panel_backlight_on,
+ .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = dcn20_disable_stream_gating,
.enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
@@ -116,7 +119,6 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
.enable_power_gating_plane = dcn20_enable_power_gating_plane,
.dpp_pg_control = dcn20_dpp_pg_control,
.hubp_pg_control = dcn20_hubp_pg_control,
- .dsc_pg_control = NULL,
.update_odm = dcn20_update_odm,
.dsc_pg_control = dcn20_dsc_pg_control,
.get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
@@ -128,6 +130,7 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
.dccg_init = dcn20_dccg_init,
.set_blend_lut = dcn20_set_blend_lut,
.set_shaper_3dlut = dcn20_set_shaper_3dlut,
+ .PLAT_58856_wa = dcn21_PLAT_58856_wa,
};
void dcn21_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 0d506d30d6b6..51b5910cd05f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -60,6 +60,7 @@
#include "dcn20/dcn20_dccg.h"
#include "dcn21_hubbub.h"
#include "dcn10/dcn10_resource.h"
+#include "dce110/dce110_resource.h"
#include "dcn20/dcn20_dwb.h"
#include "dcn20/dcn20_mmhubbub.h"
@@ -83,7 +84,7 @@
#include "dcn21_resource.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
-#include "../dce/dmub_psr.h"
+#include "dce/dmub_psr.h"
#define SOC_BOUNDING_BOX_VALID false
#define DC_LOGGER_INIT(logger)
@@ -155,17 +156,18 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = {
.xfc_supported = false,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
- .ptoi_supported = 0
+ .ptoi_supported = 0,
+ .number_of_cursors = 1,
};
struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.clock_limits = {
{
.state = 0,
- .dcfclk_mhz = 304.0,
- .fabricclk_mhz = 600.0,
- .dispclk_mhz = 618.0,
- .dppclk_mhz = 440.0,
+ .dcfclk_mhz = 400.0,
+ .fabricclk_mhz = 400.0,
+ .dispclk_mhz = 600.0,
+ .dppclk_mhz = 400.00,
.phyclk_mhz = 600.0,
.socclk_mhz = 278.0,
.dscclk_mhz = 205.67,
@@ -173,10 +175,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
},
{
.state = 1,
- .dcfclk_mhz = 304.0,
- .fabricclk_mhz = 600.0,
- .dispclk_mhz = 618.0,
- .dppclk_mhz = 618.0,
+ .dcfclk_mhz = 464.52,
+ .fabricclk_mhz = 800.0,
+ .dispclk_mhz = 654.55,
+ .dppclk_mhz = 626.09,
.phyclk_mhz = 600.0,
.socclk_mhz = 278.0,
.dscclk_mhz = 205.67,
@@ -184,32 +186,65 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
},
{
.state = 2,
- .dcfclk_mhz = 608.0,
- .fabricclk_mhz = 1066.0,
- .dispclk_mhz = 888.0,
- .dppclk_mhz = 888.0,
- .phyclk_mhz = 810.0,
+ .dcfclk_mhz = 514.29,
+ .fabricclk_mhz = 933.0,
+ .dispclk_mhz = 757.89,
+ .dppclk_mhz = 685.71,
+ .phyclk_mhz = 600.0,
.socclk_mhz = 278.0,
.dscclk_mhz = 287.67,
- .dram_speed_mts = 2133.0,
+ .dram_speed_mts = 1866.0,
},
{
.state = 3,
- .dcfclk_mhz = 676.0,
- .fabricclk_mhz = 1600.0,
- .dispclk_mhz = 1015.0,
- .dppclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
+ .dcfclk_mhz = 576.00,
+ .fabricclk_mhz = 1067.0,
+ .dispclk_mhz = 847.06,
+ .dppclk_mhz = 757.89,
+ .phyclk_mhz = 600.0,
.socclk_mhz = 715.0,
.dscclk_mhz = 318.334,
- .dram_speed_mts = 4266.0,
+ .dram_speed_mts = 2134.0,
},
{
.state = 4,
- .dcfclk_mhz = 810.0,
+ .dcfclk_mhz = 626.09,
+ .fabricclk_mhz = 1200.0,
+ .dispclk_mhz = 900.00,
+ .dppclk_mhz = 847.06,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 953.0,
+ .dscclk_mhz = 489.0,
+ .dram_speed_mts = 2400.0,
+ },
+ {
+ .state = 5,
+ .dcfclk_mhz = 685.71,
+ .fabricclk_mhz = 1333.0,
+ .dispclk_mhz = 1028.57,
+ .dppclk_mhz = 960.00,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 278.0,
+ .dscclk_mhz = 287.67,
+ .dram_speed_mts = 2666.0,
+ },
+ {
+ .state = 6,
+ .dcfclk_mhz = 757.89,
+ .fabricclk_mhz = 1467.0,
+ .dispclk_mhz = 1107.69,
+ .dppclk_mhz = 1028.57,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 715.0,
+ .dscclk_mhz = 318.334,
+ .dram_speed_mts = 3200.0,
+ },
+ {
+ .state = 7,
+ .dcfclk_mhz = 847.06,
.fabricclk_mhz = 1600.0,
.dispclk_mhz = 1395.0,
- .dppclk_mhz = 1285.0,
+ .dppclk_mhz = 1285.00,
.phyclk_mhz = 1325.0,
.socclk_mhz = 953.0,
.dscclk_mhz = 489.0,
@@ -217,8 +252,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
},
/*Extra state, no dispclk ramping*/
{
- .state = 5,
- .dcfclk_mhz = 810.0,
+ .state = 8,
+ .dcfclk_mhz = 847.06,
.fabricclk_mhz = 1600.0,
.dispclk_mhz = 1395.0,
.dppclk_mhz = 1285.0,
@@ -265,7 +300,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.xfc_bus_transport_time_us = 4,
.xfc_xbuf_latency_tolerance_us = 4,
.use_urgent_burst_bw = 1,
- .num_states = 5
+ .num_states = 9
};
#ifndef MAX
@@ -820,11 +855,12 @@ static const struct dc_plane_cap plane_cap = {
};
static const struct dc_debug_options debug_defaults_drv = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
+ .min_disp_clk_khz = 100000,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
@@ -840,7 +876,7 @@ static const struct dc_debug_options debug_defaults_drv = {
};
static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = true,
.clock_trace = true,
@@ -856,6 +892,7 @@ static const struct dc_debug_options debug_defaults_diags = {
enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
+ DCN20_CLK_SRC_PLL2,
DCN20_CLK_SRC_TOTAL_DCN21
};
@@ -960,6 +997,9 @@ static void dcn21_resource_destruct(struct dcn21_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
+ if (pool->base.psr != NULL)
+ dmub_psr_destroy(&pool->base.psr);
+
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
@@ -1333,26 +1373,78 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
{
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
- int i;
+ unsigned int i, j, k;
+ int closest_clk_lvl;
+
+ // diags does not retrieve proper values from the SMU;
+ // cap states to 5 and make state 5 the max state
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) || IS_DIAG_DC(dc->ctx->dce_environment)) {
+ dcn2_1_soc.num_states = 5;
+
+ dcn2_1_soc.clock_limits[5].state = 5;
+ dcn2_1_soc.clock_limits[5].dcfclk_mhz = 810.0;
+ dcn2_1_soc.clock_limits[5].fabricclk_mhz = 1600.0;
+ dcn2_1_soc.clock_limits[5].dispclk_mhz = 1395.0;
+ dcn2_1_soc.clock_limits[5].dppclk_mhz = 1285.0;
+ dcn2_1_soc.clock_limits[5].phyclk_mhz = 1325.0;
+ dcn2_1_soc.clock_limits[5].socclk_mhz = 953.0;
+ dcn2_1_soc.clock_limits[5].dscclk_mhz = 489.0;
+ dcn2_1_soc.clock_limits[5].dram_speed_mts = 4266.0;
+ } else {
+ dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
+ dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
+ dcn2_1_soc.num_chans = bw_params->num_channels;
+
+ /* Vmin: leave lowest DCN clocks, override with dcfclk, fclk, memclk from fuse */
+ dcn2_1_soc.clock_limits[0].state = 0;
+ dcn2_1_soc.clock_limits[0].dcfclk_mhz = clk_table->entries[0].dcfclk_mhz;
+ dcn2_1_soc.clock_limits[0].fabricclk_mhz = clk_table->entries[0].fclk_mhz;
+ dcn2_1_soc.clock_limits[0].socclk_mhz = clk_table->entries[0].socclk_mhz;
+ dcn2_1_soc.clock_limits[0].dram_speed_mts = clk_table->entries[0].memclk_mhz * 2;
+
+ /*
+ * Other levels: find the closest DCN clocks that fit the given clock
+ * limit, using dcfclk as the indicator
+ */
- dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
- dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
- dcn2_1_soc.num_chans = bw_params->num_channels;
+ closest_clk_lvl = -1;
+ /* index currently being filled */
+ k = 1;
+ for (i = 1; i < clk_table->num_entries; i++) {
+ /* loop backwards, skip the duplicate state; +1 because the SMU has precision issues */
+ for (j = dcn2_1_soc.num_states - 2; j >= k; j--) {
+ if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
- for (i = 0; i < clk_table->num_entries; i++) {
+ /* if a level that fits was found, use its DCN clocks; if not, go to the next clock limit */
+ if (closest_clk_lvl != -1) {
+ dcn2_1_soc.clock_limits[k].state = i;
+ dcn2_1_soc.clock_limits[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ dcn2_1_soc.clock_limits[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ dcn2_1_soc.clock_limits[k].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ dcn2_1_soc.clock_limits[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+ dcn2_1_soc.clock_limits[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+ dcn2_1_soc.clock_limits[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ dcn2_1_soc.clock_limits[k].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ dcn2_1_soc.clock_limits[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ dcn2_1_soc.clock_limits[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ dcn2_1_soc.clock_limits[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ dcn2_1_soc.clock_limits[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ k++;
+ }
+ }
- dcn2_1_soc.clock_limits[i].state = i;
- dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+ /* duplicate last level */
+ dcn2_1_soc.clock_limits[k] = dcn2_1_soc.clock_limits[k - 1];
+ dcn2_1_soc.clock_limits[k].state = k;
+ dcn2_1_soc.num_states = k + 1;
}
- dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1];
- dcn2_1_soc.num_states = i;
- // diags does not retrieve proper values from SMU, do not update DML instance for diags
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
+ dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
}
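
The rewritten update_bw_bounding_box() above merges the SMU clock table into the static dcn2_1_soc table: level 0 keeps the lowest predefined DCN clocks, each further SMU entry borrows dispclk/dppclk/etc. from the closest predefined level whose dcfclk it can satisfy, and the final level is duplicated. A condensed sketch of the matching step, with only dcfclk and dispclk standing in for the full set of copied clocks:

#include <stdio.h>

#define MAX_STATES 9

struct level { double dcfclk_mhz, dispclk_mhz; };

/*
 * For each SMU-reported dcfclk, scan the predefined levels backwards
 * (highest first, skipping the duplicated top state) for the closest
 * level whose dcfclk does not exceed it, and borrow its DCN clocks.
 */
static int merge_clock_table(const struct level *soc, int num_soc_states,
			     const double *smu_dcfclk, int num_smu,
			     struct level *out)
{
	int i, j, k = 1;

	out[0] = soc[0];                       /* Vmin keeps lowest clocks */
	for (i = 1; i < num_smu; i++) {
		int closest = -1;

		for (j = num_soc_states - 2; j >= k; j--) {
			if (soc[j].dcfclk_mhz <= smu_dcfclk[i]) {
				closest = j;
				break;
			}
		}
		if (closest != -1) {
			out[k].dcfclk_mhz = smu_dcfclk[i];
			out[k].dispclk_mhz = soc[closest].dispclk_mhz;
			k++;
		}
	}
	out[k] = out[k - 1];                   /* duplicate last level */
	return k + 1;                          /* new num_states */
}

int main(void)
{
	const struct level soc[MAX_STATES] = {
		{400, 600}, {465, 655}, {514, 758}, {576, 847},
		{626, 900}, {686, 1029}, {758, 1108}, {847, 1395},
		{847, 1395},
	};
	const double smu[] = { 400, 500, 800 };
	struct level out[MAX_STATES];
	int i, n = merge_clock_table(soc, MAX_STATES, smu, 3, out);

	for (i = 0; i < n; i++)
		printf("state %d: dcf=%.0f disp=%.0f\n",
		       i, out[i].dcfclk_mhz, out[i].dispclk_mhz);
	return 0;
}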
/* Temporary Place holder until we can get them from fuse */
@@ -1474,6 +1566,7 @@ static struct dce_hwseq *dcn21_hwseq_create(
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
hws->wa.DEGVIDCN21 = true;
+ hws->wa.disallow_self_refresh_during_multi_plane_transition = true;
}
return hws;
}
@@ -1497,6 +1590,7 @@ static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_pixel_clock = 600000,
.hdmi_ycbcr420_supported = true,
.dp_ycbcr420_supported = true,
+ .fec_supported = true,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
@@ -1637,6 +1731,19 @@ static int dcn21_populate_dml_pipes_from_context(
return pipe_cnt;
}
+enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state)
+{
+ enum dc_status result = DC_OK;
+
+ if (plane_state->ctx->dc->debug.disable_dcc == DCC_ENABLE) {
+ plane_state->dcc.enable = 1;
+ /* align to our worst case block width */
+ plane_state->dcc.meta_pitch = ((plane_state->src_rect.width + 1023) / 1024) * 1024;
+ }
+ result = dcn20_patch_unknown_plane_state(plane_state);
+ return result;
+}
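
The meta_pitch expression above is a plain round-up-to-1024 alignment: for example, a 1920-pixel-wide source rounds up to 2048. A one-liner making the idiom explicit (the function name is illustrative, not the driver's):

#include <stdio.h>

/* Round width up to the next multiple of the worst-case block width. */
static unsigned int align_up_1024(unsigned int width)
{
	return ((width + 1023) / 1024) * 1024;
}

int main(void)
{
	printf("%u\n", align_up_1024(1920));  /* 2048 */
	printf("%u\n", align_up_1024(1024));  /* 1024 */
	return 0;
}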
+
static struct resource_funcs dcn21_res_pool_funcs = {
.destroy = dcn21_destroy_resource_pool,
.link_enc_create = dcn21_link_encoder_create,
@@ -1646,7 +1753,7 @@ static struct resource_funcs dcn21_res_pool_funcs = {
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
- .get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
+ .patch_unknown_plane_state = dcn21_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.update_bw_bounding_box = update_bw_bounding_box
@@ -1693,6 +1800,7 @@ static bool dcn21_resource_construct(
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
+ dc->caps.is_apu = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -1718,6 +1826,10 @@ static bool dcn21_resource_construct(
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL1,
&clk_src_regs[1], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;
@@ -1752,9 +1864,15 @@ static bool dcn21_resource_construct(
goto create_fail;
}
- // Leave as NULL to not affect current dmcu psr programming sequence
- // Will be uncommented when functionality is confirmed to be working
- pool->base.psr = NULL;
+ if (dc->debug.disable_dmcu) {
+ pool->base.psr = dmub_psr_create(ctx);
+
+ if (pool->base.psr == NULL) {
+ dm_error("DC: failed to create psr obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+ }
pool->base.abm = dce_abm_create(ctx,
&abm_regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
index 626d22d437f4..968c46dfb506 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -32,6 +32,7 @@ struct cp_psp_stream_config {
uint8_t otg_inst;
uint8_t link_enc_inst;
uint8_t stream_enc_inst;
+ uint8_t mst_supported;
void *dm_stream_ctx;
bool dpms_off;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 485a9c62ec58..5bbbafacc720 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2614,6 +2614,14 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+ if (mode_lib->vba.DRAMClockChangeWatermark >
+ dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
+ mode_lib->vba.MinTTUVBlank[k] += 25;
+ }
+ }
mode_lib->vba.DRAMClockChangeWatermark += 25;
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else if (mode_lib->vba.DummyPStateCheck &&
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 658f81e757e9..dfd3be452766 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -25,7 +25,7 @@
#ifndef __DISPLAY_MODE_STRUCTS_H__
#define __DISPLAY_MODE_STRUCTS_H__
-#define MAX_CLOCK_LIMIT_STATES 8
+#define MAX_CLOCK_LIMIT_STATES 9
typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st;
typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st;
@@ -61,12 +61,15 @@ struct _vcs_dpi_voltage_scaling_st {
double dram_speed_mts;
double fabricclk_mhz;
double dispclk_mhz;
+ double dram_bw_per_chan_gbps;
double phyclk_mhz;
double dppclk_mhz;
double dtbclk_mhz;
};
struct _vcs_dpi_soc_bounding_box_st {
+ struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES];
+ unsigned int num_states;
double sr_exit_time_us;
double sr_enter_plus_exit_time_us;
double urgent_latency_us;
@@ -109,8 +112,7 @@ struct _vcs_dpi_soc_bounding_box_st {
double xfc_bus_transport_time_us;
double xfc_xbuf_latency_tolerance_us;
int use_urgent_burst_bw;
- unsigned int num_states;
- struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES];
+ double min_dcfclk;
bool do_urgent_latency_adjustment;
double urgent_latency_adjustment_fabric_clock_component_us;
double urgent_latency_adjustment_fabric_clock_reference_mhz;
@@ -189,7 +191,7 @@ struct _vcs_dpi_ip_params_st {
unsigned int min_vblank_lines;
unsigned int dppclk_delay_subtotal;
unsigned int dispclk_delay_subtotal;
- unsigned int dcfclk_cstate_latency;
+ double dcfclk_cstate_latency;
unsigned int dppclk_delay_scl;
unsigned int dppclk_delay_scl_lb_only;
unsigned int dppclk_delay_cnvc_formatter;
@@ -202,6 +204,7 @@ struct _vcs_dpi_ip_params_st {
unsigned int LineBufferFixedBpp;
unsigned int can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one;
unsigned int bug_forcing_LC_req_same_size_fixed;
+ unsigned int number_of_cursors;
};
struct _vcs_dpi_display_xfc_params_st {
@@ -325,7 +328,6 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned int vupdate_width;
unsigned int vready_offset;
unsigned char interlaced;
- unsigned char embedded;
double pixel_rate_mhz;
unsigned char synchronized_vblank_all_planes;
unsigned char otg_inst;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index b3c96d9b472f..6b525c52124c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -266,8 +266,6 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
mode_lib->vba.MaxDispclk[i] = soc->clock_limits[i].dispclk_mhz;
mode_lib->vba.DTBCLKPerState[i] = soc->clock_limits[i].dtbclk_mhz;
}
- mode_lib->vba.MinVoltageLevel = 0;
- mode_lib->vba.MaxVoltageLevel = mode_lib->vba.soc.num_states;
mode_lib->vba.DoUrgentLatencyAdjustment =
soc->do_urgent_latency_adjustment;
@@ -379,7 +377,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes;
- mode_lib->vba.EmbeddedPanel[mode_lib->vba.NumberOfActivePlanes] = dst->embedded;
mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1;
mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] =
(enum scan_direction_class) (src->source_scan);
@@ -396,11 +393,11 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.ViewportYStartC[mode_lib->vba.NumberOfActivePlanes] =
src->viewport_y_c;
mode_lib->vba.PitchY[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch;
- mode_lib->vba.SurfaceHeightY[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height;
- mode_lib->vba.SurfaceWidthY[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width;
+ mode_lib->vba.SurfaceWidthY[mode_lib->vba.NumberOfActivePlanes] = src->surface_width_y;
+ mode_lib->vba.SurfaceHeightY[mode_lib->vba.NumberOfActivePlanes] = src->surface_height_y;
mode_lib->vba.PitchC[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch_c;
- mode_lib->vba.SurfaceHeightC[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height_c;
- mode_lib->vba.SurfaceWidthC[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width_c;
+ mode_lib->vba.SurfaceHeightC[mode_lib->vba.NumberOfActivePlanes] = src->surface_height_c;
+ mode_lib->vba.SurfaceWidthC[mode_lib->vba.NumberOfActivePlanes] = src->surface_width_c;
mode_lib->vba.DCCMetaPitchY[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch;
mode_lib->vba.DCCMetaPitchC[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch_c;
mode_lib->vba.HRatio[mode_lib->vba.NumberOfActivePlanes] = scl->hscl_ratio;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 2875efd85467..5d82fc5a7ed7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -389,7 +389,6 @@ struct vba_vars_st {
/* vba mode support */
/*inputs*/
- bool EmbeddedPanel[DC__NUM_DPP__MAX];
bool SupportGFX7CompatibleTilingIn32bppAnd64bpp;
double MaxHSCLRatio;
double MaxVSCLRatio;
@@ -842,8 +841,6 @@ struct vba_vars_st {
double DCCRateChroma[DC__NUM_DPP__MAX];
double PHYCLKD18PerState[DC__VOLTAGE_STATES + 1];
- int MinVoltageLevel;
- int MaxVoltageLevel;
bool WritebackSupportInterleaveAndUsingWholeBufferForASingleStream;
bool NumberOfHDMIFRLSupport;
@@ -880,7 +877,6 @@ struct vba_vars_st {
double TotalMetaRowBandwidth[DC__VOLTAGE_STATES + 1][2];
double TotalVActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2];
double TotalVActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2];
- bool UseMinimumRequiredDCFCLK;
double WritebackDelayTime[DC__NUM_DPP__MAX];
unsigned int DCCYIndependentBlock[DC__NUM_DPP__MAX];
unsigned int DCCCIndependentBlock[DC__NUM_DPP__MAX];
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index d2d36d48caaa..f252af1947c3 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -47,9 +47,9 @@
#include "dce120/hw_factory_dce120.h"
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/hw_factory_dcn10.h"
-#endif
#include "dcn20/hw_factory_dcn20.h"
#include "dcn21/hw_factory_dcn21.h"
+#endif
#include "diagnostics/hw_factory_diag.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 5d396657a1ee..04e2c0f74cb0 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -45,9 +45,9 @@
#include "dce120/hw_translate_dce120.h"
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/hw_translate_dcn10.h"
-#endif
#include "dcn20/hw_translate_dcn20.h"
#include "dcn21/hw_translate_dcn21.h"
+#endif
#include "diagnostics/hw_translate_diag.h"
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index f285b76888fb..d523fc9547e7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -124,7 +124,7 @@ struct resource_funcs {
struct dc *dc,
struct dc_state *new_ctx,
struct dc_stream_state *stream);
- enum dc_status (*get_default_swizzle_mode)(
+ enum dc_status (*patch_unknown_plane_state)(
struct dc_plane_state *plane_state);
struct stream_encoder *(*find_first_free_match_stream_enc_for_link)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 8b1f0ce6c2a7..e94e5fbf2aa2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -78,6 +78,8 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
+void dpcd_set_source_specific_data(struct dc_link *link);
+
void dp_set_fec_ready(struct dc_link *link, bool ready);
void dp_set_fec_enable(struct dc_link *link, bool enable);
bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index ac530c057ddd..ce65678c03b2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -27,6 +27,7 @@
#define __DAL_CLK_MGR_H__
#include "dc.h"
+#include "dm_pp_smu.h"
#define DCN_MINIMUM_DISPCLK_Khz 100000
#define DCN_MINIMUM_DPPCLK_Khz 100000
@@ -193,6 +194,7 @@ struct clk_mgr {
int dentist_vco_freq_khz;
struct clk_state_registers_and_bypass boot_snapshot;
struct clk_bw_params *bw_params;
+ struct pp_smu_wm_range_sets ranges;
};
/* forward declarations */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 862952c0286a..9311d0de377f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -296,6 +296,10 @@ int clk_mgr_helper_get_active_display_cnt(
struct dc *dc,
struct dc_state *context);
+int clk_mgr_helper_get_active_plane_cnt(
+ struct dc *dc,
+ struct dc_state *context);
+
#endif //__DAL_CLK_MGR_INTERNAL_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 05ee5295d2c1..336c80a18175 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -27,11 +27,12 @@
#define __DAL_DCCG_H__
#include "dc_types.h"
+#include "hw_shared.h"
struct dccg {
struct dc_context *ctx;
const struct dccg_funcs *funcs;
-
+ int pipe_dppclk_khz[MAX_PIPES];
int ref_dppclk;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index c0dc1d0f5cae..f5dd0cc73c63 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -134,7 +134,7 @@ struct hubbub_funcs {
unsigned int dccg_ref_freq_inKhz,
unsigned int *dchub_ref_freq_inKhz);
- void (*program_watermarks)(
+ bool (*program_watermarks)(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
index c59740084ebc..7c2a3328b208 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
@@ -39,6 +39,7 @@ struct dsc_config {
uint32_t pic_height;
enum dc_pixel_encoding pixel_encoding;
enum dc_color_depth color_depth; /* Bits per component */
+ bool is_odm;
struct dc_dsc_config dc_dsc_cfg;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index 459f95f52486..f30ab4916242 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -25,16 +25,15 @@
#ifndef __DC_DWBC_H__
#define __DC_DWBC_H__
+#include "dal_types.h"
#include "dc_hw_types.h"
-
#define DWB_SW_V2 1
#define DWB_MCIF_BUF_COUNT 4
/* forward declaration of mcif_wb struct */
struct mcif_wb;
-enum dce_version;
enum dwb_sw_version {
dwb_ver_1_0 = 1,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index fb748f082c56..c2b392a533b1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -68,6 +68,7 @@ struct encoder_feature_support {
unsigned int max_hdmi_pixel_clock;
bool hdmi_ycbcr420_supported;
bool dp_ycbcr420_supported;
+ bool fec_supported;
};
union dpcd_psr_configuration {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 7575564b2265..2717352eb697 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -310,7 +310,8 @@ struct opp_funcs {
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
- int height);
+ int height,
+ int offset);
bool (*dpg_is_blanked)(
struct output_pixel_processor *opp);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 351b387ad606..ac6523c0828e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -103,6 +103,7 @@ struct stream_encoder {
struct dc_context *ctx;
struct dc_bios *bp;
enum engine_id id;
+ uint32_t stream_enc_inst;
};
struct enc_state {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 209118f9f193..d4c1fb242c63 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -66,6 +66,8 @@ struct hw_sequencer_funcs {
int num_planes, struct dc_state *context);
void (*program_front_end_for_ctx)(struct dc *dc,
struct dc_state *context);
+ void (*post_unlock_program_front_end)(struct dc *dc,
+ struct dc_state *context);
void (*update_plane_addr)(const struct dc *dc,
struct pipe_ctx *pipe_ctx);
void (*update_dchub)(struct dce_hwseq *hws,
@@ -78,10 +80,10 @@ struct hw_sequencer_funcs {
void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
/* Pipe Lock Related */
- void (*pipe_control_lock_global)(struct dc *dc,
- struct pipe_ctx *pipe, bool lock);
void (*pipe_control_lock)(struct dc *dc,
struct pipe_ctx *pipe, bool lock);
+ void (*interdependent_update_lock)(struct dc *dc,
+ struct dc_state *context, bool lock);
void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx,
bool flip_immediate);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index ecf566378ccd..52a26e6be066 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -40,10 +40,13 @@ struct dce_hwseq_wa {
bool false_optc_underflow;
bool DEGVIDCN10_254;
bool DEGVIDCN21;
+ bool disallow_self_refresh_during_multi_plane_transition;
};
struct hwseq_wa_state {
bool DEGVIDCN10_253_applied;
+ bool disallow_self_refresh_during_multi_plane_transition_applied;
+ unsigned int disallow_self_refresh_during_multi_plane_transition_applied_on_frame;
};
struct pipe_ctx;
@@ -97,6 +100,8 @@ struct hwseq_private_funcs {
struct dc *dc);
void (*edp_backlight_control)(struct dc_link *link,
bool enable);
+ bool (*is_panel_backlight_on)(struct dc_link *link);
+ bool (*is_panel_powered_on)(struct dc_link *link);
void (*setup_vupdate_interrupt)(struct dc *dc,
struct pipe_ctx *pipe_ctx);
bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
@@ -140,6 +145,8 @@ struct hwseq_private_funcs {
const struct dc_plane_state *plane_state);
bool (*set_shaper_3dlut)(struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state);
+ void (*PLAT_58856_wa)(struct dc_state *context,
+ struct pipe_ctx *pipe_ctx);
};
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 5ae8ada154ef..ca4c36c0c9bc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -179,4 +179,7 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
void get_audio_check(struct audio_info *aud_modes,
struct audio_check *aud_chk);
+
+int get_num_odm_splits(struct pipe_ctx *pipe);
+
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index cd9532b4f14d..10b5fa9d2588 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -50,6 +50,7 @@ enum dmub_cmd_type {
DMUB_CMD__REG_REG_WAIT = 4,
DMUB_CMD__PLAT_54186_WA = 5,
DMUB_CMD__PSR = 64,
+ DMUB_CMD__ABM = 66,
DMUB_CMD__VBIOS = 128,
};
@@ -216,7 +217,6 @@ struct dmub_rb_cmd_dpphy_init {
struct dmub_cmd_psr_copy_settings_data {
uint16_t psr_level;
- uint8_t hubp_inst;
uint8_t dpp_inst;
uint8_t mpcc_inst;
uint8_t opp_inst;
@@ -225,17 +225,8 @@ struct dmub_cmd_psr_copy_settings_data {
uint8_t digbe_inst;
uint8_t dpphy_inst;
uint8_t aux_inst;
- uint8_t hyst_frames;
- uint8_t hyst_lines;
- uint8_t phy_num;
- uint8_t phy_type;
- uint8_t aux_repeat;
uint8_t smu_optimizations_en;
- uint8_t skip_wait_for_pll_lock;
uint8_t frame_delay;
- uint8_t smu_phy_id;
- uint8_t num_of_controllers;
- uint8_t link_rate;
uint8_t frame_cap_ind;
};
@@ -257,13 +248,59 @@ struct dmub_rb_cmd_psr_enable {
struct dmub_cmd_header header;
};
-struct dmub_cmd_psr_setup_data {
+struct dmub_cmd_psr_set_version_data {
enum psr_version version; // PSR version 1 or 2
};
-struct dmub_rb_cmd_psr_setup {
+struct dmub_rb_cmd_psr_set_version {
struct dmub_cmd_header header;
- struct dmub_cmd_psr_setup_data psr_setup_data;
+ struct dmub_cmd_psr_set_version_data psr_set_version_data;
+};
+
+struct dmub_cmd_abm_set_pipe_data {
+ uint32_t ramping_boundary;
+ uint32_t otg_inst;
+};
+
+struct dmub_rb_cmd_abm_set_pipe {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_set_pipe_data abm_set_pipe_data;
+};
+
+struct dmub_cmd_abm_set_backlight_data {
+ uint32_t frame_ramp;
+};
+
+struct dmub_rb_cmd_abm_set_backlight {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_set_backlight_data abm_set_backlight_data;
+};
+
+struct dmub_cmd_abm_set_level_data {
+ uint32_t level;
+};
+
+struct dmub_rb_cmd_abm_set_level {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_set_level_data abm_set_level_data;
+};
+
+struct dmub_cmd_abm_set_ambient_level_data {
+ uint32_t ambient_lux;
+};
+
+struct dmub_rb_cmd_abm_set_ambient_level {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_set_ambient_level_data abm_set_ambient_level_data;
+};
+
+struct dmub_cmd_abm_set_pwm_frac_data {
+ uint32_t fractional_pwm;
+};
+
+struct dmub_rb_cmd_abm_set_pwm_frac {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_set_pwm_frac_data abm_set_pwm_frac_data;
};
union dmub_rb_cmd {
@@ -277,11 +314,16 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_enable_disp_power_gating enable_disp_power_gating;
struct dmub_rb_cmd_dpphy_init dpphy_init;
struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control;
- struct dmub_rb_cmd_psr_enable psr_enable;
+ struct dmub_rb_cmd_psr_set_version psr_set_version;
struct dmub_rb_cmd_psr_copy_settings psr_copy_settings;
+ struct dmub_rb_cmd_psr_enable psr_enable;
struct dmub_rb_cmd_psr_set_level psr_set_level;
struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa;
- struct dmub_rb_cmd_psr_setup psr_setup;
+ struct dmub_rb_cmd_abm_set_pipe abm_set_pipe;
+ struct dmub_rb_cmd_abm_set_backlight abm_set_backlight;
+ struct dmub_rb_cmd_abm_set_level abm_set_level;
+ struct dmub_rb_cmd_abm_set_ambient_level abm_set_ambient_level;
+ struct dmub_rb_cmd_abm_set_pwm_frac abm_set_pwm_frac;
};
#pragma pack(pop)
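For orientation, a minimal sketch of how a caller might populate one of the new ABM ring-buffer commands. The dmub_cmd_header field names (type, sub_type, payload_bytes) and the dc_dmub_srv_cmd_queue()/dc_dmub_srv_cmd_execute() helpers are assumptions modeled on how the existing PSR commands are issued; they are not part of this patch:

static void example_abm_set_level(struct dc_dmub_srv *dmub_srv, uint32_t level)
{
        union dmub_rb_cmd cmd = { 0 };

        /* DMUB_CMD__ABM and DMUB_CMD__ABM_SET_LEVEL are defined in this patch. */
        cmd.abm_set_level.header.type = DMUB_CMD__ABM;
        cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL;
        cmd.abm_set_level.header.payload_bytes =
                sizeof(struct dmub_cmd_abm_set_level_data);
        cmd.abm_set_level.abm_set_level_data.level = level;

        /* Assumed queue/flush helpers; exact names depend on the dc_dmub_srv API. */
        dc_dmub_srv_cmd_queue(dmub_srv, &cmd.abm_set_level.header);
        dc_dmub_srv_cmd_execute(dmub_srv);
}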
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
index 7b69eb37f762..d37535d21928 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
@@ -32,7 +32,7 @@
*/
enum dmub_cmd_psr_type {
- DMUB_CMD__PSR_SETUP = 0,
+ DMUB_CMD__PSR_SET_VERSION = 0,
DMUB_CMD__PSR_COPY_SETTINGS = 1,
DMUB_CMD__PSR_ENABLE = 2,
DMUB_CMD__PSR_DISABLE = 3,
@@ -42,7 +42,16 @@ enum dmub_cmd_psr_type {
enum psr_version {
PSR_VERSION_1 = 0x10, // PSR Version 1
PSR_VERSION_2 = 0x20, // PSR Version 2, includes selective update
- PSR_VERSION_2_Y_COORD = 0x21, // PSR Version 2, includes Y-coordinate support for SU
+ PSR_VERSION_2_1 = 0x21, // PSR Version 2, includes Y-coordinate support for SU
+};
+
+enum dmub_cmd_abm_type {
+ DMUB_CMD__ABM_INIT_CONFIG = 0,
+ DMUB_CMD__ABM_SET_PIPE = 1,
+ DMUB_CMD__ABM_SET_BACKLIGHT = 2,
+ DMUB_CMD__ABM_SET_LEVEL = 3,
+ DMUB_CMD__ABM_SET_AMBIENT_LEVEL = 4,
+ DMUB_CMD__ABM_SET_PWM_FRAC = 5,
};
#endif /* _DMUB_CMD_DAL_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h
new file mode 100644
index 000000000000..652d6fc061b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_GPINT_CMD_H_
+#define _DMUB_GPINT_CMD_H_
+
+#include "dmub_types.h"
+
+/**
+ * The register format for sending a command via the GPINT.
+ */
+union dmub_gpint_data_register {
+ struct {
+ uint32_t param : 16;
+ uint32_t command_code : 12;
+ uint32_t status : 4;
+ } bits;
+ uint32_t all;
+};
+
+/**
+ * The shifts and masks below may alternatively be used to format and read
+ * the command register bits.
+ */
+
+#define DMUB_GPINT_DATA_PARAM_MASK 0xFFFF
+#define DMUB_GPINT_DATA_PARAM_SHIFT 0
+
+#define DMUB_GPINT_DATA_COMMAND_CODE_MASK 0xFFF
+#define DMUB_GPINT_DATA_COMMAND_CODE_SHIFT 16
+
+#define DMUB_GPINT_DATA_STATUS_MASK 0xF
+#define DMUB_GPINT_DATA_STATUS_SHIFT 28
+
+/*
+ * Command IDs should be treated as stable ABI.
+ * Do not reuse or modify IDs.
+ */
+
+enum dmub_gpint_command {
+ DMUB_GPINT__INVALID_COMMAND = 0,
+ DMUB_GPINT__GET_FW_VERSION = 1,
+ DMUB_GPINT__STOP_FW = 2,
+ DMUB_GPINT__GET_PSR_STATE = 7,
+};
+
+/**
+ * Command responses.
+ */
+
+#define DMUB_GPINT__STOP_FW_RESPONSE 0xDEADDEAD
+
+#endif /* _DMUB_GPINT_CMD_H_ */
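The status field is written as 1 by the host when a command is issued and cleared by the firmware once the command is acked. As a sanity check on the layout, the shift/mask macros build the same 32-bit value the bitfield union produces (assuming the little-endian bit order the macros imply); a small sketch:

static uint32_t example_pack_gpint(enum dmub_gpint_command code, uint16_t param)
{
        uint32_t reg = 0;

        reg |= ((uint32_t)param & DMUB_GPINT_DATA_PARAM_MASK)
               << DMUB_GPINT_DATA_PARAM_SHIFT;
        reg |= ((uint32_t)code & DMUB_GPINT_DATA_COMMAND_CODE_MASK)
               << DMUB_GPINT_DATA_COMMAND_CODE_SHIFT;
        /* Mark the command as pending; the firmware clears this on ack. */
        reg |= (1u & DMUB_GPINT_DATA_STATUS_MASK)
               << DMUB_GPINT_DATA_STATUS_SHIFT;

        return reg; /* equals a union dmub_gpint_data_register .all value */
}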
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
index f8917594036a..c2671f2616c8 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
@@ -66,6 +66,7 @@
#include "dmub_types.h"
#include "dmub_cmd.h"
+#include "dmub_gpint_cmd.h"
#include "dmub_rb.h"
#if defined(__cplusplus)
@@ -103,7 +104,7 @@ enum dmub_window_id {
DMUB_WINDOW_4_MAILBOX,
DMUB_WINDOW_5_TRACEBUFF,
DMUB_WINDOW_6_FW_STATE,
- DMUB_WINDOW_7_RESERVED,
+ DMUB_WINDOW_7_SCRATCH_MEM,
DMUB_WINDOW_TOTAL,
};
@@ -262,6 +263,14 @@ struct dmub_srv_hw_funcs {
bool (*is_phy_init)(struct dmub_srv *dmub);
bool (*is_auto_load_done)(struct dmub_srv *dmub);
+
+ void (*set_gpint)(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg);
+
+ bool (*is_gpint_acked)(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg);
+
+ uint32_t (*get_gpint_response)(struct dmub_srv *dmub);
};
/**
@@ -307,6 +316,7 @@ struct dmub_srv {
enum dmub_asic asic;
void *user_ctx;
bool is_virtual;
+ struct dmub_fb scratch_mem_fb;
volatile const struct dmub_fw_state *fw_state;
/* private: internal use only */
@@ -516,6 +526,45 @@ enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
uint32_t timeout_us);
+/**
+ * dmub_srv_send_gpint_command() - Sends a GPINT-based command.
+ * @dmub: the dmub service
+ * @command_code: the command code to send
+ * @param: the command parameter to send
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Sends a command via the general purpose interrupt (GPINT).
+ * Waits for the number of microseconds specified by timeout_us
+ * for the command ACK before returning.
+ *
+ * Can be called after software initialization.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for ACK timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status
+dmub_srv_send_gpint_command(struct dmub_srv *dmub,
+ enum dmub_gpint_command command_code,
+ uint16_t param, uint32_t timeout_us);
+
+/**
+ * dmub_srv_get_gpint_response() - Queries the GPINT response.
+ * @dmub: the dmub service
+ * @response: the response for the last GPINT
+ *
+ * Returns the response code for the last GPINT interrupt.
+ *
+ * Can be called after software initialization.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
+ uint32_t *response);
+
#if defined(__cplusplus)
}
#endif
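A minimal sketch of the intended call sequence for the two new entry points, e.g. to query the firmware version; the wrapper and the 30 us timeout are illustrative (the DCN20 reset path polls for 30 iterations), not part of this patch:

static enum dmub_status example_get_fw_version(struct dmub_srv *dmub,
                                               uint32_t *version)
{
        enum dmub_status status;

        status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__GET_FW_VERSION,
                                             0, 30);
        if (status != DMUB_STATUS_OK)
                return status;

        /* The response is latched separately from the ack; read it back. */
        return dmub_srv_get_gpint_response(dmub, version);
}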
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index b2ca8e0dbac9..63bb9e2c81de 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -60,6 +60,12 @@ static void dmub_dcn20_get_fb_base_offset(struct dmub_srv *dmub,
{
uint32_t tmp;
+ if (dmub->fb_base || dmub->fb_offset) {
+ *fb_base = dmub->fb_base;
+ *fb_offset = dmub->fb_offset;
+ return;
+ }
+
REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp);
*fb_base = (uint64_t)tmp << 24;
@@ -77,11 +83,52 @@ static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn20_reset(struct dmub_srv *dmub)
{
+ union dmub_gpint_data_register cmd;
+ const uint32_t timeout = 30;
+ uint32_t in_reset, scratch, i;
+
+ REG_GET(DMCUB_CNTL, DMCUB_SOFT_RESET, &in_reset);
+
+ if (in_reset == 0) {
+ cmd.bits.status = 1;
+ cmd.bits.command_code = DMUB_GPINT__STOP_FW;
+ cmd.bits.param = 0;
+
+ dmub->hw_funcs.set_gpint(dmub, cmd);
+
+ /**
+ * Timeout covers both the ACK and the wait
+ * for remaining work to finish.
+ *
+ * This is mostly bound by the PHY disable sequence.
+ * Each register check will be greater than 1us, so
+ * don't bother using udelay.
+ */
+
+ for (i = 0; i < timeout; ++i) {
+ if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
+ break;
+ }
+
+ for (i = 0; i < timeout; ++i) {
+ scratch = dmub->hw_funcs.get_gpint_response(dmub);
+ if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
+ break;
+ }
+
+ /* Clear the GPINT command manually so we don't reset again. */
+ cmd.all = 0;
+ dmub->hw_funcs.set_gpint(dmub, cmd);
+
+ /* Force reset in case we timed out; DMCUB is likely hung. */
+ }
+
REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 1);
REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
+ REG_WRITE(DMCUB_SCRATCH0, 0);
}
void dmub_dcn20_reset_release(struct dmub_srv *dmub)
@@ -217,3 +264,25 @@ bool dmub_dcn20_is_supported(struct dmub_srv *dmub)
return supported;
}
+
+void dmub_dcn20_set_gpint(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg)
+{
+ REG_WRITE(DMCUB_GPINT_DATAIN1, reg.all);
+}
+
+bool dmub_dcn20_is_gpint_acked(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg)
+{
+ union dmub_gpint_data_register test;
+
+ reg.bits.status = 0;
+ test.all = REG_READ(DMCUB_GPINT_DATAIN1);
+
+ return test.all == reg.all;
+}
+
+uint32_t dmub_dcn20_get_gpint_response(struct dmub_srv *dmub)
+{
+ return REG_READ(DMCUB_SCRATCH7);
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index 04b0fa13153d..7f046c73927e 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -91,6 +91,7 @@ struct dmub_srv;
DMUB_SR(DMCUB_SCRATCH13) \
DMUB_SR(DMCUB_SCRATCH14) \
DMUB_SR(DMCUB_SCRATCH15) \
+ DMUB_SR(DMCUB_GPINT_DATAIN1) \
DMUB_SR(CC_DC_PIPE_DIS) \
DMUB_SR(MMHUBBUB_SOFT_RESET) \
DMUB_SR(DCN_VM_FB_LOCATION_BASE) \
@@ -183,4 +184,12 @@ bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub);
bool dmub_dcn20_is_supported(struct dmub_srv *dmub);
+void dmub_dcn20_set_gpint(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg);
+
+bool dmub_dcn20_is_gpint_acked(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg);
+
+uint32_t dmub_dcn20_get_gpint_response(struct dmub_srv *dmub);
+
#endif /* _DMUB_DCN20_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 85a518bf8a76..ce32cc7933c4 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -52,8 +52,11 @@
/* Default tracebuffer size if meta is absent. */
#define DMUB_TRACE_BUFFER_SIZE (1024)
+/* Default scratch mem size. */
+#define DMUB_SCRATCH_MEM_SIZE (256)
+
/* Number of windows in use. */
-#define DMUB_NUM_WINDOWS (DMUB_WINDOW_6_FW_STATE + 1)
+#define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL)
/* Base addresses. */
#define DMUB_CW0_BASE (0x60000000)
@@ -126,6 +129,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
funcs->is_supported = dmub_dcn20_is_supported;
funcs->is_hw_init = dmub_dcn20_is_hw_init;
+ funcs->set_gpint = dmub_dcn20_set_gpint;
+ funcs->is_gpint_acked = dmub_dcn20_is_gpint_acked;
+ funcs->get_gpint_response = dmub_dcn20_get_gpint_response;
if (asic == DMUB_ASIC_DCN21) {
dmub->regs = &dmub_srv_dcn21_regs;
@@ -208,9 +214,11 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX];
struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE];
+ struct dmub_region *scratch_mem = &out->regions[DMUB_WINDOW_7_SCRATCH_MEM];
const struct dmub_fw_meta_info *fw_info;
uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
+ uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
@@ -253,7 +261,10 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
fw_state->base = dmub_align(trace_buff->top, 256);
fw_state->top = fw_state->base + dmub_align(fw_state_size, 64);
- out->fb_size = dmub_align(fw_state->top, 4096);
+ scratch_mem->base = dmub_align(fw_state->top, 256);
+ scratch_mem->top = scratch_mem->base + dmub_align(scratch_mem_size, 64);
+
+ out->fb_size = dmub_align(scratch_mem->top, 4096);
return DMUB_STATUS_OK;
}
@@ -331,6 +342,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX];
struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
+ struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
struct dmub_rb_init_params rb_params;
struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6;
@@ -367,7 +379,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->hw_funcs.reset(dmub);
if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb &&
- fw_state_fb) {
+ fw_state_fb && scratch_mem_fb) {
cw2.offset.quad_part = data_fb->gpu_addr;
cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
cw2.region.top = cw2.region.base + data_fb->size;
@@ -393,6 +405,8 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->fw_state = fw_state_fb->cpu_addr;
+ dmub->scratch_mem_fb = *scratch_mem_fb;
+
if (dmub->hw_funcs.setup_windows)
dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4,
&cw5, &cw6);
@@ -522,3 +536,50 @@ enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
return DMUB_STATUS_TIMEOUT;
}
+
+enum dmub_status
+dmub_srv_send_gpint_command(struct dmub_srv *dmub,
+ enum dmub_gpint_command command_code,
+ uint16_t param, uint32_t timeout_us)
+{
+ union dmub_gpint_data_register reg;
+ uint32_t i;
+
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (!dmub->hw_funcs.set_gpint)
+ return DMUB_STATUS_INVALID;
+
+ if (!dmub->hw_funcs.is_gpint_acked)
+ return DMUB_STATUS_INVALID;
+
+ reg.bits.status = 1;
+ reg.bits.command_code = command_code;
+ reg.bits.param = param;
+
+ dmub->hw_funcs.set_gpint(dmub, reg);
+
+ for (i = 0; i < timeout_us; ++i) {
+ if (dmub->hw_funcs.is_gpint_acked(dmub, reg))
+ return DMUB_STATUS_OK;
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
+
+enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
+ uint32_t *response)
+{
+ *response = 0;
+
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (!dmub->hw_funcs.get_gpint_response)
+ return DMUB_STATUS_INVALID;
+
+ *response = dmub->hw_funcs.get_gpint_response(dmub);
+
+ return DMUB_STATUS_OK;
+}
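For reference on the region math above: each window base is rounded up to a 256-byte boundary and each size to 64 bytes. dmub_align is presumably the usual round-up-to-multiple helper, along these lines (a sketch, not quoted from the source):

static inline uint32_t example_align(uint32_t val, uint32_t factor)
{
        return (val + factor - 1) / factor * factor;
}

/*
 * E.g. with fw_state->top == 0x1234, the scratch window spans
 * example_align(0x1234, 256) == 0x1300 up to 0x1300 + example_align(256, 64)
 * == 0x1400, and out->fb_size grows by at most one 4 KiB page.
 */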
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index a2903985b9e8..8a87d0ed90ae 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -134,31 +134,27 @@
#define PICASSO_A0 0x41
/* DCN1_01 */
#define RAVEN2_A0 0x81
-#define RAVEN2_15D8_REV_94 0x94
-#define RAVEN2_15D8_REV_95 0x95
-#define RAVEN2_15D8_REV_E3 0xE3
-#define RAVEN2_15D8_REV_E4 0xE4
-#define RAVEN2_15D8_REV_E9 0xE9
-#define RAVEN2_15D8_REV_EA 0xEA
-#define RAVEN2_15D8_REV_EB 0xEB
#define RAVEN1_F0 0xF0
#define RAVEN_UNKNOWN 0xFF
#ifndef ASICREV_IS_RAVEN
#define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
#endif
+#define PRID_DALI_DE 0xDE
+#define PRID_DALI_DF 0xDF
+#define PRID_DALI_E3 0xE3
+#define PRID_DALI_E4 0xE4
+
+#define PRID_POLLOCK_94 0x94
+#define PRID_POLLOCK_95 0x95
+#define PRID_POLLOCK_E9 0xE9
+#define PRID_POLLOCK_EA 0xEA
+#define PRID_POLLOCK_EB 0xEB
#define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0))
#ifndef ASICREV_IS_RAVEN2
-#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RAVEN1_F0))
+#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RENOIR_A0))
#endif
#define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN))
-#define ASICREV_IS_DALI(eChipRev) ((eChipRev == RAVEN2_15D8_REV_E3) \
- || (eChipRev == RAVEN2_15D8_REV_E4))
-#define ASICREV_IS_POLLOCK(eChipRev) (eChipRev == RAVEN2_15D8_REV_94 \
- || eChipRev == RAVEN2_15D8_REV_95 \
- || eChipRev == RAVEN2_15D8_REV_E9 \
- || eChipRev == RAVEN2_15D8_REV_EA \
- || eChipRev == RAVEN2_15D8_REV_EB)
#define FAMILY_RV 142 /* DCN 1*/
@@ -177,7 +173,7 @@ enum {
#define ASICREV_IS_NAVI14_M(eChipRev) ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN))
#define RENOIR_A0 0x91
#define DEVICE_ID_RENOIR_1636 0x1636 // Renoir
-#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < 0xFF))
+#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < RAVEN1_F0))
/*
* ASIC chip ID
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index 2c90d1b46c8b..3d29646c7cb4 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -149,4 +149,12 @@ enum dpcd_psr_sink_states {
PSR_SINK_STATE_SINK_INTERNAL_ERROR = 7,
};
+#define DP_SOURCE_TABLE_REVISION 0x310
+#define DP_SOURCE_PAYLOAD_SIZE 0x311
+#define DP_SOURCE_SINK_CAP 0x317
+#define DP_SOURCE_BACKLIGHT_LEVEL 0x320
+#define DP_SOURCE_BACKLIGHT_CURRENT_PEAK 0x326
+#define DP_SOURCE_BACKLIGHT_CONTROL 0x32E
+#define DP_SOURCE_BACKLIGHT_ENABLE 0x32F
+
#endif /* __DAL_DPCD_DEFS_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 89a709267019..d66f9d8eefb4 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -124,36 +124,37 @@ enum dc_log_type {
#define DC_MIN_LOG_MASK ((1 << LOG_ERROR) | \
(1 << LOG_DETECTION_EDID_PARSER))
-#define DC_DEFAULT_LOG_MASK ((1 << LOG_ERROR) | \
- (1 << LOG_WARNING) | \
- (1 << LOG_EVENT_MODE_SET) | \
- (1 << LOG_EVENT_DETECTION) | \
- (1 << LOG_EVENT_LINK_TRAINING) | \
- (1 << LOG_EVENT_LINK_LOSS) | \
- (1 << LOG_EVENT_UNDERFLOW) | \
- (1 << LOG_RESOURCE) | \
- (1 << LOG_FEATURE_OVERRIDE) | \
- (1 << LOG_DETECTION_EDID_PARSER) | \
- (1 << LOG_DC) | \
- (1 << LOG_HW_HOTPLUG) | \
- (1 << LOG_HW_SET_MODE) | \
- (1 << LOG_HW_RESUME_S3) | \
- (1 << LOG_HW_HPD_IRQ) | \
- (1 << LOG_SYNC) | \
- (1 << LOG_BANDWIDTH_VALIDATION) | \
- (1 << LOG_MST) | \
- (1 << LOG_DETECTION_DP_CAPS) | \
- (1 << LOG_BACKLIGHT)) | \
- (1 << LOG_I2C_AUX) | \
- (1 << LOG_IF_TRACE) | \
- (1 << LOG_DTN) /* | \
- (1 << LOG_DEBUG) | \
- (1 << LOG_BIOS) | \
- (1 << LOG_SURFACE) | \
- (1 << LOG_SCALER) | \
- (1 << LOG_DML) | \
- (1 << LOG_HW_LINK_TRAINING) | \
- (1 << LOG_HW_AUDIO)| \
- (1 << LOG_BANDWIDTH_CALCS)*/
+#define DC_DEFAULT_LOG_MASK ((1ULL << LOG_ERROR) | \
+ (1ULL << LOG_WARNING) | \
+ (1ULL << LOG_EVENT_MODE_SET) | \
+ (1ULL << LOG_EVENT_DETECTION) | \
+ (1ULL << LOG_EVENT_LINK_TRAINING) | \
+ (1ULL << LOG_EVENT_LINK_LOSS) | \
+ (1ULL << LOG_EVENT_UNDERFLOW) | \
+ (1ULL << LOG_RESOURCE) | \
+ (1ULL << LOG_FEATURE_OVERRIDE) | \
+ (1ULL << LOG_DETECTION_EDID_PARSER) | \
+ (1ULL << LOG_DC) | \
+ (1ULL << LOG_HW_HOTPLUG) | \
+ (1ULL << LOG_HW_SET_MODE) | \
+ (1ULL << LOG_HW_RESUME_S3) | \
+ (1ULL << LOG_HW_HPD_IRQ) | \
+ (1ULL << LOG_SYNC) | \
+ (1ULL << LOG_BANDWIDTH_VALIDATION) | \
+ (1ULL << LOG_MST) | \
+ (1ULL << LOG_DETECTION_DP_CAPS) | \
+ (1ULL << LOG_BACKLIGHT) | \
+ (1ULL << LOG_I2C_AUX) | \
+ (1ULL << LOG_IF_TRACE) | \
+ (1ULL << LOG_HDMI_FRL) | \
+ (1ULL << LOG_DTN)) /* | \
+ (1ULL << LOG_DEBUG) | \
+ (1ULL << LOG_BIOS) | \
+ (1ULL << LOG_SURFACE) | \
+ (1ULL << LOG_SCALER) | \
+ (1ULL << LOG_DML) | \
+ (1ULL << LOG_HW_LINK_TRAINING) | \
+ (1ULL << LOG_HW_AUDIO) | \
+ (1ULL << LOG_BANDWIDTH_CALCS)*/
#endif /* __DAL_LOGGER_TYPES_H__ */
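The move from 1 to 1ULL matters once enum dc_log_type has entries at bit position 31 and beyond: shifting a plain int into or past its sign bit is undefined behavior in C, so the 64-bit constant is needed for the mask to stay well-defined. A two-line illustration:

uint64_t ok  = 1ULL << 35;      /* well-defined: bit 35 of a 64-bit value */
uint64_t bad = 1 << 35;         /* undefined: int shifted past its width */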
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index b9992ebf77a6..4e542826cd26 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -524,12 +524,12 @@ static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
infopacket->sb[6] |= 0x04;
/* PB7 = FreeSync Minimum refresh rate (Hz) */
- infopacket->sb[7] = (unsigned char)(vrr->min_refresh_in_uhz / 1000000);
+ infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
/* PB8 = FreeSync Maximum refresh rate (Hz)
* Note: We should never go above the field rate of the mode timing set.
*/
- infopacket->sb[8] = (unsigned char)(vrr->max_refresh_in_uhz / 1000000);
+ infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
//FreeSync HDR
@@ -747,10 +747,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
nominal_field_rate_in_uhz =
mod_freesync_calc_nominal_field_rate(stream);
- /* Rounded to the nearest Hz */
- nominal_field_rate_in_uhz = 1000000ULL *
- div_u64(nominal_field_rate_in_uhz + 500000, 1000000);
-
min_refresh_in_uhz = in_config->min_refresh_in_uhz;
max_refresh_in_uhz = in_config->max_refresh_in_uhz;
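The (x + 500000) / 1000000 form rounds micro-Hz to the nearest Hz instead of truncating, which also makes the earlier pre-rounding of the nominal field rate redundant (hence its removal above). A quick worked example:

/* A 59.94 Hz minimum (59,940,000 uHz) reported in PB7: */
unsigned char pb7_truncated = 59940000 / 1000000;             /* 59 */
unsigned char pb7_rounded   = (59940000 + 500000) / 1000000;  /* 60 */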
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index 8aa528e874c4..e9fbd94f8635 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -52,8 +52,8 @@ static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
* hdcp is not desired
*/
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
- !hdcp->connection.displays[i].adjust.disable) {
+ if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
+ !hdcp->displays[i].adjust.disable) {
is_auth_needed = 1;
break;
}
@@ -61,7 +61,8 @@ static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
return (hdcp->connection.hdcp1_retry_count < MAX_NUM_OF_ATTEMPTS) &&
is_auth_needed &&
- !hdcp->connection.link.adjust.hdcp1.disable;
+ !hdcp->connection.link.adjust.hdcp1.disable &&
+ !hdcp->connection.is_hdcp1_revoked;
}
static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp)
@@ -72,8 +73,8 @@ static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp)
* hdcp is not desired
*/
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
- !hdcp->connection.displays[i].adjust.disable) {
+ if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
+ !hdcp->displays[i].adjust.disable) {
is_auth_needed = 1;
break;
}
@@ -103,8 +104,6 @@ static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
event_ctx->unexpected_event = 1;
goto out;
}
- /* update topology event if hdcp is not desired */
- status = mod_hdcp_add_display_topology(hdcp);
} else if (is_in_hdcp1_states(hdcp)) {
status = mod_hdcp_hdcp1_execution(hdcp, event_ctx, &input->hdcp1);
} else if (is_in_hdcp1_dp_states(hdcp)) {
@@ -115,6 +114,9 @@ static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
} else if (is_in_hdcp2_dp_states(hdcp)) {
status = mod_hdcp_hdcp2_dp_execution(hdcp,
event_ctx, &input->hdcp2);
+ } else {
+ event_ctx->unexpected_event = 1;
+ goto out;
}
out:
return status;
@@ -191,14 +193,7 @@ static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,
mod_hdcp_hdcp1_destroy_session(hdcp);
}
- if (hdcp->auth.trans_input.hdcp1.add_topology == PASS) {
- status = mod_hdcp_remove_display_topology(hdcp);
- if (status != MOD_HDCP_STATUS_SUCCESS) {
- output->callback_needed = 0;
- output->watchdog_timer_needed = 0;
- goto out;
- }
- }
+
HDCP_TOP_RESET_AUTH_TRACE(hdcp);
memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
@@ -212,25 +207,12 @@ static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,
goto out;
}
}
- if (hdcp->auth.trans_input.hdcp2.add_topology == PASS) {
- status = mod_hdcp_remove_display_topology(hdcp);
- if (status != MOD_HDCP_STATUS_SUCCESS) {
- output->callback_needed = 0;
- output->watchdog_timer_needed = 0;
- goto out;
- }
- }
+
HDCP_TOP_RESET_AUTH_TRACE(hdcp);
memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
set_state_id(hdcp, output, HDCP_INITIALIZED);
} else if (is_in_cp_not_desired_state(hdcp)) {
- status = mod_hdcp_remove_display_topology(hdcp);
- if (status != MOD_HDCP_STATUS_SUCCESS) {
- output->callback_needed = 0;
- output->watchdog_timer_needed = 0;
- goto out;
- }
HDCP_TOP_RESET_AUTH_TRACE(hdcp);
memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
@@ -337,16 +319,20 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
- /* add display to connection */
- hdcp->connection.link = *link;
- *display_container = *display;
-
/* reset retry counters */
reset_retry_counts(hdcp);
/* reset error trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
+ /* add display to connection */
+ hdcp->connection.link = *link;
+ *display_container = *display;
+ status = mod_hdcp_add_display_to_topology(hdcp, display_container);
+
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
/* request authentication */
if (current_state(hdcp) != HDCP_INITIALIZED)
set_state_id(hdcp, output, HDCP_INITIALIZED);
@@ -379,17 +365,20 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
- /* remove display */
- display->state = MOD_HDCP_DISPLAY_INACTIVE;
-
/* clear retry counters */
reset_retry_counts(hdcp);
/* reset error trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
- /* request authentication for remaining displays*/
- if (get_active_display_count(hdcp) > 0)
+ /* remove display */
+ status = mod_hdcp_remove_display_from_topology(hdcp, index);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+ memset(display, 0, sizeof(struct mod_hdcp_display));
+
+ /* request authentication when connection is not reset */
+ if (current_state(hdcp) != HDCP_UNINITIALIZED)
callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000,
output);
out:
@@ -496,10 +485,8 @@ enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
break;
case SIGNAL_TYPE_EDP:
case SIGNAL_TYPE_DISPLAY_PORT:
- mode = MOD_HDCP_MODE_DP;
- break;
case SIGNAL_TYPE_DISPLAY_PORT_MST:
- mode = MOD_HDCP_MODE_DP_MST;
+ mode = MOD_HDCP_MODE_DP;
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index af78e4f1be68..60ff1a0028ac 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -41,7 +41,6 @@ enum mod_hdcp_trans_input_result {
struct mod_hdcp_transition_input_hdcp1 {
uint8_t bksv_read;
uint8_t bksv_validation;
- uint8_t add_topology;
uint8_t create_session;
uint8_t an_write;
uint8_t aksv_write;
@@ -71,7 +70,6 @@ struct mod_hdcp_transition_input_hdcp1 {
struct mod_hdcp_transition_input_hdcp2 {
uint8_t hdcp2version_read;
uint8_t hdcp2_capable_check;
- uint8_t add_topology;
uint8_t create_session;
uint8_t ake_init_prepare;
uint8_t ake_init_write;
@@ -167,9 +165,9 @@ struct mod_hdcp_auth_counters {
/* contains values per connection */
struct mod_hdcp_connection {
struct mod_hdcp_link link;
- struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS];
uint8_t is_repeater;
uint8_t is_km_stored;
+ uint8_t is_hdcp1_revoked;
uint8_t is_hdcp2_revoked;
struct mod_hdcp_trace trace;
uint8_t hdcp1_retry_count;
@@ -202,6 +200,8 @@ struct mod_hdcp {
struct mod_hdcp_config config;
/* per connection */
struct mod_hdcp_connection connection;
+ /* per display */
+ struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS];
/* per authentication attempt */
struct mod_hdcp_authentication auth;
/* per state in an authentication */
@@ -327,10 +327,10 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
/* TODO: add adjustment log */
/* psp functions */
-enum mod_hdcp_status mod_hdcp_add_display_topology(
- struct mod_hdcp *hdcp);
-enum mod_hdcp_status mod_hdcp_remove_display_topology(
- struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_add_display_to_topology(
+ struct mod_hdcp *hdcp, struct mod_hdcp_display *display);
+enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
+ struct mod_hdcp *hdcp, uint8_t index);
enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp);
@@ -392,13 +392,13 @@ enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp);
/* hdcp version helpers */
static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
{
- return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP ||
- hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP);
}
static inline uint8_t is_dp_mst_hdcp(struct mod_hdcp *hdcp)
{
- return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP &&
+ hdcp->connection.link.dp.mst_supported);
}
static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp)
@@ -503,11 +503,6 @@ static inline uint8_t is_display_active(struct mod_hdcp_display *display)
return display->state >= MOD_HDCP_DISPLAY_ACTIVE;
}
-static inline uint8_t is_display_added(struct mod_hdcp_display *display)
-{
- return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
-}
-
static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display)
{
return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
@@ -515,35 +510,24 @@ static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *dis
static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp)
{
- uint8_t added_count = 0;
- uint8_t i;
-
- for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_active(&hdcp->connection.displays[i]))
- added_count++;
- return added_count;
-}
-
-static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp)
-{
- uint8_t added_count = 0;
+ uint8_t active_count = 0;
uint8_t i;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_added(&hdcp->connection.displays[i]))
- added_count++;
- return added_count;
+ if (is_display_active(&hdcp->displays[i]))
+ active_count++;
+ return active_count;
}
-static inline struct mod_hdcp_display *get_first_added_display(
+static inline struct mod_hdcp_display *get_first_active_display(
struct mod_hdcp *hdcp)
{
uint8_t i;
struct mod_hdcp_display *display = NULL;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_added(&hdcp->connection.displays[i])) {
- display = &hdcp->connection.displays[i];
+ if (is_display_active(&hdcp->displays[i])) {
+ display = &hdcp->displays[i];
break;
}
return display;
@@ -556,9 +540,9 @@ static inline struct mod_hdcp_display *get_active_display_at_index(
struct mod_hdcp_display *display = NULL;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (hdcp->connection.displays[i].index == index &&
- is_display_active(&hdcp->connection.displays[i])) {
- display = &hdcp->connection.displays[i];
+ if (hdcp->displays[i].index == index &&
+ is_display_active(&hdcp->displays[i])) {
+ display = &hdcp->displays[i];
break;
}
return display;
@@ -571,8 +555,8 @@ static inline struct mod_hdcp_display *get_empty_display_container(
struct mod_hdcp_display *display = NULL;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (!is_display_active(&hdcp->connection.displays[i])) {
- display = &hdcp->connection.displays[i];
+ if (!is_display_active(&hdcp->displays[i])) {
+ display = &hdcp->displays[i];
break;
}
return display;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 37670db64855..f244b72e74e0 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -129,7 +129,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* device count must be greater than or equal to tracked hdcp displays */
- return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+ return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
@@ -168,10 +168,6 @@ static enum mod_hdcp_status exchange_ksvs(struct mod_hdcp *hdcp,
goto out;
}
- if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology,
- &input->add_topology, &status,
- hdcp, "add_topology"))
- goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_create_session,
&input->create_session, &status,
hdcp, "create_session"))
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
index 76edcbe51f71..f3711914364e 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
@@ -46,8 +46,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
set_state_id(hdcp, output, H1_A1_EXCHANGE_KSVS);
break;
case H1_A1_EXCHANGE_KSVS:
- if (input->add_topology != PASS ||
- input->create_session != PASS) {
+ if (input->create_session != PASS) {
/* out of sync with psp state */
adjust->hdcp1.disable = 1;
fail_and_restart_in_ms(0, &status, output);
@@ -173,8 +172,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
set_state_id(hdcp, output, D1_A1_EXCHANGE_KSVS);
break;
case D1_A1_EXCHANGE_KSVS:
- if (input->add_topology != PASS ||
- input->create_session != PASS) {
+ if (input->create_session != PASS) {
/* out of sync with psp state */
adjust->hdcp1.disable = 1;
fail_and_restart_in_ms(0, &status, output);
@@ -210,7 +208,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->rx_validation != PASS) {
- if (hdcp->state.stay_count < 2) {
+ if (hdcp->state.stay_count < 2 &&
+ !hdcp->connection.is_hdcp1_revoked) {
/* allow 2 additional retries */
callback_in_ms(0, output);
increment_stay_counter(hdcp);
@@ -231,6 +230,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
(!conn->is_repeater && is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) {
fail_and_restart_in_ms(0, &status, output);
break;
+ } else if (conn->hdcp1_retry_count < conn->link.adjust.hdcp1.min_auth_retries_wa) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
}
if (conn->is_repeater) {
set_watchdog_in_ms(hdcp, 5000, output);
@@ -290,7 +292,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->ksvlist_vp_validation != PASS) {
- if (hdcp->state.stay_count < 2) {
+ if (hdcp->state.stay_count < 2 &&
+ !hdcp->connection.is_hdcp1_revoked) {
/* allow 2 additional retries */
callback_in_ms(0, output);
increment_stay_counter(hdcp);
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index f730b94ac3c0..549c113abcf7 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -34,7 +34,7 @@ static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp
if (is_dp_hdcp(hdcp))
is_ready = HDCP_2_2_DP_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus_dp) ? 1 : 0;
else
- is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[0]) &&
+ is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[1]) &&
(HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
hdcp->auth.msg.hdcp2.rxstatus[0])) ? 1 : 0;
return is_ready ? MOD_HDCP_STATUS_SUCCESS :
@@ -46,8 +46,8 @@ static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp))
- status = (hdcp->auth.msg.hdcp2.rxcaps_dp[2] & HDCP_2_2_RX_CAPS_VERSION_VAL) &&
- HDCP_2_2_DP_HDCP_CAPABLE(hdcp->auth.msg.hdcp2.rxcaps_dp[0]) ?
+ status = (hdcp->auth.msg.hdcp2.rxcaps_dp[0] == HDCP_2_2_RX_CAPS_VERSION_VAL) &&
+ HDCP_2_2_DP_HDCP_CAPABLE(hdcp->auth.msg.hdcp2.rxcaps_dp[2]) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
else
@@ -67,7 +67,7 @@ static inline enum mod_hdcp_status check_reauthentication_request(
MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST :
MOD_HDCP_STATUS_SUCCESS;
else
- ret = HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus[0]) ?
+ ret = HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus[1]) ?
MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST :
MOD_HDCP_STATUS_SUCCESS;
return ret;
@@ -208,7 +208,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* device count must be greater than or equal to tracked hdcp displays */
- return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+ return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
@@ -259,6 +259,7 @@ static enum mod_hdcp_status known_hdcp2_capable_rx(struct mod_hdcp *hdcp,
event_ctx->unexpected_event = 1;
goto out;
}
+
if (!mod_hdcp_execute_and_set(mod_hdcp_read_hdcp2version,
&input->hdcp2version_read, &status,
hdcp, "hdcp2version_read"))
@@ -281,10 +282,7 @@ static enum mod_hdcp_status send_ake_init(struct mod_hdcp *hdcp,
event_ctx->unexpected_event = 1;
goto out;
}
- if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology,
- &input->add_topology, &status,
- hdcp, "add_topology"))
- goto out;
+
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_create_session,
&input->create_session, &status,
hdcp, "create_session"))
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
index 8cae3e3aacd5..e738c7ae66ec 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
@@ -47,8 +47,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
}
break;
case H2_A1_SEND_AKE_INIT:
- if (input->add_topology != PASS ||
- input->create_session != PASS ||
+ if (input->create_session != PASS ||
input->ake_init_prepare != PASS) {
/* out of sync with psp state */
adjust->hdcp2.disable = 1;
@@ -389,8 +388,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
}
break;
case D2_A1_SEND_AKE_INIT:
- if (input->add_topology != PASS ||
- input->create_session != PASS ||
+ if (input->create_session != PASS ||
input->ake_init_prepare != PASS) {
/* out of sync with psp state */
adjust->hdcp2.disable = 1;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index ff9d54812e62..bb5130f4228d 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -65,6 +65,7 @@ enum mod_hdcp_ddc_message_id {
MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+ MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2,
MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
@@ -101,6 +102,7 @@ static const uint8_t hdcp_i2c_offsets[] = {
[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80,
[MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80,
+ [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2] = 0x80,
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
@@ -135,6 +137,7 @@ static const uint32_t hdcp_dpcd_addrs[] = {
[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8,
[MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330,
+ [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2] = 0x69340,
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0,
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473,
@@ -405,7 +408,7 @@ enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.ake_cert[0] = 3;
+ hdcp->auth.msg.hdcp2.ake_cert[0] = HDCP_2_2_AKE_SEND_CERT;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
hdcp->auth.msg.hdcp2.ake_cert+1,
sizeof(hdcp->auth.msg.hdcp2.ake_cert)-1);
@@ -423,7 +426,7 @@ enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.ake_h_prime[0] = 7;
+ hdcp->auth.msg.hdcp2.ake_h_prime[0] = HDCP_2_2_AKE_SEND_HPRIME;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
hdcp->auth.msg.hdcp2.ake_h_prime+1,
sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)-1);
@@ -441,7 +444,7 @@ enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.ake_pairing_info[0] = 8;
+ hdcp->auth.msg.hdcp2.ake_pairing_info[0] = HDCP_2_2_AKE_SEND_PAIRING_INFO;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
hdcp->auth.msg.hdcp2.ake_pairing_info+1,
sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)-1);
@@ -459,7 +462,7 @@ enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.lc_l_prime[0] = 10;
+ hdcp->auth.msg.hdcp2.lc_l_prime[0] = HDCP_2_2_LC_SEND_LPRIME;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
hdcp->auth.msg.hdcp2.lc_l_prime+1,
sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)-1);
@@ -474,14 +477,27 @@ enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp)
enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp)
{
- enum mod_hdcp_status status;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.rx_id_list[0] = 12;
- status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
- hdcp->auth.msg.hdcp2.rx_id_list+1,
- sizeof(hdcp->auth.msg.hdcp2.rx_id_list)-1);
+ uint32_t device_count = 0;
+ uint32_t rx_id_list_size = 0;
+ uint32_t bytes_read = 0;
+ hdcp->auth.msg.hdcp2.rx_id_list[0] = HDCP_2_2_REP_SEND_RECVID_LIST;
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+ hdcp->auth.msg.hdcp2.rx_id_list+1,
+ HDCP_MAX_AUX_TRANSACTION_SIZE);
+ if (status == MOD_HDCP_STATUS_SUCCESS) {
+ bytes_read = HDCP_MAX_AUX_TRANSACTION_SIZE;
+ device_count = HDCP_2_2_DEV_COUNT_LO(hdcp->auth.msg.hdcp2.rx_id_list[2]) +
+ (HDCP_2_2_DEV_COUNT_HI(hdcp->auth.msg.hdcp2.rx_id_list[1]) << 4);
+ rx_id_list_size = MIN((21 + 5 * device_count),
+ (sizeof(hdcp->auth.msg.hdcp2.rx_id_list) - 1));
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2,
+ hdcp->auth.msg.hdcp2.rx_id_list + 1 + bytes_read,
+ (rx_id_list_size - 1) / HDCP_MAX_AUX_TRANSACTION_SIZE * HDCP_MAX_AUX_TRANSACTION_SIZE);
+ }
} else {
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
hdcp->auth.msg.hdcp2.rx_id_list,
@@ -495,7 +511,7 @@ enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = 17;
+ hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = HDCP_2_2_REP_STREAM_READY;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
hdcp->auth.msg.hdcp2.repeater_auth_stream_ready+1,
sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)-1);
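The DP receiver-ID list can exceed a single AUX transaction, so the read is now split: the first transaction pulls enough of the message to learn the device count, then the remainder is fetched from the second DPCD window at 0x69340. A worked example of the size computation, assuming HDCP_MAX_AUX_TRANSACTION_SIZE is 16:

/* RepeaterAuth_Send_ReceiverID_List body: RxInfo (2) + seq_num_V (3)
 * + V' (16) = 21 bytes, plus 5 bytes of Receiver ID per device. */
uint32_t device_count = 4;
uint32_t rx_id_list_size = 21 + 5 * device_count;      /* 41 */
uint32_t part2_len = (rx_id_list_size - 1) / 16 * 16;  /* 32 */
/* 16 bytes from the first window + 32 from the second (48 total)
 * cover the 41-byte body in whole AUX transactions. */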
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
index 724ebcee9a19..44956f9ba178 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
@@ -90,10 +90,14 @@ char *mod_hdcp_status_to_str(int32_t status)
return "MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING";
case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED:
+ return "MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED";
case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY:
return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY";
case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED:
+ return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED";
case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION:
return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION";
case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE:
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index ff91373ebada..d3192b9d0c3d 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -37,10 +37,11 @@
/* default logs */
#define HDCP_ERROR_TRACE(hdcp, status) \
HDCP_LOG_ERR(hdcp, \
- "[Link %d] WARNING %s IN STATE %s", \
+ "[Link %d] WARNING %s IN STATE %s STAY COUNT %d", \
hdcp->config.index, \
mod_hdcp_status_to_str(status), \
- mod_hdcp_state_id_to_str(hdcp->state.id))
+ mod_hdcp_state_id_to_str(hdcp->state.id), \
+ hdcp->state.stay_count)
#define HDCP_HDCP1_ENABLED_TRACE(hdcp, displayIndex) \
HDCP_LOG_VER(hdcp, \
"[Link %d] HDCP 1.4 enabled on display %d", \
@@ -49,6 +50,15 @@
HDCP_LOG_VER(hdcp, \
"[Link %d] HDCP 2.2 enabled on display %d", \
hdcp->config.index, displayIndex)
+#define HDCP_HDCP1_DISABLED_TRACE(hdcp, displayIndex) \
+ HDCP_LOG_VER(hdcp, \
+ "[Link %d] HDCP 1.4 disabled on display %d", \
+ hdcp->config.index, displayIndex)
+#define HDCP_HDCP2_DISABLED_TRACE(hdcp, displayIndex) \
+ HDCP_LOG_VER(hdcp, \
+ "[Link %d] HDCP 2.2 disabled on display %d", \
+ hdcp->config.index, displayIndex)
+
/* state machine logs */
#define HDCP_REMOVE_DISPLAY_TRACE(hdcp, displayIndex) \
HDCP_LOG_FSM(hdcp, \
@@ -102,6 +112,9 @@
sizeof(hdcp->auth.msg.hdcp1.bksv)); \
HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \
sizeof(hdcp->auth.msg.hdcp1.bcaps)); \
+ HDCP_DDC_READ_TRACE(hdcp, "BSTATUS", \
+ (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, \
+ sizeof(hdcp->auth.msg.hdcp1.bstatus)); \
HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \
sizeof(hdcp->auth.msg.hdcp1.an)); \
HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 7911dc157d5a..836e47954938 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -44,84 +44,78 @@ static void hdcp2_message_init(struct mod_hdcp *hdcp,
in->process.msg3_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
in->process.msg3_desc.msg_size = 0;
}
-enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp)
-{
-
- struct psp_context *psp = hdcp->config.psp.handle;
- struct ta_dtm_shared_memory *dtm_cmd;
- struct mod_hdcp_display *display = NULL;
- uint8_t i;
+enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
+ struct mod_hdcp *hdcp, uint8_t index)
+ {
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_dtm_shared_memory *dtm_cmd;
+ struct mod_hdcp_display *display =
+ get_active_display_at_index(hdcp, index);
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
- for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (is_display_added(&(hdcp->connection.displays[i]))) {
+ if (!display || !is_display_active(display))
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
- memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
- display = &hdcp->connection.displays[i];
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
+ dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
+ dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
- dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
- dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
- dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0;
- dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
- psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
- if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+ display->state = MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
+
+ return MOD_HDCP_STATUS_SUCCESS;
+ }
- display->state = MOD_HDCP_DISPLAY_ACTIVE;
- HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
- }
- }
-
- return MOD_HDCP_STATUS_SUCCESS;
-}
-
-enum mod_hdcp_status mod_hdcp_add_display_topology(struct mod_hdcp *hdcp)
+enum mod_hdcp_status mod_hdcp_add_display_to_topology(
+ struct mod_hdcp *hdcp, struct mod_hdcp_display *display)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_dtm_shared_memory *dtm_cmd;
- struct mod_hdcp_display *display = NULL;
struct mod_hdcp_link *link = &hdcp->connection.link;
- uint8_t i;
if (!psp->dtm_context.dtm_initialized) {
DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
return MOD_HDCP_STATUS_FAILURE;
}
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
- for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE) {
- display = &hdcp->connection.displays[i];
-
- memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
-
- dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
- dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
- dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1;
- dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller;
- dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line;
- dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be;
- dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
- dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
- dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
- TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2;
- dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
-
- psp_dtm_invoke(psp, dtm_cmd->cmd_id);
-
- if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
-
- display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
- HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
- }
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
+ dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
+ dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1;
+ dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller;
+ dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line;
+ dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be;
+ dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
+ if (is_dp_hdcp(hdcp))
+ dtm_cmd->dtm_in_message.topology_update_v2.is_assr = link->dp.assr_supported;
+
+ dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
+ dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
}
+ HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
+
return MOD_HDCP_STATUS_SUCCESS;
}
@@ -129,7 +123,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
struct ta_hdcp_shared_memory *hdcp_cmd;
if (!psp->hdcp_context.hdcp_initialized) {
@@ -164,6 +158,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
+ uint8_t i = 0;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -177,6 +172,14 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_encryption_enabled(
+ &hdcp->displays[i])) {
+ hdcp->displays[i].state =
+ MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_HDCP1_DISABLED_TRACE(hdcp,
+ hdcp->displays[i].index);
+ }
return MOD_HDCP_STATUS_SUCCESS;
}
@@ -210,6 +213,10 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
} else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) {
hdcp->connection.is_repeater = 0;
+ } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED) {
+ hdcp->connection.is_hdcp1_revoked = 1;
+ return MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED;
} else
return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
@@ -221,7 +228,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -245,6 +252,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -264,10 +272,19 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
+ if (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS &&
+ hdcp_cmd->out_msg.hdcp1_second_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else if (hdcp_cmd->out_msg.hdcp1_second_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED) {
+ hdcp->connection.is_hdcp1_revoked = 1;
+ status = MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED;
+ } else {
+ status = MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
+ }
- return MOD_HDCP_STATUS_SUCCESS;
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
@@ -281,14 +298,13 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
- hdcp->connection.displays[i].adjust.disable)
+ if (hdcp->displays[i].adjust.disable)
continue;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
- hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index;
+ hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
@@ -296,8 +312,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
- hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
- HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index);
+ hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
}
return MOD_HDCP_STATUS_SUCCESS;
@@ -344,7 +360,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
if (!psp->hdcp_context.hdcp_initialized) {
DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized");
@@ -385,6 +401,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
+ uint8_t i = 0;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -398,6 +415,14 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
return MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE;
HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp);
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_encryption_enabled(
+ &hdcp->displays[i])) {
+ hdcp->displays[i].state =
+ MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_HDCP2_DISABLED_TRACE(hdcp,
+ hdcp->displays[i].index);
+ }
return MOD_HDCP_STATUS_SUCCESS;
}
@@ -473,9 +498,12 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
return MOD_HDCP_STATUS_SUCCESS;
+ } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+ hdcp->connection.is_hdcp2_revoked = 1;
+ return MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED;
}
- return MOD_HDCP_STATUS_FAILURE;
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
@@ -630,20 +658,15 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
- msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
-
- hdcp2_message_init(hdcp, msg_in);
-
if (!display)
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
- hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id;
+ hdcp_cmd->in_msg.hdcp2_set_encryption.session_handle = hdcp->auth.id;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
@@ -695,6 +718,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
return MOD_HDCP_STATUS_SUCCESS;
+ } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+ hdcp->connection.is_hdcp2_revoked = 1;
+ return MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
}
@@ -717,10 +743,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
- hdcp->connection.displays[i].adjust.disable)
+ if (hdcp->displays[i].adjust.disable)
continue;
- hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index;
+ hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION;
@@ -729,8 +754,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
break;
- hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
- HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index);
+ hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
}
return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? MOD_HDCP_STATUS_SUCCESS
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
index 82a5e997d573..1a663dbbf810 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
@@ -117,6 +117,8 @@ struct ta_dtm_shared_memory {
int psp_cmd_submit_buf(struct psp_context *psp, struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd,
uint64_t fence_mc_addr);
+enum { PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE = 5120 };
+
enum ta_hdcp_command {
TA_HDCP_COMMAND__INITIALIZE,
TA_HDCP_COMMAND__HDCP1_CREATE_SESSION,
@@ -134,7 +136,10 @@ enum ta_hdcp_command {
TA_HDCP_COMMAND__UNUSED_3,
TA_HDCP_COMMAND__HDCP2_CREATE_SESSION_V2,
TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2,
- TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION
+ TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION,
+ TA_HDCP_COMMAND__HDCP_DESTROY_ALL_SESSIONS,
+ TA_HDCP_COMMAND__HDCP_SET_SRM,
+ TA_HDCP_COMMAND__HDCP_GET_SRM
};
enum ta_hdcp2_msg_id {
@@ -235,7 +240,8 @@ enum ta_hdcp_authentication_status {
TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_PENDING = 0x06,
TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_FAILED = 0x07,
TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATED = 0x08,
- TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED = 0x0A
};
enum ta_hdcp2_msg_authentication_status {
@@ -253,7 +259,8 @@ enum ta_hdcp2_msg_authentication_status {
TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SEQ_NUM,
TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SIZE,
TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_LENGTH,
- TA_HDCP2_MSG_AUTHENTICATION_STATUS__REAUTH_REQUEST
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__REAUTH_REQUEST,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED
};
enum ta_hdcp_content_type {
@@ -415,6 +422,22 @@ struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input {
uint32_t display_handle;
};
+struct ta_hdcp_cmd_set_srm_input {
+ uint32_t srm_buf_size;
+ uint8_t srm_buf[PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE];
+};
+
+struct ta_hdcp_cmd_set_srm_output {
+ uint8_t valid_signature;
+ uint32_t srm_version;
+};
+
+struct ta_hdcp_cmd_get_srm_output {
+ uint32_t srm_version;
+ uint32_t srm_buf_size;
+ uint8_t srm_buf[PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE];
+};
+
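
A minimal sketch of staging an SRM blob for the new TA_HDCP_COMMAND__HDCP_SET_SRM command, using only the structures declared above; the srm/srm_size inputs and the surrounding helper are assumed, not part of this patch:

/* Hypothetical sketch: stage an SRM blob into the PSP shared buffer. */
struct ta_hdcp_shared_memory *hdcp_cmd =
	(struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;

if (srm_size > PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE)
	return -EINVAL;

memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
memcpy(hdcp_cmd->in_msg.hdcp_set_srm.srm_buf, srm, srm_size);
hdcp_cmd->in_msg.hdcp_set_srm.srm_buf_size = srm_size;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_SET_SRM;

psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);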
/**********************************************************/
/* Common input structure for HDCP callbacks */
union ta_hdcp_cmd_input {
@@ -432,6 +455,7 @@ union ta_hdcp_cmd_input {
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2
hdcp2_prepare_process_authentication_message_v2;
struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input hdcp2_enable_dp_stream_encryption;
+ struct ta_hdcp_cmd_set_srm_input hdcp_set_srm;
};
/* Common output structure for HDCP callbacks */
@@ -444,6 +468,8 @@ union ta_hdcp_cmd_output {
struct ta_hdcp_cmd_hdcp2_create_session_output_v2 hdcp2_create_session_v2;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2
hdcp2_prepare_process_authentication_message_v2;
+ struct ta_hdcp_cmd_set_srm_output hdcp_set_srm;
+ struct ta_hdcp_cmd_get_srm_output hdcp_get_srm;
};
/**********************************************************/
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index f2a0e1a064da..eae9309cfb24 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -56,8 +56,10 @@ enum mod_hdcp_status {
MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE,
MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING,
MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED,
MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY,
MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED,
MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION,
MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE,
MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE,
@@ -100,6 +102,7 @@ enum mod_hdcp_status {
struct mod_hdcp_displayport {
uint8_t rev;
uint8_t assr_supported;
+ uint8_t mst_supported;
};
struct mod_hdcp_hdmi {
@@ -108,14 +111,12 @@ struct mod_hdcp_hdmi {
enum mod_hdcp_operation_mode {
MOD_HDCP_MODE_OFF,
MOD_HDCP_MODE_DEFAULT,
- MOD_HDCP_MODE_DP,
- MOD_HDCP_MODE_DP_MST
+ MOD_HDCP_MODE_DP
};
enum mod_hdcp_display_state {
MOD_HDCP_DISPLAY_INACTIVE = 0,
MOD_HDCP_DISPLAY_ACTIVE,
- MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED,
MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
};
@@ -155,7 +156,8 @@ struct mod_hdcp_display_adjustment {
struct mod_hdcp_link_adjustment_hdcp1 {
uint8_t disable : 1;
uint8_t postpone_encryption : 1;
- uint8_t reserved : 6;
+ uint8_t min_auth_retries_wa : 1;
+ uint8_t reserved : 5;
};
enum mod_hdcp_force_hdcp_type {
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index 42cbeffac640..13c57ff2abdc 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -34,8 +34,7 @@ struct dc_info_packet;
struct mod_vrr_params;
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
- struct dc_info_packet *info_packet,
- bool *use_vsc_sdp_for_colorimetry);
+ struct dc_info_packet *info_packet);
void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue);
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 6a8a056424b8..cff3ab15fc0c 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -130,8 +130,7 @@ enum ColorimetryYCCDP {
};
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
- struct dc_info_packet *info_packet,
- bool *use_vsc_sdp_for_colorimetry)
+ struct dc_info_packet *info_packet)
{
unsigned int vsc_packet_revision = vsc_packet_undefined;
unsigned int i;
@@ -139,11 +138,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
unsigned int colorimetryFormat = 0;
bool stereo3dSupport = false;
- /* Initialize first, later if infopacket is valid determine if VSC SDP
- * should be used to signal colorimetry format and pixel encoding.
- */
- *use_vsc_sdp_for_colorimetry = false;
-
if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) {
vsc_packet_revision = vsc_packet_rev1;
stereo3dSupport = true;
@@ -153,9 +147,8 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
if (stream->psr_version != 0)
vsc_packet_revision = vsc_packet_rev2;
- /* Update to revision 5 for extended colorimetry support for DPCD 1.4+ */
- if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
- stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
+ /* Update to revision 5 for extended colorimetry support */
+ if (stream->use_vsc_sdp_for_colorimetry)
vsc_packet_revision = vsc_packet_rev5;
/* VSC packet not needed based on the features
@@ -269,13 +262,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
info_packet->valid = true;
- /* If we are using VSC SDP revision 05h, use this to signal for
- * colorimetry format and pixel encoding. HW should later be
- * programmed to set MSA MISC1 bit 6 to indicate ignore
- * colorimetry format and pixel encoding in the MSA.
- */
- *use_vsc_sdp_for_colorimetry = true;
-
/* Set VSC SDP fields for pixel encoding and colorimetry format from DP 1.3 specs
* Data Bytes DB 18~16
* Bits 3:0 (Colorimetry Format) | Bits 7:4 (Pixel Encoding)
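
With the out-parameter dropped, the colorimetry decision now rides on the stream. A sketch of how the flag is presumably populated at stream setup, mirroring the DPCD check removed above (exact call site assumed, not shown in this diff):

/* Hypothetical sketch: the decision now lives on the stream. */
stream->use_vsc_sdp_for_colorimetry =
	stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
	stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;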
diff --git a/drivers/gpu/drm/amd/display/modules/vmid/vmid.c b/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
index f0a153704f6e..00f132f8ad55 100644
--- a/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
+++ b/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
@@ -40,14 +40,18 @@ struct core_vmid {
static void add_ptb_to_table(struct core_vmid *core_vmid, unsigned int vmid, uint64_t ptb)
{
- core_vmid->ptb_assigned_to_vmid[vmid] = ptb;
- core_vmid->num_vmids_available--;
+ if (vmid < MAX_VMID) {
+ core_vmid->ptb_assigned_to_vmid[vmid] = ptb;
+ core_vmid->num_vmids_available--;
+ }
}
static void clear_entry_from_vmid_table(struct core_vmid *core_vmid, unsigned int vmid)
{
- core_vmid->ptb_assigned_to_vmid[vmid] = 0;
- core_vmid->num_vmids_available++;
+ if (vmid < MAX_VMID) {
+ core_vmid->ptb_assigned_to_vmid[vmid] = 0;
+ core_vmid->num_vmids_available++;
+ }
}
static void evict_vmids(struct core_vmid *core_vmid)
@@ -57,7 +61,7 @@ static void evict_vmids(struct core_vmid *core_vmid)
// At this point any positions with value 0 are unused vmids, evict them
for (i = 1; i < core_vmid->num_vmid; i++) {
- if (ord & (1u << i))
+ if (!(ord & (1u << i)))
clear_entry_from_vmid_table(core_vmid, i);
}
}
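
The evict loop fix inverts the bit test: entries whose bit is clear in the ordinal mask are the unused ones. A self-contained demo of the corrected logic (plain C, not driver code):

#include <stdio.h>
#include <stdint.h>

#define MAX_VMID 16

int main(void)
{
	uint64_t ptb_assigned_to_vmid[MAX_VMID] = { 0, 0x1000, 0x2000, 0x3000 };
	uint32_t ord = 0x2; /* only vmid 1 is currently referenced */
	unsigned int i;

	/* vmid 0 is reserved; evict entries whose bit is NOT set */
	for (i = 1; i < MAX_VMID; i++) {
		if (!(ord & (1u << i)) && ptb_assigned_to_vmid[i])
			ptb_assigned_to_vmid[i] = 0; /* evicted */
	}

	for (i = 0; i < 4; i++)
		printf("vmid %u -> ptb %#llx\n", i,
		       (unsigned long long)ptb_assigned_to_vmid[i]);
	return 0;
}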
@@ -91,7 +95,7 @@ static int get_next_available_vmid(struct core_vmid *core_vmid)
uint8_t mod_vmid_get_for_ptb(struct mod_vmid *mod_vmid, uint64_t ptb)
{
struct core_vmid *core_vmid = MOD_VMID_TO_CORE(mod_vmid);
- unsigned int vmid = 0;
+ int vmid = 0;
// Physical address gets vmid 0
if (ptb == 0)
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
index b6f74bf4af02..27bb8c1ab858 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
@@ -7376,6 +7376,8 @@
#define mmCRTC4_CRTC_DRR_CONTROL 0x0f3e
#define mmCRTC4_CRTC_DRR_CONTROL_BASE_IDX 2
+#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x395d
+#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
// addressBlock: dce_dc_fmt4_dispdec
// base address: 0x2000
diff --git a/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h
new file mode 100644
index 000000000000..82b6cc25205e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _wafl2_4_0_0_SH_MASK_HEADER
+#define _wafl2_4_0_0_SH_MASK_HEADER
+
+//PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataLossErr__SHIFT 0x0
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TrainingErr__SHIFT 0x1
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__CRCErr__SHIFT 0x5
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERExceededErr__SHIFT 0x6
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TxMetaDataErr__SHIFT 0x7
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayBufParityErr__SHIFT 0x8
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataParityErr__SHIFT 0x9
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoOverflowErr__SHIFT 0xa
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoUnderflowErr__SHIFT 0xb
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ElasticFifoOverflowErr__SHIFT 0xc
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DeskewErr__SHIFT 0xd
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataStartupLimitErr__SHIFT 0xf
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__FCInitTimeoutErr__SHIFT 0x10
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryTimeoutErr__SHIFT 0x11
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialTimeoutErr__SHIFT 0x12
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialAttemptErr__SHIFT 0x13
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryAttemptErr__SHIFT 0x14
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryRelockAttemptErr__SHIFT 0x15
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ClearBERAccum__SHIFT 0x17
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERAccumulator__SHIFT 0x18
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataLossErr_MASK 0x00000001L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TrainingErr_MASK 0x00000002L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__CRCErr_MASK 0x00000020L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERExceededErr_MASK 0x00000040L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TxMetaDataErr_MASK 0x00000080L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayBufParityErr_MASK 0x00000100L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataParityErr_MASK 0x00000200L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoOverflowErr_MASK 0x00000400L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoUnderflowErr_MASK 0x00000800L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ElasticFifoOverflowErr_MASK 0x00001000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DeskewErr_MASK 0x00002000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataStartupLimitErr_MASK 0x00008000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__FCInitTimeoutErr_MASK 0x00010000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryTimeoutErr_MASK 0x00020000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialTimeoutErr_MASK 0x00040000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialAttemptErr_MASK 0x00080000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryAttemptErr_MASK 0x00100000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryRelockAttemptErr_MASK 0x00200000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ClearBERAccum_MASK 0x00800000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERAccumulator_MASK 0xFF000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h
new file mode 100644
index 000000000000..4a51a90c611a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _wafl2_4_0_0_SMN_HEADER
+#define _wafl2_4_0_0_SMN_HEADER
+
+#define smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS 0x11cf0210
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h
new file mode 100644
index 000000000000..f37712f05b03
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _xgmi_4_0_0_SH_MASK_HEADER
+#define _xgmi_4_0_0_SH_MASK_HEADER
+
+//PCS_GOPX16_PCS_ERROR_STATUS
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataLossErr__SHIFT 0x0
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TrainingErr__SHIFT 0x1
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__CRCErr__SHIFT 0x5
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERExceededErr__SHIFT 0x6
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TxMetaDataErr__SHIFT 0x7
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayBufParityErr__SHIFT 0x8
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataParityErr__SHIFT 0x9
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoOverflowErr__SHIFT 0xa
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr__SHIFT 0xb
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ElasticFifoOverflowErr__SHIFT 0xc
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DeskewErr__SHIFT 0xd
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataStartupLimitErr__SHIFT 0xf
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__FCInitTimeoutErr__SHIFT 0x10
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryTimeoutErr__SHIFT 0x11
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialTimeoutErr__SHIFT 0x12
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialAttemptErr__SHIFT 0x13
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryAttemptErr__SHIFT 0x14
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr__SHIFT 0x15
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ClearBERAccum__SHIFT 0x17
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator__SHIFT 0x18
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataLossErr_MASK 0x00000001L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TrainingErr_MASK 0x00000002L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__CRCErr_MASK 0x00000020L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERExceededErr_MASK 0x00000040L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TxMetaDataErr_MASK 0x00000080L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayBufParityErr_MASK 0x00000100L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataParityErr_MASK 0x00000200L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoOverflowErr_MASK 0x00000400L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr_MASK 0x00000800L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ElasticFifoOverflowErr_MASK 0x00001000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DeskewErr_MASK 0x00002000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataStartupLimitErr_MASK 0x00008000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__FCInitTimeoutErr_MASK 0x00010000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryTimeoutErr_MASK 0x00020000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialTimeoutErr_MASK 0x00040000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialAttemptErr_MASK 0x00080000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryAttemptErr_MASK 0x00100000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr_MASK 0x00200000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ClearBERAccum_MASK 0x00800000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator_MASK 0xFF000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h
new file mode 100644
index 000000000000..6ccbac4ce87e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _xgmi_4_0_0_SMN_HEADER
+#define _xgmi_4_0_0_SMN_HEADER
+
+#define smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS 0x11af0210
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index a607b1034962..a3c238c39ef5 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -123,7 +123,7 @@ struct kgd2kfd_shared_resources {
uint32_t num_queue_per_pipe;
/* Bit n == 1 means Queue n is available for KFD */
- DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES);
+ DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
/* SDMA doorbell assignments (SOC15 and later chips only). Only
* specific doorbells are routed to each SDMA engine. Others
@@ -151,6 +151,7 @@ struct kgd2kfd_shared_resources {
/* Minor device number of the render node */
int drm_render_minor;
+
};
struct tile_config {
@@ -166,27 +167,6 @@ struct tile_config {
#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
-/*
- * Allocation flag domains
- * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
- */
-#define ALLOC_MEM_FLAGS_VRAM (1 << 0)
-#define ALLOC_MEM_FLAGS_GTT (1 << 1)
-#define ALLOC_MEM_FLAGS_USERPTR (1 << 2)
-#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
-#define ALLOC_MEM_FLAGS_MMIO_REMAP (1 << 4)
-
-/*
- * Allocation flags attributes/access options.
- * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
- */
-#define ALLOC_MEM_FLAGS_WRITABLE (1 << 31)
-#define ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30)
-#define ALLOC_MEM_FLAGS_PUBLIC (1 << 29)
-#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28) /* TODO */
-#define ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
-#define ALLOC_MEM_FLAGS_COHERENT (1 << 26) /* For GFXv9 or later */
-
/**
* struct kfd2kgd_calls
*
@@ -222,8 +202,6 @@ struct tile_config {
* @set_scratch_backing_va: Sets VA for scratch backing memory of a VMID.
* Only used for no cp scheduling mode
*
- * @get_tile_config: Returns GPU-specific tiling mode information
- *
* @set_vm_context_page_table_base: Program page table base for a VMID
*
* @invalidate_tlbs: Invalidate TLBs for a specific PASID
@@ -236,6 +214,8 @@ struct tile_config {
*
* @get_hive_id: Returns hive id of current device, 0 if xgmi is not enabled
*
+ * @get_unique_id: Returns the unique id of the current device
+ *
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver.
*
@@ -307,12 +287,11 @@ struct kfd2kgd_calls {
void (*set_scratch_backing_va)(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
- int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
-
void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
uint32_t vmid, uint64_t page_table_base);
uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
uint64_t (*get_hive_id)(struct kgd_dev *kgd);
+ uint64_t (*get_unique_id)(struct kgd_dev *kgd);
};
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 99ad4ddbe12f..f6d4b0ef46ad 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -23,15 +23,12 @@
#include <linux/firmware.h>
#include <linux/pci.h>
-#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
-#include "soc15_common.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"
#include "atom.h"
-#include "amd_pcie.h"
#include "vega20_ppt.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
@@ -121,20 +118,20 @@ static int smu_feature_update_enable_state(struct smu_context *smu,
if (enabled) {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
- feature_low);
+ feature_low, NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
- feature_high);
+ feature_high, NULL);
if (ret)
return ret;
} else {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
- feature_low);
+ feature_low, NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
- feature_high);
+ feature_high, NULL);
if (ret)
return ret;
}
@@ -195,21 +192,13 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
return -EINVAL;
if (if_version) {
- ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
- if (ret)
- return ret;
-
- ret = smu_read_smc_arg(smu, if_version);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
if (ret)
return ret;
}
if (smu_version) {
- ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
- if (ret)
- return ret;
-
- ret = smu_read_smc_arg(smu, smu_version);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
if (ret)
return ret;
}
@@ -218,17 +207,19 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
}
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max)
+ uint32_t min, uint32_t max, bool lock_needed)
{
int ret = 0;
- if (min <= 0 && max <= 0)
- return -EINVAL;
-
if (!smu_clk_dpm_is_enabled(smu, clk_type))
return 0;
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
return ret;
}
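
A hedged caller sketch for the new lock_needed parameter (hypothetical call sites): pass true from contexts that do not already hold smu->mutex, false from ppt-level code that is invoked with the mutex held.

/* Hypothetical call sites -- illustrative only. */
int ret;

/* External path: the helper takes smu->mutex itself. */
ret = smu_set_soft_freq_range(smu, SMU_GFXCLK, min_freq, max_freq, true);

/* Internal ppt path: smu->mutex already held by the caller. */
ret = smu_set_soft_freq_range(smu, SMU_GFXCLK, min_freq, max_freq, false);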
@@ -251,7 +242,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (max > 0) {
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
- param);
+ param, NULL);
if (ret)
return ret;
}
@@ -259,7 +250,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (min > 0) {
param = (uint32_t)((clk_id << 16) | (min & 0xffff));
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
- param);
+ param, NULL);
if (ret)
return ret;
}
@@ -335,12 +326,8 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ
param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
- ret = smu_send_smc_msg_with_param(smu,SMU_MSG_GetDpmFreqByIndex,
- param);
- if (ret)
- return ret;
-
- ret = smu_read_smc_arg(smu, &param);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
+ param, &param);
if (ret)
return ret;
@@ -542,7 +529,8 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
ret = smu_send_smc_msg_with_param(smu, drv2smu ?
SMU_MSG_TransferTableDram2Smu :
SMU_MSG_TransferTableSmu2Dram,
- table_id | ((argument & 0xFFFF) << 16));
+ table_id | ((argument & 0xFFFF) << 16),
+ NULL);
if (ret)
return ret;
@@ -900,6 +888,7 @@ static int smu_sw_init(void *handle)
mutex_init(&smu->sensor_lock);
mutex_init(&smu->metrics_lock);
+ mutex_init(&smu->message_lock);
smu->watermarks_bitmap = 0;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
@@ -943,6 +932,13 @@ static int smu_sw_init(void *handle)
return ret;
}
+ if (adev->smu.ppt_funcs->i2c_eeprom_init) {
+ ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
+
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -952,6 +948,9 @@ static int smu_sw_fini(void *handle)
struct smu_context *smu = &adev->smu;
int ret;
+ if (adev->smu.ppt_funcs->i2c_eeprom_fini)
+ smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
+
kfree(smu->irq_source);
smu->irq_source = NULL;
@@ -1113,12 +1112,12 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;
}
+ ret = smu_set_driver_table_location(smu);
+ if (ret)
+ return ret;
+
/* smu_dump_pptable(smu); */
if (!amdgpu_sriov_vf(adev)) {
- ret = smu_set_driver_table_location(smu);
- if (ret)
- return ret;
-
/*
* Copy pptable bo in the vram to smc with SMU MSGs such as
* SetDriverDramAddr and TransferTableDram2Smu.
@@ -1454,29 +1453,84 @@ int smu_reset(struct smu_context *smu)
return ret;
}
+static int smu_disable_dpm(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t smu_version;
+ int ret = 0;
+ bool use_baco = !smu->is_apu &&
+ ((adev->in_gpu_reset &&
+ (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
+ (adev->in_runpm && amdgpu_asic_supports_baco(adev)));
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version.\n");
+ return ret;
+ }
+
+ /*
+ * Disable all enabled SMU features.
+ * This should really be handled by SMU firmware; as a backup,
+ * the driver issues this call itself until that sequence in
+ * SMU firmware is fully operational.

+ */
+ ret = smu_system_features_control(smu, false);
+ if (ret) {
+ pr_err("Failed to disable smu features.\n");
+ return ret;
+ }
+
+ /*
+ * Arcturus does not have BACO bit in disable feature mask.
+ * Enablement of BACO bit on Arcturus should be skipped.
+ */
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ if (use_baco && (smu_version > 0x360e00))
+ return 0;
+ }
+
+ /* For baco, need to leave BACO feature enabled */
+ if (use_baco) {
+ /*
+ * Check whether SMU_FEATURE_BACO_BIT is supported via
+ * 'smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT)' rather than
+ * 'smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)': the latter
+ * would always return false here, because the
+ * 'smu_system_features_control(smu, false)' issued above has just
+ * disabled all SMU features.
+ */
+ if (smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT) >= 0) {
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
+ if (ret) {
+ pr_warn("set BACO feature enabled failed, return %d\n", ret);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
static int smu_suspend(void *handle)
{
- int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
- bool baco_feature_is_enabled = false;
+ int ret;
- if (!smu->pm_enabled)
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
- if(!smu->is_apu)
- baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
-
- ret = smu_system_features_control(smu, false);
- if (ret)
- return ret;
+ if (!smu->pm_enabled)
+ return 0;
- if (baco_feature_is_enabled) {
- ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
- if (ret) {
- pr_warn("set BACO feature enabled failed, return %d\n", ret);
+ if (!amdgpu_sriov_vf(adev)) {
+ ret = smu_disable_dpm(smu);
+ if (ret)
return ret;
- }
}
smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
@@ -1942,7 +1996,7 @@ int smu_set_mp1_state(struct smu_context *smu,
return 0;
}
- ret = smu_send_smc_msg(smu, msg);
+ ret = smu_send_smc_msg(smu, msg, NULL);
if (ret)
pr_err("[PrepareMp1] Failed!\n");
@@ -2006,8 +2060,11 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
smu_set_watermarks_table(smu, table, clock_ranges);
- smu->watermarks_bitmap |= WATERMARKS_EXIST;
- smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+
+ if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+ smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+ }
}
mutex_unlock(&smu->mutex);
@@ -2617,12 +2674,3 @@ uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
return ret;
}
-
-int smu_send_smc_msg(struct smu_context *smu,
- enum smu_message_type msg)
-{
- int ret;
-
- ret = smu_send_smc_msg_with_param(smu, msg, 0);
- return ret;
-}
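
The zero-parameter wrapper removed here presumably moves to a header as an inline that forwards the new read_arg pointer (its call sites elsewhere in this diff pass NULL or an output pointer). A sketch under that assumption:

/* Assumed replacement wrapper (location and exact form not shown here). */
static inline int smu_send_smc_msg(struct smu_context *smu,
				   enum smu_message_type msg,
				   uint32_t *read_arg)
{
	return smu_send_smc_msg_with_param(smu, msg, 0, read_arg);
}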
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 14ba6aa876e2..c6d3bef15320 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -21,7 +21,6 @@
*
*/
-#include "pp_debug.h"
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
@@ -42,7 +41,7 @@
#include <linux/pci.h>
#include "amdgpu_ras.h"
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
#define CTF_OFFSET_EDGE 5
#define CTF_OFFSET_HOTSPOT 5
@@ -127,6 +126,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(WaflTest, PPSMC_MSG_WaflTest),
MSG_MAP(SetXgmiMode, PPSMC_MSG_SetXgmiMode),
MSG_MAP(SetMemoryChannelEnable, PPSMC_MSG_SetMemoryChannelEnable),
+ MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl),
};
static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
@@ -373,13 +373,13 @@ arcturus_set_single_dpm_table(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | 0xFF));
+ (clk_id << 16 | 0xFF),
+ &num_of_levels);
if (ret) {
pr_err("[%s] failed to get dpm levels!\n", __func__);
return ret;
}
- smu_read_smc_arg(smu, &num_of_levels);
if (!num_of_levels) {
pr_err("[%s] number of clk levels is invalid!\n", __func__);
return -EINVAL;
@@ -389,12 +389,12 @@ arcturus_set_single_dpm_table(struct smu_context *smu,
for (i = 0; i < num_of_levels; i++) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | i));
+ (clk_id << 16 | i),
+ &clk);
if (ret) {
pr_err("[%s] failed to get dpm freq by index!\n", __func__);
return ret;
}
- smu_read_smc_arg(smu, &clk);
if (!clk) {
pr_err("[%s] clk value is invalid!\n", __func__);
return -EINVAL;
@@ -552,13 +552,13 @@ static int arcturus_run_btc(struct smu_context *smu)
{
int ret = 0;
- ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+ ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
if (ret) {
pr_err("RunAfllBtc failed!\n");
return ret;
}
- return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc);
+ return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
}
static int arcturus_populate_umd_state_clk(struct smu_context *smu)
@@ -743,7 +743,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_GFXCLK << 16) | (freq & 0xffff));
+ (PPCLK_GFXCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s gfxclk !\n",
max ? "max" : "min");
@@ -758,7 +759,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_UCLK << 16) | (freq & 0xffff));
+ (PPCLK_UCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s memclk !\n",
max ? "max" : "min");
@@ -773,7 +775,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_SOCCLK << 16) | (freq & 0xffff));
+ (PPCLK_SOCCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s socclk !\n",
max ? "max" : "min");
@@ -1288,12 +1291,11 @@ static int arcturus_get_power_limit(struct smu_context *smu,
return -EINVAL;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
- power_src << 16);
+ power_src << 16, &asic_default_power_limit);
if (ret) {
pr_err("[%s] get PPT limit failed!", __func__);
return ret;
}
- smu_read_smc_arg(smu, &asic_default_power_limit);
} else {
/* the last hope to figure out the ppt limit */
if (!pptable) {
@@ -1497,7 +1499,8 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
- 1 << workload_type);
+ 1 << workload_type,
+ NULL);
if (ret) {
pr_err("Fail to set workload type %d\n", workload_type);
return ret;
@@ -2187,7 +2190,7 @@ static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &arcturus_i2c_eeprom_i2c_algo;
- snprintf(control->name, sizeof(control->name), "RAS EEPROM");
+ snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM");
res = i2c_add_adapter(control);
if (res)
@@ -2214,6 +2217,27 @@ static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu)
return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}
+static int arcturus_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ uint32_t smu_version;
+ int ret;
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+ /* PPSMC_MSG_DFCstateControl is supported by 54.15.0 and onwards */
+ if (smu_version < 0x360F00) {
+ pr_err("DFCstateControl is only supported by PMFW 54.15.0 and onwards\n");
+ return -EINVAL;
+ }
+
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
+}
+
static const struct pptable_funcs arcturus_ppt_funcs = {
/* translate smu index into arcturus specific index */
.get_smu_msg_index = arcturus_get_smu_msg_index,
@@ -2277,7 +2301,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
- .read_smc_arg = smu_v11_0_read_arg,
.init_display_count = smu_v11_0_init_display_count,
.set_allowed_mask = smu_v11_0_set_allowed_mask,
.get_enabled_mask = smu_v11_0_get_enabled_mask,
@@ -2307,6 +2330,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
.get_pptable_power_limit = arcturus_get_pptable_power_limit,
+ .set_df_cstate = arcturus_set_df_cstate,
};
void arcturus_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index bf04cfefb283..7740488999df 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1250,7 +1250,7 @@ static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
switch (sources) {
default:
pr_err("Unknown throttling event sources.");
- /* fall through */
+ fallthrough;
case 0:
protection = false;
/* src is unused */
@@ -3698,12 +3698,12 @@ static int smu7_request_link_speed_change_before_state_change(
data->force_pcie_gen = PP_PCIEGen2;
if (current_link_speed == PP_PCIEGen2)
break;
- /* fall through */
+ fallthrough;
case PP_PCIEGen2:
if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
break;
+ fallthrough;
#endif
- /* fall through */
default:
data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
break;
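
These hunks replace /* fall through */ comments with the fallthrough pseudo-keyword, which maps to __attribute__((__fallthrough__)) on supporting compilers so the intent is checkable by the compiler instead of only by static analyzers. A self-contained illustration (plain C; the macro definition below is the common portable idiom, not the kernel's exact one):

#include <stdio.h>

#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

#if __has_attribute(__fallthrough__)
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough do {} while (0) /* fallback: no annotation */
#endif

int main(void)
{
	int requested_gen = 3;

	switch (requested_gen) {
	case 3:
		printf("request gen3\n");
		fallthrough;	/* deliberately also try gen2 */
	case 2:
		printf("request gen2\n");
		break;
	default:
		printf("keep current link speed\n");
	}
	return 0;
}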
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 92a65e3daff4..f29f95be1e56 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -3382,7 +3382,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
}
if (data->need_update_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) {
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK | DPMTABLE_UPDATE_SOCCLK)) {
result = vega10_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
@@ -3390,7 +3390,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
}
if (data->need_update_dpm_table &
- (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
+ (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
result = vega10_populate_all_memory_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 3b3ec5666051..08b6ba39a6d7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -487,15 +487,16 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
int ret = 0;
+ bool use_baco = (adev->in_gpu_reset &&
+ (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
+ (adev->in_runpm && amdgpu_asic_supports_baco(adev));
ret = vega20_init_sclk_threshold(hwmgr);
PP_ASSERT_WITH_CODE(!ret,
"Failed to init sclk threshold!",
return ret);
- if (adev->in_gpu_reset &&
- (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) {
-
+ if (use_baco) {
ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
if (ret)
pr_err("Failed to apply vega20 baco workaround!\n");
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 97b6714e83e6..657a6f17e91f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -362,6 +362,7 @@ struct smu_context
struct mutex mutex;
struct mutex sensor_lock;
struct mutex metrics_lock;
+ struct mutex message_lock;
uint64_t pool_size;
struct smu_table_context smu_table;
@@ -371,6 +372,9 @@ struct smu_context
struct amd_pp_display_configuration *display_config;
struct smu_baco_context smu_baco;
void *od_settings;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *debugfs_sclk;
+#endif
uint32_t pstate_sclk;
uint32_t pstate_mclk;
@@ -514,8 +518,7 @@ struct pptable_funcs {
int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
int (*system_features_control)(struct smu_context *smu, bool en);
int (*send_smc_msg_with_param)(struct smu_context *smu,
- enum smu_message_type msg, uint32_t param);
- int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg);
+ enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
int (*init_display_count)(struct smu_context *smu, uint32_t count);
int (*set_allowed_mask)(struct smu_context *smu);
int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
@@ -707,7 +710,7 @@ int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max, bool lock_needed);
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max);
+ uint32_t min, uint32_t max, bool lock_needed);
int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
index e3291259b249..f736d773f9d6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
@@ -110,7 +110,11 @@
//Others
#define PPSMC_MSG_SetMemoryChannelEnable 0x39
-#define PPSMC_Message_Count 0x3A
+//OOB
+#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x3A
+
+#define PPSMC_MSG_DFCstateControl 0x3B
+#define PPSMC_Message_Count 0x3C
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
index 822cd8b5bf90..cea65093b6ad 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
@@ -37,7 +37,7 @@
#define PP_ASSERT_WITH_CODE(cond, msg, code) \
do { \
if (!(cond)) { \
- pr_warn("%s\n", msg); \
+ pr_warn_ratelimited("%s\n", msg); \
code; \
} \
} while (0)
@@ -45,7 +45,7 @@
#define PP_ASSERT(cond, msg) \
do { \
if (!(cond)) { \
- pr_warn("%s\n", msg); \
+ pr_warn_ratelimited("%s\n", msg); \
} \
} while (0)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
index ac0120e384be..4b2da98afcd2 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
@@ -701,7 +701,8 @@ typedef struct {
// APCC Settings
uint16_t PccThresholdLow;
uint16_t PccThresholdHigh;
- uint32_t PaddingAPCC[6]; //FIXME pending SPEC
+ uint32_t MGpuFanBoostLimitRpm;
+ uint32_t PaddingAPCC[5];
// Temperature Dependent Vmin
uint16_t VDDGFX_TVmin; //Celsius
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index d5314d12628a..1c88219fe403 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -28,8 +28,9 @@
#define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU11_DRIVER_IF_VERSION_VG20 0x13
#define SMU11_DRIVER_IF_VERSION_ARCT 0x12
-#define SMU11_DRIVER_IF_VERSION_NV10 0x33
-#define SMU11_DRIVER_IF_VERSION_NV14 0x34
+#define SMU11_DRIVER_IF_VERSION_NV10 0x35
+#define SMU11_DRIVER_IF_VERSION_NV12 0x33
+#define SMU11_DRIVER_IF_VERSION_NV14 0x36
/* MP Apertures */
#define MP0_Public 0x03800000
@@ -182,9 +183,8 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
int
smu_v11_0_send_msg_with_param(struct smu_context *smu,
enum smu_message_type msg,
- uint32_t param);
-
-int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg);
+ uint32_t param,
+ uint32_t *read_arg);
int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
index b2f96a101124..7a63cf8e85ed 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
@@ -39,21 +39,39 @@
#define SMU_11_0_PP_OVERDRIVE_VERSION 0x0800
#define SMU_11_0_PP_POWERSAVINGCLOCK_VERSION 0x0100
+enum SMU_11_0_ODFEATURE_CAP {
+ SMU_11_0_ODCAP_GFXCLK_LIMITS = 0,
+ SMU_11_0_ODCAP_GFXCLK_CURVE,
+ SMU_11_0_ODCAP_UCLK_MAX,
+ SMU_11_0_ODCAP_POWER_LIMIT,
+ SMU_11_0_ODCAP_FAN_ACOUSTIC_LIMIT,
+ SMU_11_0_ODCAP_FAN_SPEED_MIN,
+ SMU_11_0_ODCAP_TEMPERATURE_FAN,
+ SMU_11_0_ODCAP_TEMPERATURE_SYSTEM,
+ SMU_11_0_ODCAP_MEMORY_TIMING_TUNE,
+ SMU_11_0_ODCAP_FAN_ZERO_RPM_CONTROL,
+ SMU_11_0_ODCAP_AUTO_UV_ENGINE,
+ SMU_11_0_ODCAP_AUTO_OC_ENGINE,
+ SMU_11_0_ODCAP_AUTO_OC_MEMORY,
+ SMU_11_0_ODCAP_FAN_CURVE,
+ SMU_11_0_ODCAP_COUNT,
+};
+
enum SMU_11_0_ODFEATURE_ID {
- SMU_11_0_ODFEATURE_GFXCLK_LIMITS = 1 << 0, //GFXCLK Limit feature
- SMU_11_0_ODFEATURE_GFXCLK_CURVE = 1 << 1, //GFXCLK Curve feature
- SMU_11_0_ODFEATURE_UCLK_MAX = 1 << 2, //UCLK Limit feature
- SMU_11_0_ODFEATURE_POWER_LIMIT = 1 << 3, //Power Limit feature
- SMU_11_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << 4, //Fan Acoustic RPM feature
- SMU_11_0_ODFEATURE_FAN_SPEED_MIN = 1 << 5, //Minimum Fan Speed feature
- SMU_11_0_ODFEATURE_TEMPERATURE_FAN = 1 << 6, //Fan Target Temperature Limit feature
- SMU_11_0_ODFEATURE_TEMPERATURE_SYSTEM = 1 << 7, //Operating Temperature Limit feature
- SMU_11_0_ODFEATURE_MEMORY_TIMING_TUNE = 1 << 8, //AC Timing Tuning feature
- SMU_11_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << 9, //Zero RPM feature
- SMU_11_0_ODFEATURE_AUTO_UV_ENGINE = 1 << 10, //Auto Under Volt GFXCLK feature
- SMU_11_0_ODFEATURE_AUTO_OC_ENGINE = 1 << 11, //Auto Over Clock GFXCLK feature
- SMU_11_0_ODFEATURE_AUTO_OC_MEMORY = 1 << 12, //Auto Over Clock MCLK feature
- SMU_11_0_ODFEATURE_FAN_CURVE = 1 << 13, //VICTOR TODO
+ SMU_11_0_ODFEATURE_GFXCLK_LIMITS = 1 << SMU_11_0_ODCAP_GFXCLK_LIMITS, //GFXCLK Limit feature
+ SMU_11_0_ODFEATURE_GFXCLK_CURVE = 1 << SMU_11_0_ODCAP_GFXCLK_CURVE, //GFXCLK Curve feature
+ SMU_11_0_ODFEATURE_UCLK_MAX = 1 << SMU_11_0_ODCAP_UCLK_MAX, //UCLK Limit feature
+ SMU_11_0_ODFEATURE_POWER_LIMIT = 1 << SMU_11_0_ODCAP_POWER_LIMIT, //Power Limit feature
+ SMU_11_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << SMU_11_0_ODCAP_FAN_ACOUSTIC_LIMIT, //Fan Acoustic RPM feature
+ SMU_11_0_ODFEATURE_FAN_SPEED_MIN = 1 << SMU_11_0_ODCAP_FAN_SPEED_MIN, //Minimum Fan Speed feature
+ SMU_11_0_ODFEATURE_TEMPERATURE_FAN = 1 << SMU_11_0_ODCAP_TEMPERATURE_FAN, //Fan Target Temperature Limit feature
+ SMU_11_0_ODFEATURE_TEMPERATURE_SYSTEM = 1 << SMU_11_0_ODCAP_TEMPERATURE_SYSTEM, //Operating Temperature Limit feature
+ SMU_11_0_ODFEATURE_MEMORY_TIMING_TUNE = 1 << SMU_11_0_ODCAP_MEMORY_TIMING_TUNE, //AC Timing Tuning feature
+ SMU_11_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << SMU_11_0_ODCAP_FAN_ZERO_RPM_CONTROL, //Zero RPM feature
+ SMU_11_0_ODFEATURE_AUTO_UV_ENGINE = 1 << SMU_11_0_ODCAP_AUTO_UV_ENGINE, //Auto Under Volt GFXCLK feature
+ SMU_11_0_ODFEATURE_AUTO_OC_ENGINE = 1 << SMU_11_0_ODCAP_AUTO_OC_ENGINE, //Auto Over Clock GFXCLK feature
+ SMU_11_0_ODFEATURE_AUTO_OC_MEMORY = 1 << SMU_11_0_ODCAP_AUTO_OC_MEMORY, //Auto Over Clock MCLK feature
+ SMU_11_0_ODFEATURE_FAN_CURVE = 1 << SMU_11_0_ODCAP_FAN_CURVE, //Fan Curve feature
SMU_11_0_ODFEATURE_COUNT = 14,
};
#define SMU_11_0_MAX_ODFEATURE 32 //Maximum Number of OD Features
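The new SMU_11_0_ODFEATURE_CAP ordinals exist because od_table->cap[] is a plain array: indexing it with the bitmask-valued ODFEATURE_ID constants, as navi10_od_feature_is_supported() did before this change, reads the wrong slot for every feature past bit 0. Deriving each ODFEATURE bit from its ODCAP ordinal keeps the two encodings in sync:

        od_table->cap[SMU_11_0_ODFEATURE_UCLK_MAX]; /* old: index 4 (1 << 2), wrong slot */
        od_table->cap[SMU_11_0_ODCAP_UCLK_MAX];     /* new: index 2, the intended entry */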
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index d79e54b5ebf6..7fbebc1979cf 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -40,14 +40,13 @@ struct smu_12_0_cmn2aisc_mapping {
int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg);
-int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg);
-
int smu_v12_0_wait_for_response(struct smu_context *smu);
int
smu_v12_0_send_msg_with_param(struct smu_context *smu,
enum smu_message_type msg,
- uint32_t param);
+ uint32_t param,
+ uint32_t *read_arg);
int smu_v12_0_check_fw_status(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 19a9846b730e..d66dfa7410b6 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -21,7 +21,6 @@
*
*/
-#include "pp_debug.h"
#include <linux/firmware.h>
#include <linux/pci.h>
#include "amdgpu.h"
@@ -31,7 +30,6 @@
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
#include "smu11_driver_if_navi10.h"
-#include "soc15_common.h"
#include "atom.h"
#include "navi10_ppt.h"
#include "smu_v11_0_pptable.h"
@@ -661,14 +659,14 @@ static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
if (enable) {
/* vcn dpm on is a prerequisite for vcn power gate messages */
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
if (ret)
return ret;
}
power_gate->vcn_gated = false;
} else {
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
if (ret)
return ret;
}
@@ -686,14 +684,14 @@ static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
if (enable) {
if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg);
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
if (ret)
return ret;
}
power_gate->jpeg_gated = false;
} else {
if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg);
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
if (ret)
return ret;
}
@@ -736,9 +734,9 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu
return dpm_desc->SnapToDiscrete == 0 ? true : false;
}
-static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_ID feature)
+static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
{
- return od_table->cap[feature];
+ return od_table->cap[cap];
}
static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table,
@@ -846,7 +844,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_OD_SCLK:
if (!smu->od_enabled || !od_table || !od_settings)
break;
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS))
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
break;
size += sprintf(buf + size, "OD_SCLK:\n");
size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
@@ -854,7 +852,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_OD_MCLK:
if (!smu->od_enabled || !od_table || !od_settings)
break;
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX))
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
break;
size += sprintf(buf + size, "OD_MCLK:\n");
size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax);
@@ -862,7 +860,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_OD_VDDC_CURVE:
if (!smu->od_enabled || !od_table || !od_settings)
break;
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE))
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
break;
size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
for (i = 0; i < 3; i++) {
@@ -887,7 +885,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
break;
size = sprintf(buf, "%s:\n", "OD_RANGE");
- if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
&min_value, NULL);
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
@@ -896,14 +894,14 @@ static int navi10_print_clk_levels(struct smu_context *smu,
min_value, max_value);
}
- if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
&min_value, &max_value);
size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
min_value, max_value);
}
- if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
&min_value, &max_value);
size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
@@ -970,7 +968,7 @@ static int navi10_force_clk_levels(struct smu_context *smu,
if (ret)
return size;
- ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
return size;
break;
@@ -1042,7 +1040,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
int ret = 0;
uint32_t max_freq = 0;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
if (ret)
return ret;
@@ -1063,19 +1061,11 @@ static int navi10_display_config_changed(struct smu_context *smu)
int ret = 0;
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
- !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
- ret = smu_write_watermarks_table(smu);
- if (ret)
- return ret;
-
- smu->watermarks_bitmap |= WATERMARKS_LOADED;
- }
-
- if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
- smu->display_config->num_display);
+ smu->display_config->num_display,
+ NULL);
if (ret)
return ret;
}
@@ -1102,7 +1092,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
return ret;
force_freq = highest ? max_freq : min_freq;
- ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq);
+ ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false);
if (ret)
return ret;
}
@@ -1128,7 +1118,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
return ret;
}
@@ -1400,7 +1390,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
if (workload_type < 0)
return -EINVAL;
smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type);
+ 1 << workload_type, NULL);
return ret;
}
@@ -1465,7 +1455,8 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
- min_clocks.dcef_clock_in_sr/100);
+ min_clocks.dcef_clock_in_sr/100,
+ NULL);
if (ret) {
pr_err("Attempt to set divider for DCEFCLK Failed!");
return ret;
@@ -1493,6 +1484,7 @@ static int navi10_set_watermarks_table(struct smu_context *smu,
*clock_ranges)
{
int i;
+ int ret = 0;
Watermarks_t *table = watermarks;
if (!table || !clock_ranges)
@@ -1544,6 +1536,18 @@ static int navi10_set_watermarks_table(struct smu_context *smu,
clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
}
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+
+ /* pass data to smu controller */
+ if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+ ret = smu_write_watermarks_table(smu);
+ if (ret) {
+ pr_err("Failed to update WMTABLE!");
+ return ret;
+ }
+ smu->watermarks_bitmap |= WATERMARKS_LOADED;
+ }
+
return 0;
}
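With this hunk navi10 uploads the watermarks table as soon as DC hands it over: WATERMARKS_EXIST is set unconditionally and the write happens immediately unless WATERMARKS_LOADED is already set, mirroring the renoir_set_watermarks_table() change below. The block removed from navi10_display_config_changed() above is what previously deferred that upload to the next display-config event.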
@@ -1674,10 +1678,10 @@ static int navi10_set_standard_performance_level(struct smu_context *smu)
return navi10_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO);
}
- ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
if (ret)
return ret;
@@ -1742,10 +1746,10 @@ static int navi10_set_peak_performance_level(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
if (ret)
return ret;
@@ -1855,12 +1859,11 @@ static int navi10_get_power_limit(struct smu_context *smu,
return -EINVAL;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
- power_src << 16);
+ power_src << 16, &asic_default_power_limit);
if (ret) {
pr_err("[%s] get PPT limit failed!", __func__);
return ret;
}
- smu_read_smc_arg(smu, &asic_default_power_limit);
} else {
/* the last hope to figure out the ppt limit */
if (!pptable) {
@@ -1900,7 +1903,8 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
pptable->PcieLaneCount[i] : pcie_width_cap);
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
- smu_pcie_arg);
+ smu_pcie_arg,
+ NULL);
if (ret)
return ret;
@@ -1946,13 +1950,13 @@ static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetVoltageByDpm,
- param);
+ param,
+ &value);
if (ret) {
pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
return ret;
}
- smu_read_smc_arg(smu, &value);
*voltage = (uint16_t)value;
return 0;
@@ -2056,7 +2060,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
switch (type) {
case PP_OD_EDIT_SCLK_VDDC_TABLE:
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
pr_warn("GFXCLK_LIMITS not supported!\n");
return -ENOTSUPP;
}
@@ -2102,7 +2106,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
}
break;
case PP_OD_EDIT_MCLK_VDDC_TABLE:
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
pr_warn("UCLK_MAX not supported!\n");
return -ENOTSUPP;
}
@@ -2143,7 +2147,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
}
break;
case PP_OD_EDIT_VDDC_CURVE:
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
pr_warn("GFXCLK_CURVE not supported!\n");
return -ENOTSUPP;
}
@@ -2209,7 +2213,7 @@ static int navi10_run_btc(struct smu_context *smu)
{
int ret = 0;
- ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc);
+ ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
if (ret)
pr_err("RunBtc failed!\n");
@@ -2221,9 +2225,9 @@ static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
int result = 0;
if (!enable)
- result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE);
+ result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL);
else
- result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE);
+ result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
return result;
}
@@ -2332,7 +2336,6 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
- .read_smc_arg = smu_v11_0_read_arg,
.init_display_count = smu_v11_0_init_display_count,
.set_allowed_mask = smu_v11_0_set_allowed_mask,
.get_enabled_mask = smu_v11_0_get_enabled_mask,
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 861e6410363b..7bf52ecba01d 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -24,7 +24,6 @@
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
-#include "soc15_common.h"
#include "smu_v12_0_ppsmc.h"
#include "smu12_driver_if.h"
#include "smu_v12_0.h"
@@ -111,8 +110,8 @@ static struct smu_12_0_cmn2aisc_mapping renoir_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(GFXCLK, CLOCK_GFXCLK),
CLK_MAP(SCLK, CLOCK_GFXCLK),
CLK_MAP(SOCCLK, CLOCK_SOCCLK),
- CLK_MAP(UCLK, CLOCK_UMCCLK),
- CLK_MAP(MCLK, CLOCK_UMCCLK),
+ CLK_MAP(UCLK, CLOCK_FCLK),
+ CLK_MAP(MCLK, CLOCK_FCLK),
};
static struct smu_12_0_cmn2aisc_mapping renoir_table_map[SMU_TABLE_COUNT] = {
@@ -280,7 +279,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
break;
case SMU_MCLK:
count = NUM_MEMCLK_DPM_LEVELS;
- cur_value = metrics.ClockFrequency[CLOCK_UMCCLK];
+ cur_value = metrics.ClockFrequency[CLOCK_FCLK];
break;
case SMU_DCEFCLK:
count = NUM_DCFCLK_DPM_LEVELS;
@@ -342,14 +341,14 @@ static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
if (enable) {
/* vcn dpm on is a prerequisite for vcn power gate messages */
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
if (ret)
return ret;
}
power_gate->vcn_gated = false;
} else {
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
if (ret)
return ret;
}
@@ -367,14 +366,14 @@ static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
if (enable) {
if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
if (ret)
return ret;
}
power_gate->jpeg_gated = false;
} else {
if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
if (ret)
return ret;
}
@@ -423,7 +422,7 @@ static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
return ret;
force_freq = highest ? max_freq : min_freq;
- ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq);
+ ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false);
if (ret)
return ret;
}
@@ -456,7 +455,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) {
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
return ret;
}
@@ -622,22 +621,24 @@ static int renoir_force_clk_levels(struct smu_context *smu,
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
soft_max_level == 0 ? min_freq :
- soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq);
+ soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq,
+ NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
soft_min_level == 2 ? max_freq :
- soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq);
+ soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq,
+ NULL);
if (ret)
return ret;
break;
case SMU_SOCCLK:
GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq, NULL);
if (ret)
return ret;
break;
@@ -645,10 +646,10 @@ static int renoir_force_clk_levels(struct smu_context *smu,
case SMU_FCLK:
GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq, NULL);
if (ret)
return ret;
break;
@@ -672,14 +673,19 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
if (workload_type < 0) {
- pr_err("Unsupported power profile mode %d on RENOIR\n",smu->power_profile_mode);
+ /*
+ * TODO: If a case needs to switch to the powersave/default power mode,
+ * consider entering WORKLOAD_COMPUTE/WORKLOAD_CUSTOM for power saving.
+ */
+ pr_err_once("Unsupported power profile mode %d on RENOIR\n", smu->power_profile_mode);
return -EINVAL;
}
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type);
+ 1 << workload_type,
+ NULL);
if (ret) {
- pr_err("Fail to set workload type %d\n", workload_type);
+ pr_err_once("Fail to set workload type %d\n", workload_type);
return ret;
}
@@ -697,7 +703,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
if (ret)
return ret;
@@ -705,7 +711,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
if (ret)
return ret;
@@ -806,9 +812,10 @@ static int renoir_set_watermarks_table(
clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
}
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+
/* pass data to smu controller */
- if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
- !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+ if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
ret = smu_write_watermarks_table(smu);
if (ret) {
pr_err("Failed to update WMTABLE!");
@@ -909,7 +916,6 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.powergate_vcn = smu_v12_0_powergate_vcn,
.powergate_jpeg = smu_v12_0_powergate_jpeg,
.send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
- .read_smc_arg = smu_v12_0_read_arg,
.set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
.gfx_off_control = smu_v12_0_gfx_off_control,
.init_smc_tables = smu_v12_0_init_smc_tables,
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index 7bd200ffcda8..6900877de845 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -79,12 +79,13 @@
#define smu_set_default_od_settings(smu, initialize) \
((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
-int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
+#define smu_send_smc_msg_with_param(smu, msg, param, read_arg) \
+ ((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param), (read_arg)) : 0)
+
+static inline int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg, uint32_t *read_arg) {
+ return smu_send_smc_msg_with_param(smu, msg, 0, read_arg);
+}
-#define smu_send_smc_msg_with_param(smu, msg, param) \
- ((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
-#define smu_read_smc_arg(smu, arg) \
- ((smu)->ppt_funcs->read_smc_arg? (smu)->ppt_funcs->read_smc_arg((smu), (arg)) : 0)
#define smu_alloc_dpm_context(smu) \
((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
#define smu_init_display_count(smu, count) \
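smu_send_smc_msg() is now a static inline wrapper rather than an out-of-line helper: a message without a payload is simply the with-param form with param 0. The two calls below are therefore equivalent (message name taken from vega20_get_fan_speed_rpm() in this diff):

        ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm, &speed);
        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetCurrentRpm, 0, &speed);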
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 0dc49479a7eb..4fd77c7cfc80 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -26,7 +26,6 @@
#define SMU_11_0_PARTIAL_PPTABLE
-#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
@@ -64,7 +63,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
return 0;
}
-int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
+static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
@@ -92,7 +91,8 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
int
smu_v11_0_send_msg_with_param(struct smu_context *smu,
enum smu_message_type msg,
- uint32_t param)
+ uint32_t param,
+ uint32_t *read_arg)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
@@ -101,11 +101,12 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
if (index < 0)
return index;
+ mutex_lock(&smu->message_lock);
ret = smu_v11_0_wait_for_response(smu);
if (ret) {
pr_err("Msg issuing pre-check failed and "
"SMU may be not in the right state!\n");
- return ret;
+ goto out;
}
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
@@ -115,10 +116,21 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
ret = smu_v11_0_wait_for_response(smu);
- if (ret)
+ if (ret) {
pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
smu_get_message_name(smu, msg), index, param, ret);
-
+ goto out;
+ }
+ if (read_arg) {
+ ret = smu_v11_0_read_arg(smu, read_arg);
+ if (ret) {
+ pr_err("failed to read message arg: %10s (%d) \tparam: 0x%08x response %#x\n",
+ smu_get_message_name(smu, msg), index, param, ret);
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&smu->message_lock);
return ret;
}
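Holding smu->message_lock across the whole pre-check/write/send/read sequence is the point of the read_arg plumbing: previously smu_read_smc_arg() ran after smu_send_smc_msg_with_param() returned, with no lock held, so two concurrent senders could interleave and read each other's response out of mmMP1_SMN_C2PMSG_82. With the register read inside the same critical section that race is closed. (message_lock itself is assumed to be initialized in the common SMU setup code, outside this hunk.)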
@@ -262,6 +274,9 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
case CHIP_NAVI10:
smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
break;
+ case CHIP_NAVI12:
+ smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV12;
+ break;
case CHIP_NAVI14:
smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
break;
@@ -671,12 +686,14 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetSystemVirtualDramAddrHigh,
- address_high);
+ address_high,
+ NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetSystemVirtualDramAddrLow,
- address_low);
+ address_low,
+ NULL);
if (ret)
return ret;
@@ -685,15 +702,15 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
address_low = (uint32_t)lower_32_bits(address);
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
- address_high);
+ address_high, NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
- address_low);
+ address_low, NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
- (uint32_t)memory_pool->size);
+ (uint32_t)memory_pool->size, NULL);
if (ret)
return ret;
@@ -757,7 +774,7 @@ int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
int ret;
ret = smu_send_smc_msg_with_param(smu,
- SMU_MSG_SetMinDeepSleepDcefclk, clk);
+ SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
if (ret)
pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");
@@ -784,11 +801,13 @@ int smu_v11_0_set_driver_table_location(struct smu_context *smu)
if (driver_table->mc_address) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrHigh,
- upper_32_bits(driver_table->mc_address));
+ upper_32_bits(driver_table->mc_address),
+ NULL);
if (!ret)
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrLow,
- lower_32_bits(driver_table->mc_address));
+ lower_32_bits(driver_table->mc_address),
+ NULL);
}
return ret;
@@ -802,11 +821,13 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu)
if (tool_table->mc_address) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrHigh,
- upper_32_bits(tool_table->mc_address));
+ upper_32_bits(tool_table->mc_address),
+ NULL);
if (!ret)
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrLow,
- lower_32_bits(tool_table->mc_address));
+ lower_32_bits(tool_table->mc_address),
+ NULL);
}
return ret;
@@ -819,7 +840,7 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
if (!smu->pm_enabled)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
return ret;
}
@@ -837,12 +858,12 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
- feature_mask[1]);
+ feature_mask[1], NULL);
if (ret)
goto failed;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
- feature_mask[0]);
+ feature_mask[0], NULL);
if (ret)
goto failed;
@@ -862,17 +883,11 @@ int smu_v11_0_get_enabled_mask(struct smu_context *smu,
return -EINVAL;
if (bitmap_empty(feature->enabled, feature->feature_num)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_high);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
if (ret)
return ret;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_low);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
if (ret)
return ret;
@@ -894,10 +909,13 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
int ret = 0;
ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
- SMU_MSG_DisableAllSmuFeatures));
+ SMU_MSG_DisableAllSmuFeatures), NULL);
if (ret)
return ret;
+ bitmap_zero(feature->enabled, feature->feature_num);
+ bitmap_zero(feature->supported, feature->feature_num);
+
if (en) {
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
if (ret)
@@ -907,9 +925,6 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
feature->feature_num);
bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
feature->feature_num);
- } else {
- bitmap_zero(feature->enabled, feature->feature_num);
- bitmap_zero(feature->supported, feature->feature_num);
}
return ret;
@@ -923,7 +938,7 @@ int smu_v11_0_notify_display_change(struct smu_context *smu)
return ret;
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
return ret;
}
@@ -947,30 +962,24 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
return -EINVAL;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
- clk_id << 16);
+ clk_id << 16, clock);
if (ret) {
pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
return ret;
}
- ret = smu_read_smc_arg(smu, clock);
- if (ret)
- return ret;
-
if (*clock != 0)
return 0;
/* if DC limit is zero, return AC limit */
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
- clk_id << 16);
+ clk_id << 16, clock);
if (ret) {
pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
return ret;
}
- ret = smu_read_smc_arg(smu, clock);
-
- return ret;
+ return 0;
}
int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
@@ -978,8 +987,12 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
int ret = 0;
- max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
+ if (!smu->smu_table.max_sustainable_clocks)
+ max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
GFP_KERNEL);
+ else
+ max_sustainable_clocks = smu->smu_table.max_sustainable_clocks;
+
smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;
max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
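Guarding the kzalloc() with a check of smu_table.max_sustainable_clocks lets this function run more than once without leaking the previous buffer, presumably for re-initialization across suspend/resume. The allocation is still unchecked, though: a kzalloc() failure would be dereferenced on the very next line.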
@@ -1102,7 +1115,7 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
return -EOPNOTSUPP;
}
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
if (ret) {
pr_err("[%s] Set power limit Failed!\n", __func__);
return ret;
@@ -1132,11 +1145,7 @@ int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
else {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
- (asic_clk_id << 16));
- if (ret)
- return ret;
-
- ret = smu_read_smc_arg(smu, &freq);
+ (asic_clk_id << 16), &freq);
if (ret)
return ret;
}
@@ -1371,9 +1380,9 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
- ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
+ ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
else
- ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
+ ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
break;
default:
break;
@@ -1511,7 +1520,8 @@ int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
int ret = 0;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetXgmiMode,
- pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
+ pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
+ NULL);
return ret;
}
@@ -1624,14 +1634,14 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
int ret = 0;
- ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);
+ ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
return ret;
}
static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
{
- return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq);
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
}
bool smu_v11_0_baco_is_support(struct smu_context *smu)
@@ -1700,12 +1710,12 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
data |= 0x80000000;
WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
} else {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
}
} else {
- ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco);
+ ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
if (ret)
goto out;
@@ -1773,19 +1783,13 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
param = (clk_id & 0xffff) << 16;
if (max) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
- if (ret)
- goto failed;
- ret = smu_read_smc_arg(smu, max);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
if (ret)
goto failed;
}
if (min) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
- if (ret)
- goto failed;
- ret = smu_read_smc_arg(smu, min);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
if (ret)
goto failed;
}
@@ -1807,7 +1811,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
if (max > 0) {
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
- param);
+ param, NULL);
if (ret)
return ret;
}
@@ -1815,7 +1819,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
if (min > 0) {
param = (uint32_t)((clk_id << 16) | (min & 0xffff));
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
- param);
+ param, NULL);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 870e6db2907e..169ebdad87b8 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -20,7 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "pp_debug.h"
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
@@ -50,7 +49,7 @@ int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
return 0;
}
-int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
+static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
@@ -78,7 +77,8 @@ int smu_v12_0_wait_for_response(struct smu_context *smu)
int
smu_v12_0_send_msg_with_param(struct smu_context *smu,
enum smu_message_type msg,
- uint32_t param)
+ uint32_t param,
+ uint32_t *read_arg)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
@@ -87,11 +87,12 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu,
if (index < 0)
return index;
+ mutex_lock(&smu->message_lock);
ret = smu_v12_0_wait_for_response(smu);
if (ret) {
pr_err("Msg issuing pre-check failed and "
"SMU may be not in the right state!\n");
- return ret;
+ goto out;
}
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
@@ -101,10 +102,21 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu,
smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);
ret = smu_v12_0_wait_for_response(smu);
- if (ret)
+ if (ret) {
pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
index, ret, param);
-
+ goto out;
+ }
+ if (read_arg) {
+ ret = smu_v12_0_read_arg(smu, read_arg);
+ if (ret) {
+ pr_err("Failed to read message arg 0x%x, response 0x%x param 0x%x\n",
+ index, ret, param);
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&smu->message_lock);
return ret;
}
@@ -163,9 +175,9 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
return 0;
if (gate)
- return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma);
+ return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma, NULL);
else
- return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma);
+ return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL);
}
int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
@@ -174,9 +186,9 @@ int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
return 0;
if (gate)
- return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
else
- return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
+ return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn, NULL);
}
int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate)
@@ -185,9 +197,9 @@ int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate)
return 0;
if (gate)
- return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
else
- return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
}
int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
@@ -196,7 +208,9 @@ int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
return 0;
return smu_v12_0_send_msg_with_param(smu,
- SMU_MSG_SetGfxCGPG, enable ? 1 : 0);
+ SMU_MSG_SetGfxCGPG,
+ enable ? 1 : 0,
+ NULL);
}
int smu_v12_0_read_sensor(struct smu_context *smu,
@@ -262,10 +276,10 @@ int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
int ret = 0, timeout = 500;
if (enable) {
- ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
+ ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
} else {
- ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
+ ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
/* confirm gfx is back to "on" state, timeout is 0.5 second */
while (!(smu_v12_0_get_gfxoff_status(smu) == 2)) {
@@ -331,17 +345,11 @@ int smu_v12_0_get_enabled_mask(struct smu_context *smu,
if (!feature_mask || num < 2)
return -EINVAL;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_high);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
if (ret)
return ret;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_low);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
if (ret)
return ret;
@@ -388,14 +396,11 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
- ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, max);
if (ret) {
pr_err("Attempt to get max GX frequency from SMC Failed !\n");
goto failed;
}
- ret = smu_read_smc_arg(smu, max);
- if (ret)
- goto failed;
break;
case SMU_UCLK:
case SMU_FCLK:
@@ -419,14 +424,11 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
- ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency, min);
if (ret) {
pr_err("Attempt to get min GX frequency from SMC Failed !\n");
goto failed;
}
- ret = smu_read_smc_arg(smu, min);
- if (ret)
- goto failed;
break;
case SMU_UCLK:
case SMU_FCLK:
@@ -450,7 +452,7 @@ failed:
}
int smu_v12_0_mode2_reset(struct smu_context *smu){
- return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2);
+ return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
}
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -458,45 +460,42 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
{
int ret = 0;
- if (max < min)
- return -EINVAL;
-
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL);
if (ret)
return ret;
break;
case SMU_FCLK:
case SMU_MCLK:
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL);
if (ret)
return ret;
break;
case SMU_SOCCLK:
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL);
if (ret)
return ret;
break;
case SMU_VCLK:
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL);
if (ret)
return ret;
break;
@@ -515,11 +514,13 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu)
if (driver_table->mc_address) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrHigh,
- upper_32_bits(driver_table->mc_address));
+ upper_32_bits(driver_table->mc_address),
+ NULL);
if (!ret)
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrLow,
- lower_32_bits(driver_table->mc_address));
+ lower_32_bits(driver_table->mc_address),
+ NULL);
}
return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index 49e5ef3e3876..16aa171971d3 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -33,6 +33,8 @@
#include "smu7_smumgr.h"
#include "vega20_hwmgr.h"
+#include "smu_v11_0_i2c.h"
+
/* MP Apertures */
#define MP0_Public 0x03800000
#define MP0_SRAM 0x03900000
@@ -406,6 +408,7 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
struct vega20_smumgr *priv;
unsigned long tools_size = 0x19000;
int ret = 0;
+ struct amdgpu_device *adev = hwmgr->adev;
struct cgs_firmware_info info = {0};
@@ -505,6 +508,10 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);
+ ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
+ if (ret)
+ goto err4;
+
return 0;
err4:
@@ -537,6 +544,9 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
{
struct vega20_smumgr *priv =
(struct vega20_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
if (priv) {
amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
@@ -560,6 +570,7 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
kfree(hwmgr->smu_backend);
hwmgr->smu_backend = NULL;
}
+
return 0;
}
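On the vega20 powerplay path, the RAS-EEPROM i2c controller is brought up at the end of smu_init(), reusing the existing err4 unwind label on failure, and torn down first thing in smu_fini(), tying its lifetime to the SMU tables. The init/fini helpers come from the newly included smu_v11_0_i2c.h.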
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 4ad8d6c14ee5..49ff3756bd9f 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -21,7 +21,6 @@
*
*/
-#include "pp_debug.h"
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
@@ -587,7 +586,7 @@ static int vega20_check_powerplay_table(struct smu_context *smu)
static int vega20_run_btc_afll(struct smu_context *smu)
{
- return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+ return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
}
#define FEATURE_MASK(feature) (1ULL << feature)
@@ -670,13 +669,13 @@ vega20_set_single_dpm_table(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | 0xFF));
+ (clk_id << 16 | 0xFF),
+ &num_of_levels);
if (ret) {
pr_err("[GetNumOfDpmLevel] failed to get dpm levels!");
return ret;
}
- smu_read_smc_arg(smu, &num_of_levels);
if (!num_of_levels) {
pr_err("[GetNumOfDpmLevel] number of clk levels is invalid!");
return -EINVAL;
@@ -687,12 +686,12 @@ vega20_set_single_dpm_table(struct smu_context *smu,
for (i = 0; i < num_of_levels; i++) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | i));
+ (clk_id << 16 | i),
+ &clk);
if (ret) {
pr_err("[GetDpmFreqByIndex] failed to get dpm freq by index!");
return ret;
}
- smu_read_smc_arg(smu, &clk);
if (!clk) {
pr_err("[GetDpmFreqByIndex] clk value is invalid!");
return -EINVAL;
@@ -1200,7 +1199,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_GFXCLK << 16) | (freq & 0xffff));
+ (PPCLK_GFXCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s gfxclk !\n",
max ? "max" : "min");
@@ -1215,7 +1215,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_UCLK << 16) | (freq & 0xffff));
+ (PPCLK_UCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s memclk !\n",
max ? "max" : "min");
@@ -1230,7 +1231,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_SOCCLK << 16) | (freq & 0xffff));
+ (PPCLK_SOCCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s socclk !\n",
max ? "max" : "min");
@@ -1245,7 +1247,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_FCLK << 16) | (freq & 0xffff));
+ (PPCLK_FCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s fclk !\n",
max ? "max" : "min");
@@ -1260,7 +1263,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
if (!max) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinByFreq,
- (PPCLK_DCEFCLK << 16) | (freq & 0xffff));
+ (PPCLK_DCEFCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set hard min dcefclk !\n");
return ret;
@@ -1421,7 +1425,9 @@ static int vega20_force_clk_levels(struct smu_context *smu,
}
ret = smu_send_smc_msg_with_param(smu,
- SMU_MSG_SetMinLinkDpmByIndex, soft_min_level);
+ SMU_MSG_SetMinLinkDpmByIndex,
+ soft_min_level,
+ NULL);
if (ret)
pr_err("Failed to set min link dpm level!\n");
@@ -1477,13 +1483,13 @@ static int vega20_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetAVFSVoltageByDpm,
- ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq));
+ ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq),
+ voltage);
if (ret) {
pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
return ret;
}
- smu_read_smc_arg(smu, voltage);
*voltage = *voltage / VOLTAGE_SCALE;
return 0;
@@ -1956,8 +1962,10 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u
workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
if (workload_type < 0)
return -EINVAL;
- smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type);
+ smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetWorkloadMask,
+ 1 << workload_type,
+ NULL);
return ret;
}
@@ -2029,7 +2037,8 @@ vega20_set_uclk_to_highest_dpm_level(struct smu_context *smu,
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level);
+ (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
+ NULL);
if (ret) {
pr_err("[%s] Set hard min uclk failed!", __func__);
return ret;
@@ -2047,7 +2056,7 @@ static int vega20_pre_display_config_changed(struct smu_context *smu)
if (!smu->smu_dpm.dpm_context)
return -EINVAL;
- smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
+ smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
ret = vega20_set_uclk_to_highest_dpm_level(smu,
&dpm_table->mem_table);
if (ret)
@@ -2074,7 +2083,8 @@ static int vega20_display_config_changed(struct smu_context *smu)
smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
smu_send_smc_msg_with_param(smu,
SMU_MSG_NumOfDisplays,
- smu->display_config->num_display);
+ smu->display_config->num_display,
+ NULL);
}
return ret;
@@ -2247,7 +2257,8 @@ vega20_notify_smc_display_config(struct smu_context *smu)
if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
- min_clocks.dcef_clock_in_sr/100);
+ min_clocks.dcef_clock_in_sr/100,
+ NULL);
if (ret) {
pr_err("Attempt to set divider for DCEFCLK Failed!");
return ret;
@@ -2262,7 +2273,8 @@ vega20_notify_smc_display_config(struct smu_context *smu)
memtable->dpm_state.hard_min_level = min_clocks.memory_clock/100;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level);
+ (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level,
+ NULL);
if (ret) {
pr_err("[%s] Set hard min uclk failed!", __func__);
return ret;
@@ -2853,8 +2865,10 @@ static int vega20_set_thermal_fan_table(struct smu_context *smu)
struct smu_table_context *table_context = &smu->smu_table;
PPTable_t *pptable = table_context->driver_pptable;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget,
- (uint32_t)pptable->FanTargetTemperature);
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetFanTemperatureTarget,
+ (uint32_t)pptable->FanTargetTemperature,
+ NULL);
return ret;
}
@@ -2864,15 +2878,13 @@ static int vega20_get_fan_speed_rpm(struct smu_context *smu,
{
int ret;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm, speed);
if (ret) {
pr_err("Attempt to get current RPM from SMC Failed!\n");
return ret;
}
- smu_read_smc_arg(smu, speed);
-
return 0;
}
@@ -3137,7 +3149,7 @@ static int vega20_set_df_cstate(struct smu_context *smu,
return -EINVAL;
}
- return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state);
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
}
static int vega20_update_pcie_parameters(struct smu_context *smu,
@@ -3155,7 +3167,8 @@ static int vega20_update_pcie_parameters(struct smu_context *smu,
pptable->PcieLaneCount[i] : pcie_width_cap);
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
- smu_pcie_arg);
+ smu_pcie_arg,
+ NULL);
}
return ret;
@@ -3229,7 +3242,6 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
- .read_smc_arg = smu_v11_0_read_arg,
.init_display_count = smu_v11_0_init_display_count,
.set_allowed_mask = smu_v11_0_set_allowed_mask,
.get_enabled_mask = smu_v11_0_get_enabled_mask,
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 8ae1e1f97a73..be7c29cec318 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -9,7 +9,6 @@
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_vblank.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <linux/clk.h>
@@ -138,24 +137,9 @@ static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc,
~ARCPGU_CTRL_ENABLE_MASK);
}
-static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
-{
- struct drm_pending_vblank_event *event = crtc->state->event;
-
- if (event) {
- crtc->state->event = NULL;
-
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irq(&crtc->dev->event_lock);
- }
-}
-
static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = {
.mode_valid = arc_pgu_crtc_mode_valid,
.mode_set_nofb = arc_pgu_crtc_mode_set_nofb,
- .atomic_begin = arc_pgu_crtc_atomic_begin,
.atomic_enable = arc_pgu_crtc_atomic_enable,
.atomic_disable = arc_pgu_crtc_atomic_disable,
};
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index 8fd7094beece..52839934f2fb 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -40,7 +40,7 @@ int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np)
return ret;
/* Link drm_bridge to encoder */
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret)
drm_encoder_cleanup(encoder);
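drm_bridge_attach() grew a flags argument in this kernel cycle; passing 0 keeps the historical behaviour where the bridge creates its own connector. A sketch of the opt-out variant, assuming the caller builds the connector itself:

        /* assumption: the driver creates its own connector for this bridge */
        ret = drm_bridge_attach(encoder, bridge, NULL,
                                DRM_BRIDGE_ATTACH_NO_CONNECTOR);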
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index ac8a78bfda03..f2dc371bd8e5 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -129,18 +129,12 @@ int armada_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, fbh, 1);
+ ret = drm_fb_helper_init(dev, fbh);
if (ret) {
DRM_ERROR("failed to initialize drm fb helper\n");
goto err_fb_helper;
}
- ret = drm_fb_helper_single_add_all_connectors(fbh);
- if (ret) {
- DRM_ERROR("failed to add fb connectors\n");
- goto err_fb_setup;
- }
-
ret = drm_fb_helper_initial_config(fbh, 32);
if (ret) {
DRM_ERROR("failed to set initial config\n");
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index f5d8780776ae..656d591b154b 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -121,6 +121,7 @@ struct ast_private {
unsigned int next_index;
} cursor;
+ struct drm_encoder encoder;
struct drm_plane primary_plane;
struct drm_plane cursor_plane;
@@ -238,13 +239,8 @@ struct ast_crtc {
u8 offset_x, offset_y;
};
-struct ast_encoder {
- struct drm_encoder base;
-};
-
#define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
#define to_ast_connector(x) container_of(x, struct ast_connector, base)
-#define to_ast_encoder(x) container_of(x, struct ast_encoder, base)
struct ast_vbios_stdtable {
u8 misc;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index b79f484e9bd2..18a0a4ce00f6 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -388,31 +388,9 @@ static int ast_get_dram_info(struct drm_device *dev)
return 0;
}
-enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
- const struct drm_display_mode *mode)
-{
- static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGBA8888 */
-
- struct ast_private *ast = dev->dev_private;
- unsigned long fbsize, fbpages, max_fbpages;
-
- /* To support double buffering, a framebuffer may not
- * consume more than half of the available VRAM.
- */
- max_fbpages = (ast->vram_size / 2) >> PAGE_SHIFT;
-
- fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
- fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
-
- if (fbpages > max_fbpages)
- return MODE_MEM;
-
- return MODE_OK;
-}
-
static const struct drm_mode_config_funcs ast_mode_funcs = {
.fb_create = drm_gem_fb_create,
- .mode_valid = ast_mode_config_mode_valid,
+ .mode_valid = drm_vram_helper_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 34608f0499eb..cdd6c46d6557 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -40,6 +40,7 @@
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "ast_drv.h"
#include "ast_tables.h"
@@ -833,8 +834,6 @@ static void ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
struct ast_vbios_mode_info *vbios_mode_info;
struct drm_display_mode *adjusted_mode;
- crtc->state->no_vblank = true;
-
ast_state = to_ast_crtc_state(crtc->state);
format = ast_state->format;
@@ -959,28 +958,18 @@ err_kfree:
* Encoder
*/
-static void ast_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
- kfree(encoder);
-}
-
-static const struct drm_encoder_funcs ast_enc_funcs = {
- .destroy = ast_encoder_destroy,
-};
-
static int ast_encoder_init(struct drm_device *dev)
{
- struct ast_encoder *ast_encoder;
+ struct ast_private *ast = dev->dev_private;
+ struct drm_encoder *encoder = &ast->encoder;
+ int ret;
- ast_encoder = kzalloc(sizeof(struct ast_encoder), GFP_KERNEL);
- if (!ast_encoder)
- return -ENOMEM;
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
+ if (ret)
+ return ret;
- drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
+ encoder->possible_crtcs = 1;
- ast_encoder->base.possible_crtcs = 1;
return 0;
}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 121b62682d80..e2019fe97fff 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -114,7 +114,7 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
}
if (bridge) {
- ret = drm_bridge_attach(&output->encoder, bridge, NULL);
+ ret = drm_bridge_attach(&output->encoder, bridge, NULL, 0);
if (!ret)
return 0;
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 10460878414e..addb0568c1af 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -23,7 +23,6 @@ static void bochs_unload(struct drm_device *dev)
bochs_kms_fini(bochs);
bochs_mm_fini(bochs);
- bochs_hw_fini(dev);
kfree(bochs);
dev->dev_private = NULL;
}
@@ -69,6 +68,7 @@ static struct drm_driver bochs_driver = {
.major = 1,
.minor = 0,
DRM_GEM_VRAM_DRIVER,
+ .release = bochs_unload,
};
/* ---------------------------------------------------------------------- */
@@ -148,9 +148,9 @@ static void bochs_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
- drm_dev_unregister(dev);
- bochs_unload(dev);
+ bochs_hw_fini(dev);
drm_dev_put(dev);
}
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index b615b7dfdd9d..952199cc0462 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -4,6 +4,7 @@
#include <linux/pci.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include "bochs.h"
@@ -194,6 +195,8 @@ void bochs_hw_fini(struct drm_device *dev)
{
struct bochs_device *bochs = dev->dev_private;
+ /* TODO: shoot down existing vram mappings */
+
if (bochs->mmio)
iounmap(bochs->mmio);
if (bochs->ioports)
@@ -207,6 +210,11 @@ void bochs_hw_fini(struct drm_device *dev)
void bochs_hw_setmode(struct bochs_device *bochs,
struct drm_display_mode *mode)
{
+ int idx;
+
+ if (!drm_dev_enter(bochs->dev, &idx))
+ return;
+
bochs->xres = mode->hdisplay;
bochs->yres = mode->vdisplay;
bochs->bpp = 32;
@@ -232,11 +240,18 @@ void bochs_hw_setmode(struct bochs_device *bochs,
bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE,
VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED);
+
+ drm_dev_exit(idx);
}
void bochs_hw_setformat(struct bochs_device *bochs,
const struct drm_format_info *format)
{
+ int idx;
+
+ if (!drm_dev_enter(bochs->dev, &idx))
+ return;
+
DRM_DEBUG_DRIVER("format %c%c%c%c\n",
(format->format >> 0) & 0xff,
(format->format >> 8) & 0xff,
@@ -256,13 +271,18 @@ void bochs_hw_setformat(struct bochs_device *bochs,
__func__, format->format);
break;
}
+
+ drm_dev_exit(idx);
}
void bochs_hw_setbase(struct bochs_device *bochs,
int x, int y, int stride, u64 addr)
{
unsigned long offset;
- unsigned int vx, vy, vwidth;
+ unsigned int vx, vy, vwidth, idx;
+
+ if (!drm_dev_enter(bochs->dev, &idx))
+ return;
bochs->stride = stride;
offset = (unsigned long)addr +
@@ -277,4 +297,6 @@ void bochs_hw_setbase(struct bochs_device *bochs,
bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH, vwidth);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, vx);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, vy);
+
+ drm_dev_exit(idx);
}
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 3f0006c2470d..8066d7d370d5 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -7,7 +7,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "bochs.h"
@@ -57,16 +56,8 @@ static void bochs_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct bochs_device *bochs = pipe->crtc.dev->dev_private;
- struct drm_crtc *crtc = &pipe->crtc;
bochs_plane_update(bochs, pipe->plane.state);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- crtc->state->event = NULL;
- spin_unlock_irq(&crtc->dev->event_lock);
- }
}
static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = {
@@ -92,32 +83,11 @@ static int bochs_connector_get_modes(struct drm_connector *connector)
return count;
}
-static enum drm_mode_status bochs_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct bochs_device *bochs =
- container_of(connector, struct bochs_device, connector);
- unsigned long size = mode->hdisplay * mode->vdisplay * 4;
-
- /*
- * Make sure we can fit two framebuffers into video memory.
- * This allows up to 1600x1200 with 16 MB (default size).
- * If you want more try this:
- * 'qemu -vga std -global VGA.vgamem_mb=32 $otherargs'
- */
- if (size * 2 > bochs->fb_size)
- return MODE_BAD;
-
- return MODE_OK;
-}
-
static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
.get_modes = bochs_connector_get_modes,
- .mode_valid = bochs_connector_mode_valid,
};
static const struct drm_connector_funcs bochs_connector_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
@@ -157,6 +127,7 @@ bochs_gem_fb_create(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_config_funcs bochs_mode_funcs = {
.fb_create = bochs_gem_fb_create,
+ .mode_valid = drm_vram_helper_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -192,6 +163,9 @@ int bochs_kms_init(struct bochs_device *bochs)
void bochs_kms_fini(struct bochs_device *bochs)
{
+ if (!bochs->dev->mode_config.num_connector)
+ return;
+
drm_atomic_helper_shutdown(bochs->dev);
drm_mode_config_cleanup(bochs->dev);
}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 0b9ca5862455..aaed2347ace9 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -27,13 +27,16 @@ config DRM_CDNS_DSI
Support Cadence DPI to DSI bridge. This is an internal
bridge and is meant to be directly embedded in a SoC.
-config DRM_DUMB_VGA_DAC
- tristate "Dumb VGA DAC Bridge support"
+config DRM_DISPLAY_CONNECTOR
+ tristate "Display connector support"
depends on OF
- select DRM_KMS_HELPER
help
- Support for non-programmable RGB to VGA DAC bridges, such as ADI
- ADV7123, TI THS8134 and THS8135 or passive resistor ladder DACs.
+ Driver for display connectors with support for DDC and hot-plug
+ detection. Most display controllers handle display connectors
+ internally and don't need this driver, but the DRM subsystem is
+ moving towards separating connector handling from display controllers
+ on ARM-based platforms. Saying Y here when this driver is not needed
+ will not cause any issue.
config DRM_LVDS_CODEC
tristate "Transparent LVDS encoders and decoders support"
@@ -72,6 +75,17 @@ config DRM_PARADE_PS8622
---help---
Parade eDP-LVDS bridge chip driver.
+config DRM_PARADE_PS8640
+ tristate "Parade PS8640 MIPI DSI to eDP Converter"
+ depends on OF
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL
+ help
+ Choose this option if you have a PS8640 for your display.
+ The PS8640 is a high-performance and low-power
+ MIPI DSI to eDP converter.
+
config DRM_SIL_SII8620
tristate "Silicon Image SII8620 HDMI/MHL bridge"
depends on OF
@@ -87,6 +101,7 @@ config DRM_SII902X
select DRM_KMS_HELPER
select REGMAP_I2C
select I2C_MUX
+ select SND_SOC_HDMI_CODEC if SND_SOC
---help---
Silicon Image sii902x bridge chip driver.
@@ -98,6 +113,14 @@ config DRM_SII9234
It is an I2C driver, that detects connection of MHL bridge
and starts encapsulation of HDMI signal.
+config DRM_SIMPLE_BRIDGE
+ tristate "Simple DRM bridge support"
+ depends on OF
+ select DRM_KMS_HELPER
+ help
+ Support for non-programmable DRM bridges, such as ADI ADV7123, TI
+ THS8134 and THS8135, or passive resistor ladder DACs.
+
config DRM_THINE_THC63LVD1024
tristate "Thine THC63LVD1024 LVDS decoder bridge"
depends on OF
@@ -122,6 +145,16 @@ config DRM_TOSHIBA_TC358767
---help---
Toshiba TC358767 eDP bridge chip driver.
+config DRM_TOSHIBA_TC358768
+ tristate "Toshiba TC358768 MIPI DSI bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ select DRM_PANEL
+ select DRM_MIPI_DSI
+ help
+ Toshiba TC358768AXBG/TC358778XBG DSI bridge chip driver.
+
config DRM_TI_TFP410
tristate "TI TFP410 DVI/HDMI bridge"
depends on OF
@@ -139,6 +172,14 @@ config DRM_TI_SN65DSI86
help
Texas Instruments SN65DSI86 DSI to eDP Bridge driver
+config DRM_TI_TPD12S015
+ tristate "TI TPD12S015 HDMI level shifter and ESD protection"
+ depends on OF
+ select DRM_KMS_HELPER
+ help
+ Texas Instruments TPD12S015 HDMI level shifter and ESD protection
+ driver.
+
source "drivers/gpu/drm/bridge/analogix/Kconfig"
source "drivers/gpu/drm/bridge/adv7511/Kconfig"
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index cd16ce830270..6fb062b5b0f0 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,19 +1,23 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
-obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
+obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o
obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
+obj-$(CONFIG_DRM_PARADE_PS8640) += parade-ps8640.o
obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_SII9234) += sii9234.o
+obj-$(CONFIG_DRM_SIMPLE_BRIDGE) += simple-bridge.o
obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o
obj-$(CONFIG_DRM_TOSHIBA_TC358764) += tc358764.o
obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
+obj-$(CONFIG_DRM_TOSHIBA_TC358768) += tc358768.o
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
+obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
obj-y += analogix/
obj-y += synopsys/
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index 8a56ff81f4fb..47d4eb9e845d 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -4,8 +4,9 @@ config DRM_I2C_ADV7511
depends on OF
select DRM_KMS_HELPER
select REGMAP_I2C
+ select DRM_MIPI_DSI
help
- Support for the Analog Device ADV7511(W) and ADV7513 HDMI encoders.
+ Support for the Analog Devices ADV7511(W)/13/33/35 HDMI encoders.
config DRM_I2C_ADV7511_AUDIO
bool "ADV7511 HDMI Audio driver"
@@ -15,16 +16,8 @@ config DRM_I2C_ADV7511_AUDIO
Support the ADV7511 HDMI Audio interface. This is used in
conjunction with the AV7511 HDMI driver.
-config DRM_I2C_ADV7533
- bool "ADV7533 encoder"
- depends on DRM_I2C_ADV7511
- select DRM_MIPI_DSI
- default y
- help
- Support for the Analog Devices ADV7533 DSI to HDMI encoder.
-
config DRM_I2C_ADV7511_CEC
- bool "ADV7511/33 HDMI CEC driver"
+ bool "ADV7511/33/35 HDMI CEC driver"
depends on DRM_I2C_ADV7511
select CEC_CORE
default y
diff --git a/drivers/gpu/drm/bridge/adv7511/Makefile b/drivers/gpu/drm/bridge/adv7511/Makefile
index b46ebeb35fd4..d8ceb534b51f 100644
--- a/drivers/gpu/drm/bridge/adv7511/Makefile
+++ b/drivers/gpu/drm/bridge/adv7511/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-adv7511-y := adv7511_drv.o
+adv7511-y := adv7511_drv.o adv7533.o
adv7511-$(CONFIG_DRM_I2C_ADV7511_AUDIO) += adv7511_audio.o
adv7511-$(CONFIG_DRM_I2C_ADV7511_CEC) += adv7511_cec.o
-adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 52b2adfdc877..a9bb734366ae 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -320,6 +320,7 @@ struct adv7511_video_config {
enum adv7511_type {
ADV7511,
ADV7533,
+ ADV7535,
};
#define ADV7511_MAX_ADDRS 3
@@ -393,7 +394,6 @@ static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
}
#endif
-#ifdef CONFIG_DRM_I2C_ADV7533
void adv7533_dsi_power_on(struct adv7511 *adv);
void adv7533_dsi_power_off(struct adv7511 *adv);
void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode);
@@ -402,44 +402,6 @@ int adv7533_patch_cec_registers(struct adv7511 *adv);
int adv7533_attach_dsi(struct adv7511 *adv);
void adv7533_detach_dsi(struct adv7511 *adv);
int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv);
-#else
-static inline void adv7533_dsi_power_on(struct adv7511 *adv)
-{
-}
-
-static inline void adv7533_dsi_power_off(struct adv7511 *adv)
-{
-}
-
-static inline void adv7533_mode_set(struct adv7511 *adv,
- const struct drm_display_mode *mode)
-{
-}
-
-static inline int adv7533_patch_registers(struct adv7511 *adv)
-{
- return -ENODEV;
-}
-
-static inline int adv7533_patch_cec_registers(struct adv7511 *adv)
-{
- return -ENODEV;
-}
-
-static inline int adv7533_attach_dsi(struct adv7511 *adv)
-{
- return -ENODEV;
-}
-
-static inline void adv7533_detach_dsi(struct adv7511 *adv)
-{
-}
-
-static inline int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
-{
- return -ENODEV;
-}
-#endif
#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO
int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 9e13e466e72c..87b58c1acff4 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -367,7 +367,7 @@ static void adv7511_power_on(struct adv7511 *adv7511)
*/
regcache_sync(adv7511->regmap);
- if (adv7511->type == ADV7533)
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_dsi_power_on(adv7511);
adv7511->powered = true;
}
@@ -387,7 +387,7 @@ static void __adv7511_power_off(struct adv7511 *adv7511)
static void adv7511_power_off(struct adv7511 *adv7511)
{
__adv7511_power_off(adv7511);
- if (adv7511->type == ADV7533)
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_dsi_power_off(adv7511);
adv7511->powered = false;
}
@@ -761,7 +761,7 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
regmap_update_bits(adv7511->regmap, 0x17,
0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
- if (adv7511->type == ADV7533)
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_mode_set(adv7511, adj_mode);
drm_mode_copy(&adv7511->curr_mode, adj_mode);
@@ -847,11 +847,17 @@ static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
adv7511_mode_set(adv, mode, adj_mode);
}
-static int adv7511_bridge_attach(struct drm_bridge *bridge)
+static int adv7511_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
@@ -874,7 +880,7 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge)
&adv7511_connector_helper_funcs);
drm_connector_attach_encoder(&adv->connector, bridge->encoder);
- if (adv->type == ADV7533)
+ if (adv->type == ADV7533 || adv->type == ADV7535)
ret = adv7533_attach_dsi(adv);
if (adv->i2c_main->irq)
@@ -952,7 +958,7 @@ static bool adv7511_cec_register_volatile(struct device *dev, unsigned int reg)
struct i2c_client *i2c = to_i2c_client(dev);
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
- if (adv7511->type == ADV7533)
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
reg -= ADV7533_REG_CEC_OFFSET;
switch (reg) {
@@ -994,7 +1000,7 @@ static int adv7511_init_cec_regmap(struct adv7511 *adv)
goto err;
}
- if (adv->type == ADV7533) {
+ if (adv->type == ADV7533 || adv->type == ADV7535) {
ret = adv7533_patch_cec_registers(adv);
if (ret)
goto err;
@@ -1242,7 +1248,7 @@ static int adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
- if (adv7511->type == ADV7533)
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_detach_dsi(adv7511);
i2c_unregister_device(adv7511->i2c_cec);
if (adv7511->cec_clk)
@@ -1266,9 +1272,8 @@ static const struct i2c_device_id adv7511_i2c_ids[] = {
{ "adv7511", ADV7511 },
{ "adv7511w", ADV7511 },
{ "adv7513", ADV7511 },
-#ifdef CONFIG_DRM_I2C_ADV7533
{ "adv7533", ADV7533 },
-#endif
+ { "adv7535", ADV7535 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
@@ -1277,9 +1282,8 @@ static const struct of_device_id adv7511_of_ids[] = {
{ .compatible = "adi,adv7511", .data = (void *)ADV7511 },
{ .compatible = "adi,adv7511w", .data = (void *)ADV7511 },
{ .compatible = "adi,adv7513", .data = (void *)ADV7511 },
-#ifdef CONFIG_DRM_I2C_ADV7533
{ .compatible = "adi,adv7533", .data = (void *)ADV7533 },
-#endif
+ { .compatible = "adi,adv7535", .data = (void *)ADV7535 },
{ }
};
MODULE_DEVICE_TABLE(of, adv7511_of_ids);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index 56f55c53abfd..2bc6e4f85171 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -210,8 +210,7 @@ static int anx6345_dp_link_training(struct anx6345 *anx6345)
if (err)
return err;
- dpcd[0] = drm_dp_max_link_rate(anx6345->dpcd);
- dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
+ dpcd[0] = dp_bw;
err = regmap_write(anx6345->map[I2C_IDX_DPTX],
SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
if (err)
@@ -520,11 +519,17 @@ static const struct drm_connector_funcs anx6345_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int anx6345_bridge_attach(struct drm_bridge *bridge)
+static int anx6345_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
int err;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
@@ -712,16 +717,20 @@ static int anx6345_i2c_probe(struct i2c_client *client,
DRM_DEBUG("No panel found\n");
/* 1.2V digital core power regulator */
- anx6345->dvdd12 = devm_regulator_get(dev, "dvdd12-supply");
+ anx6345->dvdd12 = devm_regulator_get(dev, "dvdd12");
if (IS_ERR(anx6345->dvdd12)) {
- DRM_ERROR("dvdd12-supply not found\n");
+ if (PTR_ERR(anx6345->dvdd12) != -EPROBE_DEFER)
+ DRM_ERROR("Failed to get dvdd12 supply (%ld)\n",
+ PTR_ERR(anx6345->dvdd12));
return PTR_ERR(anx6345->dvdd12);
}
/* 2.5V digital core power regulator */
- anx6345->dvdd25 = devm_regulator_get(dev, "dvdd25-supply");
+ anx6345->dvdd25 = devm_regulator_get(dev, "dvdd25");
if (IS_ERR(anx6345->dvdd25)) {
- DRM_ERROR("dvdd25-supply not found\n");
+ if (PTR_ERR(anx6345->dvdd25) != -EPROBE_DEFER)
+ DRM_ERROR("Failed to get dvdd25 supply (%ld)\n",
+ PTR_ERR(anx6345->dvdd25));
return PTR_ERR(anx6345->dvdd25);
}
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index 41867be03751..0d5a5ad0c9ee 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -722,10 +722,9 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
if (err)
return err;
- dpcd[0] = drm_dp_max_link_rate(anx78xx->dpcd);
- dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
- SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
+ SP_DP_MAIN_LINK_BW_SET_REG,
+ anx78xx->dpcd[DP_MAX_LINK_RATE]);
if (err)
return err;
@@ -887,11 +886,17 @@ static const struct drm_connector_funcs anx78xx_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int anx78xx_bridge_attach(struct drm_bridge *bridge)
+static int anx78xx_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
int err;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 6effe532f820..9ded2cef57dd 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1216,13 +1216,19 @@ static const struct drm_connector_funcs analogix_dp_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int analogix_dp_bridge_attach(struct drm_bridge *bridge)
+static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_encoder *encoder = dp->encoder;
struct drm_connector *connector = NULL;
int ret = 0;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
@@ -1289,19 +1295,21 @@ struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp,
return conn_state->crtc;
}
-static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void
+analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
+ struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int ret;
- crtc = analogix_dp_get_new_crtc(dp, state);
+ crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
return;
- old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc);
/* Don't touch the panel if we're coming back from PSR */
if (old_crtc_state && old_crtc_state->self_refresh_active)
return;
@@ -1366,20 +1374,22 @@ out_dp_clk_pre:
return ret;
}
-static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void
+analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
+ struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int timeout_loop = 0;
int ret;
- crtc = analogix_dp_get_new_crtc(dp, state);
+ crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
return;
- old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc);
/* Not a full enable, just disable PSR and continue */
if (old_crtc_state && old_crtc_state->self_refresh_active) {
ret = analogix_dp_disable_psr(dp);
@@ -1440,18 +1450,20 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
dp->dpms_mode = DRM_MODE_DPMS_OFF;
}
-static void analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void
+analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
+ struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state = NULL;
- crtc = analogix_dp_get_new_crtc(dp, state);
+ crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
goto out;
- new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc);
if (!new_crtc_state)
goto out;
@@ -1463,20 +1475,21 @@ out:
analogix_dp_bridge_disable(bridge);
}
-static
-void analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void
+analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
+ struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
int ret;
- crtc = analogix_dp_get_new_crtc(dp, state);
+ crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
return;
- new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc);
if (!new_crtc_state || !new_crtc_state->self_refresh_active)
return;
@@ -1563,6 +1576,9 @@ static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
}
static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_pre_enable = analogix_dp_bridge_atomic_pre_enable,
.atomic_enable = analogix_dp_bridge_atomic_enable,
.atomic_disable = analogix_dp_bridge_atomic_disable,
@@ -1588,7 +1604,7 @@ static int analogix_dp_create_bridge(struct drm_device *drm_dev,
bridge->driver_private = dp;
bridge->funcs = &analogix_dp_bridge_funcs;
- ret = drm_bridge_attach(dp->encoder, bridge, NULL);
+ ret = drm_bridge_attach(dp->encoder, bridge, NULL, 0);
if (ret) {
DRM_ERROR("failed to attach drm bridge\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index b7c97f060241..69c3892caee5 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -644,7 +644,8 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
return 0;
}
-static int cdns_dsi_bridge_attach(struct drm_bridge *bridge)
+static int cdns_dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
@@ -656,7 +657,8 @@ static int cdns_dsi_bridge_attach(struct drm_bridge *bridge)
return -ENOTSUPP;
}
- return drm_bridge_attach(bridge->encoder, output->bridge, bridge);
+ return drm_bridge_attach(bridge->encoder, output->bridge, bridge,
+ flags);
}
static enum drm_mode_status
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
new file mode 100644
index 000000000000..4d278573cdb9
--- /dev/null
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Laurent Pinchart <[email protected]>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
+
+struct display_connector {
+ struct drm_bridge bridge;
+
+ struct gpio_desc *hpd_gpio;
+ int hpd_irq;
+};
+
+static inline struct display_connector *
+to_display_connector(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct display_connector, bridge);
+}
+
+static int display_connector_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
+}
+
+static enum drm_connector_status
+display_connector_detect(struct drm_bridge *bridge)
+{
+ struct display_connector *conn = to_display_connector(bridge);
+
+ if (conn->hpd_gpio) {
+ if (gpiod_get_value_cansleep(conn->hpd_gpio))
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+ }
+
+ if (conn->bridge.ddc && drm_probe_ddc(conn->bridge.ddc))
+ return connector_status_connected;
+
+ switch (conn->bridge.type) {
+ case DRM_MODE_CONNECTOR_DVIA:
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ /*
+ * For DVI and HDMI connectors, a DDC probe failure indicates
+ * that no cable is connected.
+ */
+ return connector_status_disconnected;
+
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_VGA:
+ default:
+ /*
+ * Composite and S-Video connectors have no detection means
+ * other than the HPD GPIO. For VGA connectors, even if we have
+ * an I2C bus, we can't assume that the cable is disconnected
+ * if drm_probe_ddc fails, as some cables don't wire the DDC
+ * pins.
+ */
+ return connector_status_unknown;
+ }
+}
+
+static struct edid *display_connector_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct display_connector *conn = to_display_connector(bridge);
+
+ return drm_get_edid(connector, conn->bridge.ddc);
+}
+
+static const struct drm_bridge_funcs display_connector_bridge_funcs = {
+ .attach = display_connector_attach,
+ .detect = display_connector_detect,
+ .get_edid = display_connector_get_edid,
+};
+
+static irqreturn_t display_connector_hpd_irq(int irq, void *arg)
+{
+ struct display_connector *conn = arg;
+ struct drm_bridge *bridge = &conn->bridge;
+
+ drm_bridge_hpd_notify(bridge, display_connector_detect(bridge));
+
+ return IRQ_HANDLED;
+}
+
+static int display_connector_probe(struct platform_device *pdev)
+{
+ struct display_connector *conn;
+ unsigned int type;
+ const char *label;
+ int ret;
+
+ conn = devm_kzalloc(&pdev->dev, sizeof(*conn), GFP_KERNEL);
+ if (!conn)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, conn);
+
+ type = (uintptr_t)of_device_get_match_data(&pdev->dev);
+
+ /* Get the exact connector type. */
+ switch (type) {
+ case DRM_MODE_CONNECTOR_DVII: {
+ bool analog, digital;
+
+ analog = of_property_read_bool(pdev->dev.of_node, "analog");
+ digital = of_property_read_bool(pdev->dev.of_node, "digital");
+ if (analog && !digital) {
+ conn->bridge.type = DRM_MODE_CONNECTOR_DVIA;
+ } else if (!analog && digital) {
+ conn->bridge.type = DRM_MODE_CONNECTOR_DVID;
+ } else if (analog && digital) {
+ conn->bridge.type = DRM_MODE_CONNECTOR_DVII;
+ } else {
+ dev_err(&pdev->dev, "DVI connector with no type\n");
+ return -EINVAL;
+ }
+ break;
+ }
+
+ case DRM_MODE_CONNECTOR_HDMIA: {
+ const char *hdmi_type;
+
+ ret = of_property_read_string(pdev->dev.of_node, "type",
+ &hdmi_type);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "HDMI connector with no type\n");
+ return -EINVAL;
+ }
+
+ if (!strcmp(hdmi_type, "a") || !strcmp(hdmi_type, "c") ||
+ !strcmp(hdmi_type, "d") || !strcmp(hdmi_type, "e")) {
+ conn->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+ } else if (!strcmp(hdmi_type, "b")) {
+ conn->bridge.type = DRM_MODE_CONNECTOR_HDMIB;
+ } else {
+ dev_err(&pdev->dev,
+ "Unsupported HDMI connector type '%s'\n",
+ hdmi_type);
+ return -EINVAL;
+ }
+
+ break;
+ }
+
+ default:
+ conn->bridge.type = type;
+ break;
+ }
+
+ /* All the supported connector types support interlaced modes. */
+ conn->bridge.interlace_allowed = true;
+
+ /* Get the optional connector label. */
+ of_property_read_string(pdev->dev.of_node, "label", &label);
+
+ /*
+ * Get the HPD GPIO for DVI and HDMI connectors. If the GPIO can provide
+ * edge interrupts, register an interrupt handler.
+ */
+ if (type == DRM_MODE_CONNECTOR_DVII ||
+ type == DRM_MODE_CONNECTOR_HDMIA) {
+ conn->hpd_gpio = devm_gpiod_get_optional(&pdev->dev, "hpd",
+ GPIOD_IN);
+ if (IS_ERR(conn->hpd_gpio)) {
+ if (PTR_ERR(conn->hpd_gpio) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Unable to retrieve HPD GPIO\n");
+ return PTR_ERR(conn->hpd_gpio);
+ }
+
+ conn->hpd_irq = gpiod_to_irq(conn->hpd_gpio);
+ } else {
+ conn->hpd_irq = -EINVAL;
+ }
+
+ if (conn->hpd_irq >= 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, conn->hpd_irq,
+ NULL, display_connector_hpd_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "HPD", conn);
+ if (ret) {
+ dev_info(&pdev->dev,
+ "Failed to request HPD edge interrupt, falling back to polling\n");
+ conn->hpd_irq = -EINVAL;
+ }
+ }
+
+ /* Retrieve the DDC I2C adapter for DVI, HDMI and VGA connectors. */
+ if (type == DRM_MODE_CONNECTOR_DVII ||
+ type == DRM_MODE_CONNECTOR_HDMIA ||
+ type == DRM_MODE_CONNECTOR_VGA) {
+ struct device_node *phandle;
+
+ phandle = of_parse_phandle(pdev->dev.of_node, "ddc-i2c-bus", 0);
+ if (phandle) {
+ conn->bridge.ddc = of_get_i2c_adapter_by_node(phandle);
+ of_node_put(phandle);
+ if (!conn->bridge.ddc)
+ return -EPROBE_DEFER;
+ } else {
+ dev_dbg(&pdev->dev,
+ "No I2C bus specified, disabling EDID readout\n");
+ }
+ }
+
+ conn->bridge.funcs = &display_connector_bridge_funcs;
+ conn->bridge.of_node = pdev->dev.of_node;
+
+ if (conn->bridge.ddc)
+ conn->bridge.ops |= DRM_BRIDGE_OP_EDID
+ | DRM_BRIDGE_OP_DETECT;
+ if (conn->hpd_gpio)
+ conn->bridge.ops |= DRM_BRIDGE_OP_DETECT;
+ if (conn->hpd_irq >= 0)
+ conn->bridge.ops |= DRM_BRIDGE_OP_HPD;
+
+ dev_dbg(&pdev->dev,
+ "Found %s display connector '%s' %s DDC bus and %s HPD GPIO (ops 0x%x)\n",
+ drm_get_connector_type_name(conn->bridge.type),
+ label ? label : "<unlabelled>",
+ conn->bridge.ddc ? "with" : "without",
+ conn->hpd_gpio ? "with" : "without",
+ conn->bridge.ops);
+
+ drm_bridge_add(&conn->bridge);
+
+ return 0;
+}
+
+static int display_connector_remove(struct platform_device *pdev)
+{
+ struct display_connector *conn = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&conn->bridge);
+
+ if (!IS_ERR(conn->bridge.ddc))
+ i2c_put_adapter(conn->bridge.ddc);
+
+ return 0;
+}
+
+static const struct of_device_id display_connector_match[] = {
+ {
+ .compatible = "composite-video-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_Composite,
+ }, {
+ .compatible = "dvi-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_DVII,
+ }, {
+ .compatible = "hdmi-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_HDMIA,
+ }, {
+ .compatible = "svideo-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_SVIDEO,
+ }, {
+ .compatible = "vga-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_VGA,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, display_connector_match);
+
+static struct platform_driver display_connector_driver = {
+ .probe = display_connector_probe,
+ .remove = display_connector_remove,
+ .driver = {
+ .name = "display-connector",
+ .of_match_table = display_connector_match,
+ },
+};
+module_platform_driver(display_connector_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <[email protected]>");
+MODULE_DESCRIPTION("Display connector driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
deleted file mode 100644
index cc33dc411b9e..000000000000
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ /dev/null
@@ -1,300 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2015-2016 Free Electrons
- * Copyright (C) 2015-2016 NextThing Co
- *
- * Maxime Ripard <[email protected]>
- */
-
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_graph.h>
-#include <linux/regulator/consumer.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_bridge.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_print.h>
-#include <drm/drm_probe_helper.h>
-
-struct dumb_vga {
- struct drm_bridge bridge;
- struct drm_connector connector;
-
- struct i2c_adapter *ddc;
- struct regulator *vdd;
-};
-
-static inline struct dumb_vga *
-drm_bridge_to_dumb_vga(struct drm_bridge *bridge)
-{
- return container_of(bridge, struct dumb_vga, bridge);
-}
-
-static inline struct dumb_vga *
-drm_connector_to_dumb_vga(struct drm_connector *connector)
-{
- return container_of(connector, struct dumb_vga, connector);
-}
-
-static int dumb_vga_get_modes(struct drm_connector *connector)
-{
- struct dumb_vga *vga = drm_connector_to_dumb_vga(connector);
- struct edid *edid;
- int ret;
-
- if (!vga->ddc)
- goto fallback;
-
- edid = drm_get_edid(connector, vga->ddc);
- if (!edid) {
- DRM_INFO("EDID readout failed, falling back to standard modes\n");
- goto fallback;
- }
-
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
- return ret;
-
-fallback:
- /*
- * In case we cannot retrieve the EDIDs (broken or missing i2c
- * bus), fallback on the XGA standards
- */
- ret = drm_add_modes_noedid(connector, 1920, 1200);
-
- /* And prefer a mode pretty much anyone can handle */
- drm_set_preferred_mode(connector, 1024, 768);
-
- return ret;
-}
-
-static const struct drm_connector_helper_funcs dumb_vga_con_helper_funcs = {
- .get_modes = dumb_vga_get_modes,
-};
-
-static enum drm_connector_status
-dumb_vga_connector_detect(struct drm_connector *connector, bool force)
-{
- struct dumb_vga *vga = drm_connector_to_dumb_vga(connector);
-
- /*
- * Even if we have an I2C bus, we can't assume that the cable
- * is disconnected if drm_probe_ddc fails. Some cables don't
- * wire the DDC pins, or the I2C bus might not be working at
- * all.
- */
- if (vga->ddc && drm_probe_ddc(vga->ddc))
- return connector_status_connected;
-
- return connector_status_unknown;
-}
-
-static const struct drm_connector_funcs dumb_vga_con_funcs = {
- .detect = dumb_vga_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int dumb_vga_attach(struct drm_bridge *bridge)
-{
- struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
- int ret;
-
- if (!bridge->encoder) {
- DRM_ERROR("Missing encoder\n");
- return -ENODEV;
- }
-
- drm_connector_helper_add(&vga->connector,
- &dumb_vga_con_helper_funcs);
- ret = drm_connector_init_with_ddc(bridge->dev, &vga->connector,
- &dumb_vga_con_funcs,
- DRM_MODE_CONNECTOR_VGA,
- vga->ddc);
- if (ret) {
- DRM_ERROR("Failed to initialize connector\n");
- return ret;
- }
-
- drm_connector_attach_encoder(&vga->connector,
- bridge->encoder);
-
- return 0;
-}
-
-static void dumb_vga_enable(struct drm_bridge *bridge)
-{
- struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
- int ret = 0;
-
- if (vga->vdd)
- ret = regulator_enable(vga->vdd);
-
- if (ret)
- DRM_ERROR("Failed to enable vdd regulator: %d\n", ret);
-}
-
-static void dumb_vga_disable(struct drm_bridge *bridge)
-{
- struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
-
- if (vga->vdd)
- regulator_disable(vga->vdd);
-}
-
-static const struct drm_bridge_funcs dumb_vga_bridge_funcs = {
- .attach = dumb_vga_attach,
- .enable = dumb_vga_enable,
- .disable = dumb_vga_disable,
-};
-
-static struct i2c_adapter *dumb_vga_retrieve_ddc(struct device *dev)
-{
- struct device_node *phandle, *remote;
- struct i2c_adapter *ddc;
-
- remote = of_graph_get_remote_node(dev->of_node, 1, -1);
- if (!remote)
- return ERR_PTR(-EINVAL);
-
- phandle = of_parse_phandle(remote, "ddc-i2c-bus", 0);
- of_node_put(remote);
- if (!phandle)
- return ERR_PTR(-ENODEV);
-
- ddc = of_get_i2c_adapter_by_node(phandle);
- of_node_put(phandle);
- if (!ddc)
- return ERR_PTR(-EPROBE_DEFER);
-
- return ddc;
-}
-
-static int dumb_vga_probe(struct platform_device *pdev)
-{
- struct dumb_vga *vga;
-
- vga = devm_kzalloc(&pdev->dev, sizeof(*vga), GFP_KERNEL);
- if (!vga)
- return -ENOMEM;
- platform_set_drvdata(pdev, vga);
-
- vga->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
- if (IS_ERR(vga->vdd)) {
- int ret = PTR_ERR(vga->vdd);
- if (ret == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- vga->vdd = NULL;
- dev_dbg(&pdev->dev, "No vdd regulator found: %d\n", ret);
- }
-
- vga->ddc = dumb_vga_retrieve_ddc(&pdev->dev);
- if (IS_ERR(vga->ddc)) {
- if (PTR_ERR(vga->ddc) == -ENODEV) {
- dev_dbg(&pdev->dev,
- "No i2c bus specified. Disabling EDID readout\n");
- vga->ddc = NULL;
- } else {
- dev_err(&pdev->dev, "Couldn't retrieve i2c bus\n");
- return PTR_ERR(vga->ddc);
- }
- }
-
- vga->bridge.funcs = &dumb_vga_bridge_funcs;
- vga->bridge.of_node = pdev->dev.of_node;
- vga->bridge.timings = of_device_get_match_data(&pdev->dev);
-
- drm_bridge_add(&vga->bridge);
-
- return 0;
-}
-
-static int dumb_vga_remove(struct platform_device *pdev)
-{
- struct dumb_vga *vga = platform_get_drvdata(pdev);
-
- drm_bridge_remove(&vga->bridge);
-
- if (vga->ddc)
- i2c_put_adapter(vga->ddc);
-
- return 0;
-}
-
-/*
- * We assume the ADV7123 DAC is the "default" for historical reasons
- * Information taken from the ADV7123 datasheet, revision D.
- * NOTE: the ADV7123EP seems to have other timings and need a new timings
- * set if used.
- */
-static const struct drm_bridge_timings default_dac_timings = {
- /* Timing specifications, datasheet page 7 */
- .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
- .setup_time_ps = 500,
- .hold_time_ps = 1500,
-};
-
-/*
- * Information taken from the THS8134, THS8134A, THS8134B datasheet named
- * "SLVS205D", dated May 1990, revised March 2000.
- */
-static const struct drm_bridge_timings ti_ths8134_dac_timings = {
- /* From timing diagram, datasheet page 9 */
- .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
- /* From datasheet, page 12 */
- .setup_time_ps = 3000,
- /* I guess this means latched input */
- .hold_time_ps = 0,
-};
-
-/*
- * Information taken from the THS8135 datasheet named "SLAS343B", dated
- * May 2001, revised April 2013.
- */
-static const struct drm_bridge_timings ti_ths8135_dac_timings = {
- /* From timing diagram, datasheet page 14 */
- .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
- /* From datasheet, page 16 */
- .setup_time_ps = 2000,
- .hold_time_ps = 500,
-};
-
-static const struct of_device_id dumb_vga_match[] = {
- {
- .compatible = "dumb-vga-dac",
- .data = NULL,
- },
- {
- .compatible = "adi,adv7123",
- .data = &default_dac_timings,
- },
- {
- .compatible = "ti,ths8135",
- .data = &ti_ths8135_dac_timings,
- },
- {
- .compatible = "ti,ths8134",
- .data = &ti_ths8134_dac_timings,
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, dumb_vga_match);
-
-static struct platform_driver dumb_vga_driver = {
- .probe = dumb_vga_probe,
- .remove = dumb_vga_remove,
- .driver = {
- .name = "dumb-vga-dac",
- .of_match_table = dumb_vga_match,
- },
-};
-module_platform_driver(dumb_vga_driver);
-
-MODULE_AUTHOR("Maxime Ripard <[email protected]>");
-MODULE_DESCRIPTION("Dumb VGA DAC bridge driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 5f04cc11227e..24fb1befdfa2 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -21,19 +21,23 @@ struct lvds_codec {
u32 connector_type;
};
-static int lvds_codec_attach(struct drm_bridge *bridge)
+static inline struct lvds_codec *to_lvds_codec(struct drm_bridge *bridge)
{
- struct lvds_codec *lvds_codec = container_of(bridge,
- struct lvds_codec, bridge);
+ return container_of(bridge, struct lvds_codec, bridge);
+}
+
+static int lvds_codec_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
return drm_bridge_attach(bridge->encoder, lvds_codec->panel_bridge,
- bridge);
+ bridge, flags);
}
static void lvds_codec_enable(struct drm_bridge *bridge)
{
- struct lvds_codec *lvds_codec = container_of(bridge,
- struct lvds_codec, bridge);
+ struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
if (lvds_codec->powerdown_gpio)
gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 0);
@@ -41,14 +45,13 @@ static void lvds_codec_enable(struct drm_bridge *bridge)
static void lvds_codec_disable(struct drm_bridge *bridge)
{
- struct lvds_codec *lvds_codec = container_of(bridge,
- struct lvds_codec, bridge);
+ struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
if (lvds_codec->powerdown_gpio)
gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 1);
}
-static struct drm_bridge_funcs funcs = {
+static const struct drm_bridge_funcs funcs = {
.attach = lvds_codec_attach,
.enable = lvds_codec_enable,
.disable = lvds_codec_disable,
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index e8a49f6146c6..6200f12a37e6 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -206,13 +206,19 @@ static irqreturn_t ge_b850v3_lvds_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int ge_b850v3_lvds_attach(struct drm_bridge *bridge)
+static int ge_b850v3_lvds_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct drm_connector *connector = &ge_b850v3_lvds_ptr->connector;
struct i2c_client *stdp4028_i2c
= ge_b850v3_lvds_ptr->stdp4028_i2c;
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 57ff01339559..438e566ce0a4 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -236,11 +236,17 @@ static const struct drm_connector_funcs ptn3460_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int ptn3460_bridge_attach(struct drm_bridge *bridge)
+static int ptn3460_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index f66777e24968..8461ee8304ba 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -53,12 +53,16 @@ static const struct drm_connector_funcs panel_bridge_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int panel_bridge_attach(struct drm_bridge *bridge)
+static int panel_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
struct drm_connector *connector = &panel_bridge->connector;
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+ return 0;
+
if (!bridge->encoder) {
DRM_ERROR("Missing encoder\n");
return -ENODEV;
@@ -120,6 +124,14 @@ static void panel_bridge_post_disable(struct drm_bridge *bridge)
drm_panel_unprepare(panel_bridge->panel);
}
+static int panel_bridge_get_modes(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
+
+ return drm_panel_get_modes(panel_bridge->panel, connector);
+}
+
static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
.attach = panel_bridge_attach,
.detach = panel_bridge_detach,
@@ -127,6 +139,11 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
.enable = panel_bridge_enable,
.disable = panel_bridge_disable,
.post_disable = panel_bridge_post_disable,
+ .get_modes = panel_bridge_get_modes,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
};
/**
@@ -151,7 +168,7 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
* known type. Calling this function with a panel whose connector type is
* DRM_MODE_CONNECTOR_Unknown will return NULL.
*
- * See devm_drm_panel_bridge_add() for an automatically manged version of this
+ * See devm_drm_panel_bridge_add() for an automatically managed version of this
* function.
*/
struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel)
@@ -196,6 +213,8 @@ struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
#ifdef CONFIG_OF
panel_bridge->bridge.of_node = panel->dev->of_node;
#endif
+ panel_bridge->bridge.ops = DRM_BRIDGE_OP_MODES;
+ panel_bridge->bridge.type = connector_type;
drm_bridge_add(&panel_bridge->bridge);
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 10c47c008b40..d789ea2a7fb9 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -476,11 +476,17 @@ static const struct drm_connector_funcs ps8622_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int ps8622_attach(struct drm_bridge *bridge)
+static int ps8622_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
new file mode 100644
index 000000000000..d3a53442d449
--- /dev/null
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#define PAGE2_GPIO_H 0xa7
+#define PS_GPIO9 BIT(1)
+#define PAGE2_I2C_BYPASS 0xea
+#define I2C_BYPASS_EN 0xd0
+#define PAGE2_MCS_EN 0xf3
+#define MCS_EN BIT(0)
+#define PAGE3_SET_ADD 0xfe
+#define VDO_CTL_ADD 0x13
+#define VDO_DIS 0x18
+#define VDO_EN 0x1c
+#define DP_NUM_LANES 4
+
+/*
+ * PS8640 uses multiple addresses:
+ * page[0]: for DP control
+ * page[1]: for VIDEO Bridge
+ * page[2]: for control top
+ * page[3]: for DSI Link Control1
+ * page[4]: for MIPI Phy
+ * page[5]: for VPLL
+ * page[6]: for DSI Link Control2
+ * page[7]: for SPI ROM mapping
+ */
+enum page_addr_offset {
+ PAGE0_DP_CNTL = 0,
+ PAGE1_VDO_BDG,
+ PAGE2_TOP_CNTL,
+ PAGE3_DSI_CNTL1,
+ PAGE4_MIPI_PHY,
+ PAGE5_VPLL,
+ PAGE6_DSI_CNTL2,
+ PAGE7_SPI_CNTL,
+ MAX_DEVS
+};
+
+enum ps8640_vdo_control {
+ DISABLE = VDO_DIS,
+ ENABLE = VDO_EN,
+};
+
+struct ps8640 {
+ struct drm_bridge bridge;
+ struct drm_bridge *panel_bridge;
+ struct mipi_dsi_device *dsi;
+ struct i2c_client *page[MAX_DEVS];
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *gpio_reset;
+ struct gpio_desc *gpio_powerdown;
+};
+
+static inline struct ps8640 *bridge_to_ps8640(struct drm_bridge *e)
+{
+ return container_of(e, struct ps8640, bridge);
+}
+
+static int ps8640_bridge_vdo_control(struct ps8640 *ps_bridge,
+ const enum ps8640_vdo_control ctrl)
+{
+ struct i2c_client *client = ps_bridge->page[PAGE3_DSI_CNTL1];
+ u8 vdo_ctrl_buf[] = { VDO_CTL_ADD, ctrl };
+ int ret;
+
+ ret = i2c_smbus_write_i2c_block_data(client, PAGE3_SET_ADD,
+ sizeof(vdo_ctrl_buf),
+ vdo_ctrl_buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void ps8640_pre_enable(struct drm_bridge *bridge)
+{
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ struct i2c_client *client = ps_bridge->page[PAGE2_TOP_CNTL];
+ unsigned long timeout;
+ int ret, status;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ps_bridge->supplies),
+ ps_bridge->supplies);
+ if (ret < 0) {
+ DRM_ERROR("cannot enable regulators %d\n", ret);
+ return;
+ }
+
+ gpiod_set_value(ps_bridge->gpio_powerdown, 0);
+ gpiod_set_value(ps_bridge->gpio_reset, 1);
+ usleep_range(2000, 2500);
+ gpiod_set_value(ps_bridge->gpio_reset, 0);
+
+ /*
+ * Wait for the ps8640 embedded MCU to be ready.
+ * First wait 200ms, then check the MCU ready flag every 20ms.
+ */
+ msleep(200);
+
+ timeout = jiffies + msecs_to_jiffies(200) + 1;
+
+ while (time_is_after_jiffies(timeout)) {
+ status = i2c_smbus_read_byte_data(client, PAGE2_GPIO_H);
+ if (status < 0) {
+ DRM_ERROR("failed read PAGE2_GPIO_H: %d\n", status);
+ goto err_regulators_disable;
+ }
+ if ((status & PS_GPIO9) == PS_GPIO9)
+ break;
+
+ msleep(20);
+ }
+
+ msleep(50);
+
+ /*
+ * The Manufacturer Command Set (MCS) is a device dependent interface
+ * intended for factory programming of the display module default
+ * parameters. Once the display module is configured, the MCS shall be
+ * disabled by the manufacturer. Once disabled, all MCS commands are
+ * ignored by the display interface.
+ */
+ status = i2c_smbus_read_byte_data(client, PAGE2_MCS_EN);
+ if (status < 0) {
+ DRM_ERROR("failed read PAGE2_MCS_EN: %d\n", status);
+ goto err_regulators_disable;
+ }
+
+ ret = i2c_smbus_write_byte_data(client, PAGE2_MCS_EN,
+ status & ~MCS_EN);
+ if (ret < 0) {
+ DRM_ERROR("failed write PAGE2_MCS_EN: %d\n", ret);
+ goto err_regulators_disable;
+ }
+
+ ret = ps8640_bridge_vdo_control(ps_bridge, ENABLE);
+ if (ret) {
+ DRM_ERROR("failed to enable VDO: %d\n", ret);
+ goto err_regulators_disable;
+ }
+
+ /* Switch to accessing the eDP panel's EDID through I2C */
+ ret = i2c_smbus_write_byte_data(client, PAGE2_I2C_BYPASS,
+ I2C_BYPASS_EN);
+ if (ret < 0) {
+ DRM_ERROR("failed write PAGE2_I2C_BYPASS: %d\n", ret);
+ goto err_regulators_disable;
+ }
+
+ return;
+
+err_regulators_disable:
+ regulator_bulk_disable(ARRAY_SIZE(ps_bridge->supplies),
+ ps_bridge->supplies);
+}
+
+static void ps8640_post_disable(struct drm_bridge *bridge)
+{
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ int ret;
+
+ ret = ps8640_bridge_vdo_control(ps_bridge, DISABLE);
+ if (ret < 0)
+ DRM_ERROR("failed to disable VDO: %d\n", ret);
+
+ gpiod_set_value(ps_bridge->gpio_reset, 1);
+ gpiod_set_value(ps_bridge->gpio_powerdown, 1);
+ ret = regulator_bulk_disable(ARRAY_SIZE(ps_bridge->supplies),
+ ps_bridge->supplies);
+ if (ret < 0)
+ DRM_ERROR("cannot disable regulators %d\n", ret);
+}
+
+static int ps8640_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ struct device *dev = &ps_bridge->page[0]->dev;
+ struct device_node *in_ep, *dsi_node;
+ struct mipi_dsi_device *dsi;
+ struct mipi_dsi_host *host;
+ int ret;
+ const struct mipi_dsi_device_info info = { .type = "ps8640",
+ .channel = 0,
+ .node = NULL,
+ };
+ /* port@0 is ps8640 dsi input port */
+ in_ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
+ if (!in_ep)
+ return -ENODEV;
+
+ dsi_node = of_graph_get_remote_port_parent(in_ep);
+ of_node_put(in_ep);
+ if (!dsi_node)
+ return -ENODEV;
+
+ host = of_find_mipi_dsi_host_by_node(dsi_node);
+ of_node_put(dsi_node);
+ if (!host)
+ return -ENODEV;
+
+ dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(dsi)) {
+ dev_err(dev, "failed to create dsi device\n");
+ ret = PTR_ERR(dsi);
+ return ret;
+ }
+
+ ps_bridge->dsi = dsi;
+
+ dsi->host = host;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = DP_NUM_LANES;
+ ret = mipi_dsi_attach(dsi);
+ if (ret)
+ goto err_dsi_attach;
+
+ /* Attach the panel-bridge to the dsi bridge */
+ return drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
+ &ps_bridge->bridge, flags);
+
+err_dsi_attach:
+ mipi_dsi_device_unregister(dsi);
+ return ret;
+}
+
+static const struct drm_bridge_funcs ps8640_bridge_funcs = {
+ .attach = ps8640_bridge_attach,
+ .post_disable = ps8640_post_disable,
+ .pre_enable = ps8640_pre_enable,
+};
+
+static int ps8640_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
+ struct ps8640 *ps_bridge;
+ struct drm_panel *panel;
+ int ret;
+ u32 i;
+
+ ps_bridge = devm_kzalloc(dev, sizeof(*ps_bridge), GFP_KERNEL);
+ if (!ps_bridge)
+ return -ENOMEM;
+
+ /* port@1 is ps8640 output port */
+ ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL);
+ if (ret < 0)
+ return ret;
+ if (!panel)
+ return -ENODEV;
+
+ panel->connector_type = DRM_MODE_CONNECTOR_eDP;
+
+ ps_bridge->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(ps_bridge->panel_bridge))
+ return PTR_ERR(ps_bridge->panel_bridge);
+
+ ps_bridge->supplies[0].supply = "vdd33";
+ ps_bridge->supplies[1].supply = "vdd12";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies),
+ ps_bridge->supplies);
+ if (ret)
+ return ret;
+
+ ps_bridge->gpio_powerdown = devm_gpiod_get(&client->dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ps_bridge->gpio_powerdown))
+ return PTR_ERR(ps_bridge->gpio_powerdown);
+
+ /*
+ * Assert the reset to avoid the bridge being initialized prematurely
+ */
+ ps_bridge->gpio_reset = devm_gpiod_get(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ps_bridge->gpio_reset))
+ return PTR_ERR(ps_bridge->gpio_reset);
+
+ ps_bridge->bridge.funcs = &ps8640_bridge_funcs;
+ ps_bridge->bridge.of_node = dev->of_node;
+
+ ps_bridge->page[PAGE0_DP_CNTL] = client;
+
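+ /* Pages 1-7 live at consecutive I2C addresses after the main one */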
+ for (i = 1; i < ARRAY_SIZE(ps_bridge->page); i++) {
+ ps_bridge->page[i] = devm_i2c_new_dummy_device(&client->dev,
+ client->adapter,
+ client->addr + i);
+ if (IS_ERR(ps_bridge->page[i])) {
+ dev_err(dev, "failed i2c dummy device, address %02x\n",
+ client->addr + i);
+ return PTR_ERR(ps_bridge->page[i]);
+ }
+ }
+
+ i2c_set_clientdata(client, ps_bridge);
+
+ drm_bridge_add(&ps_bridge->bridge);
+
+ return 0;
+}
+
+static int ps8640_remove(struct i2c_client *client)
+{
+ struct ps8640 *ps_bridge = i2c_get_clientdata(client);
+
+ drm_bridge_remove(&ps_bridge->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id ps8640_match[] = {
+ { .compatible = "parade,ps8640" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ps8640_match);
+
+static struct i2c_driver ps8640_driver = {
+ .probe_new = ps8640_probe,
+ .remove = ps8640_remove,
+ .driver = {
+ .name = "ps8640",
+ .of_match_table = ps8640_match,
+ },
+};
+module_i2c_driver(ps8640_driver);
+
+MODULE_AUTHOR("Jitao Shi <[email protected]>");
+MODULE_AUTHOR("CK Hu <[email protected]>");
+MODULE_AUTHOR("Enric Balletbo i Serra <[email protected]>");
+MODULE_DESCRIPTION("PARADE ps8640 DSI-eDP converter driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index b70e8c5cf2e1..6dad025f8da7 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -399,12 +399,18 @@ out:
mutex_unlock(&sii902x->mutex);
}
-static int sii902x_bridge_attach(struct drm_bridge *bridge)
+static int sii902x_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
struct drm_device *drm = bridge->dev;
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
drm_connector_helper_add(&sii902x->connector,
&sii902x_connector_helper_funcs);
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 4c0eef406eb1..92acd336aa89 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2202,7 +2202,8 @@ static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
return container_of(bridge, struct sii8620, bridge);
}
-static int sii8620_attach(struct drm_bridge *bridge)
+static int sii8620_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
new file mode 100644
index 000000000000..a2dca7a3ef03
--- /dev/null
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2015-2016 Free Electrons
+ * Copyright (C) 2015-2016 NextThing Co
+ *
+ * Maxime Ripard <[email protected]>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+struct simple_bridge_info {
+ const struct drm_bridge_timings *timings;
+ unsigned int connector_type;
+};
+
+struct simple_bridge {
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+
+ const struct simple_bridge_info *info;
+
+ struct i2c_adapter *ddc;
+ struct regulator *vdd;
+ struct gpio_desc *enable;
+};
+
+static inline struct simple_bridge *
+drm_bridge_to_simple_bridge(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct simple_bridge, bridge);
+}
+
+static inline struct simple_bridge *
+drm_connector_to_simple_bridge(struct drm_connector *connector)
+{
+ return container_of(connector, struct simple_bridge, connector);
+}
+
+static int simple_bridge_get_modes(struct drm_connector *connector)
+{
+ struct simple_bridge *sbridge = drm_connector_to_simple_bridge(connector);
+ struct edid *edid;
+ int ret;
+
+ if (!sbridge->ddc)
+ goto fallback;
+
+ edid = drm_get_edid(connector, sbridge->ddc);
+ if (!edid) {
+ DRM_INFO("EDID readout failed, falling back to standard modes\n");
+ goto fallback;
+ }
+
+ drm_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ return ret;
+
+fallback:
+ /*
+ * In case we cannot retrieve the EDID (broken or missing i2c
+ * bus), fall back to the XGA standards
+ */
+ ret = drm_add_modes_noedid(connector, 1920, 1200);
+
+ /* And prefer a mode pretty much anyone can handle */
+ drm_set_preferred_mode(connector, 1024, 768);
+
+ return ret;
+}
+
+static const struct drm_connector_helper_funcs simple_bridge_con_helper_funcs = {
+ .get_modes = simple_bridge_get_modes,
+};
+
+static enum drm_connector_status
+simple_bridge_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct simple_bridge *sbridge = drm_connector_to_simple_bridge(connector);
+
+ /*
+ * Even if we have an I2C bus, we can't assume that the cable
+ * is disconnected if drm_probe_ddc fails. Some cables don't
+ * wire the DDC pins, or the I2C bus might not be working at
+ * all.
+ */
+ if (sbridge->ddc && drm_probe_ddc(sbridge->ddc))
+ return connector_status_connected;
+
+ return connector_status_unknown;
+}
+
+static const struct drm_connector_funcs simple_bridge_con_funcs = {
+ .detect = simple_bridge_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int simple_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
+ int ret;
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Missing encoder\n");
+ return -ENODEV;
+ }
+
+ drm_connector_helper_add(&sbridge->connector,
+ &simple_bridge_con_helper_funcs);
+ ret = drm_connector_init_with_ddc(bridge->dev, &sbridge->connector,
+ &simple_bridge_con_funcs,
+ sbridge->info->connector_type,
+ sbridge->ddc);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector\n");
+ return ret;
+ }
+
+ drm_connector_attach_encoder(&sbridge->connector,
+ bridge->encoder);
+
+ return 0;
+}
+
+static void simple_bridge_enable(struct drm_bridge *bridge)
+{
+ struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
+ int ret;
+
+ if (sbridge->vdd) {
+ ret = regulator_enable(sbridge->vdd);
+ if (ret)
+ DRM_ERROR("Failed to enable vdd regulator: %d\n", ret);
+ }
+
+ gpiod_set_value_cansleep(sbridge->enable, 1);
+}
+
+static void simple_bridge_disable(struct drm_bridge *bridge)
+{
+ struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
+
+ gpiod_set_value_cansleep(sbridge->enable, 0);
+
+ if (sbridge->vdd)
+ regulator_disable(sbridge->vdd);
+}
+
+static const struct drm_bridge_funcs simple_bridge_bridge_funcs = {
+ .attach = simple_bridge_attach,
+ .enable = simple_bridge_enable,
+ .disable = simple_bridge_disable,
+};
+
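+/*
+ * Look up the optional ddc-i2c-bus phandle on the connector node behind
+ * the bridge's output port (port 1).
+ */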
+static struct i2c_adapter *simple_bridge_retrieve_ddc(struct device *dev)
+{
+ struct device_node *phandle, *remote;
+ struct i2c_adapter *ddc;
+
+ remote = of_graph_get_remote_node(dev->of_node, 1, -1);
+ if (!remote)
+ return ERR_PTR(-EINVAL);
+
+ phandle = of_parse_phandle(remote, "ddc-i2c-bus", 0);
+ of_node_put(remote);
+ if (!phandle)
+ return ERR_PTR(-ENODEV);
+
+ ddc = of_get_i2c_adapter_by_node(phandle);
+ of_node_put(phandle);
+ if (!ddc)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return ddc;
+}
+
+static int simple_bridge_probe(struct platform_device *pdev)
+{
+ struct simple_bridge *sbridge;
+
+ sbridge = devm_kzalloc(&pdev->dev, sizeof(*sbridge), GFP_KERNEL);
+ if (!sbridge)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, sbridge);
+
+ sbridge->info = of_device_get_match_data(&pdev->dev);
+
+ sbridge->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
+ if (IS_ERR(sbridge->vdd)) {
+ int ret = PTR_ERR(sbridge->vdd);
+
+ if (ret == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ sbridge->vdd = NULL;
+ dev_dbg(&pdev->dev, "No vdd regulator found: %d\n", ret);
+ }
+
+ sbridge->enable = devm_gpiod_get_optional(&pdev->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(sbridge->enable)) {
+ if (PTR_ERR(sbridge->enable) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to retrieve enable GPIO\n");
+ return PTR_ERR(sbridge->enable);
+ }
+
+ sbridge->ddc = simple_bridge_retrieve_ddc(&pdev->dev);
+ if (IS_ERR(sbridge->ddc)) {
+ if (PTR_ERR(sbridge->ddc) == -ENODEV) {
+ dev_dbg(&pdev->dev,
+ "No i2c bus specified. Disabling EDID readout\n");
+ sbridge->ddc = NULL;
+ } else {
+ dev_err(&pdev->dev, "Couldn't retrieve i2c bus\n");
+ return PTR_ERR(sbridge->ddc);
+ }
+ }
+
+ sbridge->bridge.funcs = &simple_bridge_bridge_funcs;
+ sbridge->bridge.of_node = pdev->dev.of_node;
+ sbridge->bridge.timings = sbridge->info->timings;
+
+ drm_bridge_add(&sbridge->bridge);
+
+ return 0;
+}
+
+static int simple_bridge_remove(struct platform_device *pdev)
+{
+ struct simple_bridge *sbridge = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&sbridge->bridge);
+
+ if (sbridge->ddc)
+ i2c_put_adapter(sbridge->ddc);
+
+ return 0;
+}
+
+/*
+ * We assume the ADV7123 DAC is the "default" for historical reasons.
+ * Information taken from the ADV7123 datasheet, revision D.
+ * NOTE: the ADV7123EP seems to have other timings and needs a new timings
+ * set if used.
+ */
+static const struct drm_bridge_timings default_bridge_timings = {
+ /* Timing specifications, datasheet page 7 */
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+ .setup_time_ps = 500,
+ .hold_time_ps = 1500,
+};
+
+/*
+ * Information taken from the THS8134, THS8134A, THS8134B datasheet named
+ * "SLVS205D", dated May 1990, revised March 2000.
+ */
+static const struct drm_bridge_timings ti_ths8134_bridge_timings = {
+ /* From timing diagram, datasheet page 9 */
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+ /* From datasheet, page 12 */
+ .setup_time_ps = 3000,
+ /* I guess this means latched input */
+ .hold_time_ps = 0,
+};
+
+/*
+ * Information taken from the THS8135 datasheet named "SLAS343B", dated
+ * May 2001, revised April 2013.
+ */
+static const struct drm_bridge_timings ti_ths8135_bridge_timings = {
+ /* From timing diagram, datasheet page 14 */
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+ /* From datasheet, page 16 */
+ .setup_time_ps = 2000,
+ .hold_time_ps = 500,
+};
+
+static const struct of_device_id simple_bridge_match[] = {
+ {
+ .compatible = "dumb-vga-dac",
+ .data = &(const struct simple_bridge_info) {
+ .connector_type = DRM_MODE_CONNECTOR_VGA,
+ },
+ }, {
+ .compatible = "adi,adv7123",
+ .data = &(const struct simple_bridge_info) {
+ .timings = &default_bridge_timings,
+ .connector_type = DRM_MODE_CONNECTOR_VGA,
+ },
+ }, {
+ .compatible = "ti,opa362",
+ .data = &(const struct simple_bridge_info) {
+ .connector_type = DRM_MODE_CONNECTOR_Composite,
+ },
+ }, {
+ .compatible = "ti,ths8135",
+ .data = &(const struct simple_bridge_info) {
+ .timings = &ti_ths8135_bridge_timings,
+ .connector_type = DRM_MODE_CONNECTOR_VGA,
+ },
+ }, {
+ .compatible = "ti,ths8134",
+ .data = &(const struct simple_bridge_info) {
+ .timings = &ti_ths8134_bridge_timings,
+ .connector_type = DRM_MODE_CONNECTOR_VGA,
+ },
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, simple_bridge_match);
+
+static struct platform_driver simple_bridge_driver = {
+ .probe = simple_bridge_probe,
+ .remove = simple_bridge_remove,
+ .driver = {
+ .name = "simple-bridge",
+ .of_match_table = simple_bridge_match,
+ },
+};
+module_platform_driver(simple_bridge_driver);
+
+MODULE_AUTHOR("Maxime Ripard <[email protected]>");
+MODULE_DESCRIPTION("Simple DRM bridge driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 67fca439bbfb..f85c15ad8486 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1814,13 +1814,32 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len;
unsigned int vdisplay, hdisplay;
- vmode->mtmdsclock = vmode->mpixelclock = mode->clock * 1000;
+ vmode->mpixelclock = mode->clock * 1000;
dev_dbg(hdmi->dev, "final pixclk = %d\n", vmode->mpixelclock);
+ vmode->mtmdsclock = vmode->mpixelclock;
+
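+ /*
+ * Deep color over RGB/YUV444 scales the TMDS clock above the pixel
+ * clock (x2 at 16 bpc, x3/2 at 12 bpc, x5/4 at 10 bpc); YUV422
+ * carries deep color at the pixel clock rate, so it is skipped here.
+ */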
+ if (!hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format)) {
+ switch (hdmi_bus_fmt_color_depth(
+ hdmi->hdmi_data.enc_out_bus_format)) {
+ case 16:
+ vmode->mtmdsclock = vmode->mpixelclock * 2;
+ break;
+ case 12:
+ vmode->mtmdsclock = vmode->mpixelclock * 3 / 2;
+ break;
+ case 10:
+ vmode->mtmdsclock = vmode->mpixelclock * 5 / 4;
+ break;
+ }
+ }
+
if (hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format))
vmode->mtmdsclock /= 2;
+ dev_dbg(hdmi->dev, "final tmdsclock = %d\n", vmode->mtmdsclock);
+
/* Set up HDMI_FC_INVIDCONF */
inv_val = (hdmi->hdmi_data.hdcp_enable ||
(dw_hdmi_support_scdc(hdmi) &&
@@ -2078,11 +2097,10 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0;
- /* TOFIX: Get input format from plat data or fallback to RGB888 */
if (hdmi->plat_data->input_bus_format)
hdmi->hdmi_data.enc_in_bus_format =
hdmi->plat_data->input_bus_format;
- else
+ else if (hdmi->hdmi_data.enc_in_bus_format == MEDIA_BUS_FMT_FIXED)
hdmi->hdmi_data.enc_in_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
/* TOFIX: Get input encoding from plat data or fallback to none */
@@ -2092,8 +2110,8 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
else
hdmi->hdmi_data.enc_in_encoding = V4L2_YCBCR_ENC_DEFAULT;
- /* TOFIX: Default to RGB888 output format */
- hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ if (hdmi->hdmi_data.enc_out_bus_format == MEDIA_BUS_FMT_FIXED)
+ hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
hdmi->hdmi_data.pix_repet_factor = 0;
hdmi->hdmi_data.hdcp_enable = 0;
@@ -2371,7 +2389,279 @@ static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs =
.atomic_check = dw_hdmi_connector_atomic_check,
};
-static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
+/*
+ * Possible output formats :
+ * - MEDIA_BUS_FMT_UYYVYY16_0_5X48,
+ * - MEDIA_BUS_FMT_UYYVYY12_0_5X36,
+ * - MEDIA_BUS_FMT_UYYVYY10_0_5X30,
+ * - MEDIA_BUS_FMT_UYYVYY8_0_5X24,
+ * - MEDIA_BUS_FMT_YUV16_1X48,
+ * - MEDIA_BUS_FMT_RGB161616_1X48,
+ * - MEDIA_BUS_FMT_UYVY12_1X24,
+ * - MEDIA_BUS_FMT_YUV12_1X36,
+ * - MEDIA_BUS_FMT_RGB121212_1X36,
+ * - MEDIA_BUS_FMT_UYVY10_1X20,
+ * - MEDIA_BUS_FMT_YUV10_1X30,
+ * - MEDIA_BUS_FMT_RGB101010_1X30,
+ * - MEDIA_BUS_FMT_UYVY8_1X16,
+ * - MEDIA_BUS_FMT_YUV8_1X24,
+ * - MEDIA_BUS_FMT_RGB888_1X24,
+ */
+
+/* Can return a maximum of 11 possible output formats for a mode/connector */
+#define MAX_OUTPUT_SEL_FORMATS 11
+
+static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
+{
+ struct drm_connector *conn = conn_state->connector;
+ struct drm_display_info *info = &conn->display_info;
+ struct drm_display_mode *mode = &crtc_state->mode;
+ u8 max_bpc = conn_state->max_requested_bpc;
+ bool is_hdmi2_sink = info->hdmi.scdc.supported ||
+ (info->color_formats & DRM_COLOR_FORMAT_YCRCB420);
+ u32 *output_fmts;
+ unsigned int i = 0;
+
+ *num_output_fmts = 0;
+
+ output_fmts = kcalloc(MAX_OUTPUT_SEL_FORMATS, sizeof(*output_fmts),
+ GFP_KERNEL);
+ if (!output_fmts)
+ return NULL;
+
+ /* If dw-hdmi is the only bridge, avoid negotiating with ourselves */
+ if (list_is_singular(&bridge->encoder->bridge_chain)) {
+ *num_output_fmts = 1;
+ output_fmts[0] = MEDIA_BUS_FMT_FIXED;
+
+ return output_fmts;
+ }
+
+ /*
+ * If the current mode enforces 4:2:0, force the output bus format
+ * to 4:2:0 and do not add the YUV422/444/RGB formats
+ */
+ if (conn->ycbcr_420_allowed &&
+ (drm_mode_is_420_only(info, mode) ||
+ (is_hdmi2_sink && drm_mode_is_420_also(info, mode)))) {
+
+ /* Order bus formats from 16bit to 8bit if supported */
+ if (max_bpc >= 16 && info->bpc == 16 &&
+ (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48))
+ output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY16_0_5X48;
+
+ if (max_bpc >= 12 && info->bpc >= 12 &&
+ (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36))
+ output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY12_0_5X36;
+
+ if (max_bpc >= 10 && info->bpc >= 10 &&
+ (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30))
+ output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY10_0_5X30;
+
+ /* Default 8bit fallback */
+ output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY8_0_5X24;
+
+ *num_output_fmts = i;
+
+ return output_fmts;
+ }
+
+ /*
+ * Order bus formats from 16bit to 8bit and from YUV422 to RGB
+ * if supported. In any case the default RGB888 format is added
+ */
+
+ if (max_bpc >= 16 && info->bpc == 16) {
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ output_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48;
+
+ output_fmts[i++] = MEDIA_BUS_FMT_RGB161616_1X48;
+ }
+
+ if (max_bpc >= 12 && info->bpc >= 12) {
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+ output_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
+
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ output_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
+
+ output_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
+ }
+
+ if (max_bpc >= 10 && info->bpc >= 10) {
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+ output_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
+
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ output_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
+
+ output_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
+ }
+
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+ output_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
+
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ output_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+
+ /* Default 8bit RGB fallback */
+ output_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+
+ *num_output_fmts = i;
+
+ return output_fmts;
+}
+
+/*
+ * Possible input formats :
+ * - MEDIA_BUS_FMT_RGB888_1X24
+ * - MEDIA_BUS_FMT_YUV8_1X24
+ * - MEDIA_BUS_FMT_UYVY8_1X16
+ * - MEDIA_BUS_FMT_UYYVYY8_0_5X24
+ * - MEDIA_BUS_FMT_RGB101010_1X30
+ * - MEDIA_BUS_FMT_YUV10_1X30
+ * - MEDIA_BUS_FMT_UYVY10_1X20
+ * - MEDIA_BUS_FMT_UYYVYY10_0_5X30
+ * - MEDIA_BUS_FMT_RGB121212_1X36
+ * - MEDIA_BUS_FMT_YUV12_1X36
+ * - MEDIA_BUS_FMT_UYVY12_1X24
+ * - MEDIA_BUS_FMT_UYYVYY12_0_5X36
+ * - MEDIA_BUS_FMT_RGB161616_1X48
+ * - MEDIA_BUS_FMT_YUV16_1X48
+ * - MEDIA_BUS_FMT_UYYVYY16_0_5X48
+ */
+
+/* Can return a maximum of 3 possible input formats for an output format */
+#define MAX_INPUT_SEL_FORMATS 3
+
+static u32 *dw_hdmi_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+ unsigned int i = 0;
+
+ *num_input_fmts = 0;
+
+ input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
+ GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ switch (output_fmt) {
+ /* If MEDIA_BUS_FMT_FIXED is tested, return default bus format */
+ case MEDIA_BUS_FMT_FIXED:
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ /* 8bit */
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
+ break;
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+
+ /* 10bit */
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
+ break;
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
+ break;
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
+ break;
+
+ /* 12bit */
+ case MEDIA_BUS_FMT_RGB121212_1X36:
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
+ break;
+ case MEDIA_BUS_FMT_YUV12_1X36:
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
+ break;
+ case MEDIA_BUS_FMT_UYVY12_1X24:
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
+ break;
+
+ /* 16bit */
+ case MEDIA_BUS_FMT_RGB161616_1X48:
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB161616_1X48;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48;
+ break;
+ case MEDIA_BUS_FMT_YUV16_1X48:
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB161616_1X48;
+ break;
+
+ /* YUV 4:2:0 */
+ case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
+ case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
+ case MEDIA_BUS_FMT_UYYVYY12_0_5X36:
+ case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
+ input_fmts[i++] = output_fmt;
+ break;
+ }
+
+ *num_input_fmts = i;
+
+ if (*num_input_fmts == 0) {
+ kfree(input_fmts);
+ input_fmts = NULL;
+ }
+
+ return input_fmts;
+}
+
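+/*
+ * Record the bus formats negotiated in the atomic bridge state so that
+ * the rest of the driver configures the pipeline for them.
+ */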
+static int dw_hdmi_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct dw_hdmi *hdmi = bridge->driver_private;
+
+ hdmi->hdmi_data.enc_out_bus_format =
+ bridge_state->output_bus_cfg.format;
+
+ hdmi->hdmi_data.enc_in_bus_format =
+ bridge_state->input_bus_cfg.format;
+
+ dev_dbg(hdmi->dev, "input format 0x%04x, output format 0x%04x\n",
+ bridge_state->input_bus_cfg.format,
+ bridge_state->output_bus_cfg.format);
+
+ return 0;
+}
+
+static int dw_hdmi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct dw_hdmi *hdmi = bridge->driver_private;
struct drm_encoder *encoder = bridge->encoder;
@@ -2379,6 +2669,11 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
struct cec_connector_info conn_info;
struct cec_notifier *notifier;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
connector->interlace_allowed = 1;
connector->polled = DRM_CONNECTOR_POLL_HPD;
@@ -2389,6 +2684,14 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
DRM_MODE_CONNECTOR_HDMIA,
hdmi->ddc);
+ /*
+ * drm_connector_attach_max_bpc_property() requires the
+ * connector to have a state.
+ */
+ drm_atomic_helper_connector_reset(connector);
+
+ drm_connector_attach_max_bpc_property(connector, 8, 16);
+
if (hdmi->version >= 0x200a && hdmi->plat_data->use_drm_infoframe)
drm_object_attach_property(&connector->base,
connector->dev->mode_config.hdr_output_metadata_property, 0);
@@ -2473,8 +2776,14 @@ static void dw_hdmi_bridge_enable(struct drm_bridge *bridge)
}
static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.attach = dw_hdmi_bridge_attach,
.detach = dw_hdmi_bridge_detach,
+ .atomic_check = dw_hdmi_bridge_atomic_check,
+ .atomic_get_output_bus_fmts = dw_hdmi_bridge_atomic_get_output_bus_fmts,
+ .atomic_get_input_bus_fmts = dw_hdmi_bridge_atomic_get_input_bus_fmts,
.enable = dw_hdmi_bridge_enable,
.disable = dw_hdmi_bridge_disable,
.mode_set = dw_hdmi_bridge_mode_set,
@@ -2943,6 +3252,12 @@ __dw_hdmi_probe(struct platform_device *pdev,
hdmi->bridge.of_node = pdev->dev.of_node;
#endif
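+ /* Only HDMI 2.0 capable cores (>= v2.00a) can output YCbCr 4:2:0 */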
+ if (hdmi->version >= 0x200a)
+ hdmi->connector.ycbcr_420_allowed =
+ hdmi->plat_data->ycbcr_420_allowed;
+ else
+ hdmi->connector.ycbcr_420_allowed = false;
+
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.parent = dev;
pdevinfo.id = PLATFORM_DEVID_AUTO;
@@ -3076,7 +3391,7 @@ struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev,
if (IS_ERR(hdmi))
return hdmi;
- ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL);
+ ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL, 0);
if (ret) {
dw_hdmi_remove(hdmi);
DRM_ERROR("Failed to initialize bridge with drm\n");
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index b18351b6760a..5ef0f154aa7b 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -824,7 +824,8 @@ static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
* This needs to be fixed in the drm_bridge framework and the API
* needs to be updated to manage our own call chains...
*/
- dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
+ if (dsi->panel_bridge->funcs->post_disable)
+ dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
if (phy_ops->power_off)
phy_ops->power_off(dsi->plat_data->priv_data);
@@ -935,7 +936,8 @@ dw_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
return mode_status;
}
-static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge)
+static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
@@ -948,7 +950,8 @@ static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge)
bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge);
+ return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
+ flags);
}
static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = {
@@ -1119,7 +1122,7 @@ int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder)
{
int ret;
- ret = drm_bridge_attach(encoder, &dsi->bridge, NULL);
+ ret = drm_bridge_attach(encoder, &dsi->bridge, NULL, 0);
if (ret) {
DRM_ERROR("Failed to initialize bridge with drm\n");
return ret;
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 96207fcfde19..5ac1430fab04 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -349,12 +349,18 @@ static void tc358764_enable(struct drm_bridge *bridge)
dev_err(ctx->dev, "error enabling panel (%d)\n", ret);
}
-static int tc358764_attach(struct drm_bridge *bridge)
+static int tc358764_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
struct drm_device *drm = bridge->dev;
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
ctx->connector.polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(drm, &ctx->connector,
&tc358764_connector_funcs,
@@ -369,7 +375,6 @@ static int tc358764_attach(struct drm_bridge *bridge)
drm_connector_attach_encoder(&ctx->connector, bridge->encoder);
drm_panel_attach(ctx->panel, &ctx->connector);
ctx->connector.funcs->reset(&ctx->connector);
- drm_fb_helper_add_one_connector(drm->fb_helper, &ctx->connector);
drm_connector_register(&ctx->connector);
return 0;
@@ -378,10 +383,8 @@ static int tc358764_attach(struct drm_bridge *bridge)
static void tc358764_detach(struct drm_bridge *bridge)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
- struct drm_device *drm = bridge->dev;
drm_connector_unregister(&ctx->connector);
- drm_fb_helper_remove_one_connector(drm->fb_helper, &ctx->connector);
drm_panel_detach(ctx->panel);
ctx->panel = NULL;
drm_connector_put(&ctx->connector);
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 3709e5ace724..e4c0ea03ae3a 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -31,6 +31,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
/* Registers */
@@ -297,7 +298,7 @@ static inline int tc_poll_timeout(struct tc_data *tc, unsigned int addr,
static int tc_aux_wait_busy(struct tc_data *tc)
{
- return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 1000, 100000);
+ return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 100, 100000);
}
static int tc_aux_write_data(struct tc_data *tc, const void *data,
@@ -640,7 +641,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
if (ret)
goto err;
- ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
+ ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 100, 100000);
if (ret == -ETIMEDOUT) {
dev_err(tc->dev, "Timeout waiting for PHY to become ready");
return ret;
@@ -876,7 +877,7 @@ static int tc_wait_link_training(struct tc_data *tc)
int ret;
ret = tc_poll_timeout(tc, DP0_LTSTAT, LT_LOOPDONE,
- LT_LOOPDONE, 1, 1000);
+ LT_LOOPDONE, 500, 100000);
if (ret) {
dev_err(tc->dev, "Link training timeout waiting for LT_LOOPDONE!\n");
return ret;
@@ -949,7 +950,7 @@ static int tc_main_link_enable(struct tc_data *tc)
dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | PHY_M0_RST);
ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
- ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
+ ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 500, 100000);
if (ret) {
dev_err(dev, "timeout waiting for phy become ready");
return ret;
@@ -1403,13 +1404,19 @@ static const struct drm_connector_funcs tc_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int tc_bridge_attach(struct drm_bridge *bridge)
+static int tc_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
struct tc_data *tc = bridge_to_tc(bridge);
struct drm_device *drm = bridge->dev;
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
/* Create DP/eDP connector */
drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
new file mode 100644
index 000000000000..1b39e8d37834
--- /dev/null
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -0,0 +1,1046 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <[email protected]>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <video/mipi_display.h>
+#include <video/videomode.h>
+
+/* Global (16-bit addressable) */
+#define TC358768_CHIPID 0x0000
+#define TC358768_SYSCTL 0x0002
+#define TC358768_CONFCTL 0x0004
+#define TC358768_VSDLY 0x0006
+#define TC358768_DATAFMT 0x0008
+#define TC358768_GPIOEN 0x000E
+#define TC358768_GPIODIR 0x0010
+#define TC358768_GPIOIN 0x0012
+#define TC358768_GPIOOUT 0x0014
+#define TC358768_PLLCTL0 0x0016
+#define TC358768_PLLCTL1 0x0018
+#define TC358768_CMDBYTE 0x0022
+#define TC358768_PP_MISC 0x0032
+#define TC358768_DSITX_DT 0x0050
+#define TC358768_FIFOSTATUS 0x00F8
+
+/* Debug (16-bit addressable) */
+#define TC358768_VBUFCTRL 0x00E0
+#define TC358768_DBG_WIDTH 0x00E2
+#define TC358768_DBG_VBLANK 0x00E4
+#define TC358768_DBG_DATA 0x00E8
+
+/* TX PHY (32-bit addressable) */
+#define TC358768_CLW_DPHYCONTTX 0x0100
+#define TC358768_D0W_DPHYCONTTX 0x0104
+#define TC358768_D1W_DPHYCONTTX 0x0108
+#define TC358768_D2W_DPHYCONTTX 0x010C
+#define TC358768_D3W_DPHYCONTTX 0x0110
+#define TC358768_CLW_CNTRL 0x0140
+#define TC358768_D0W_CNTRL 0x0144
+#define TC358768_D1W_CNTRL 0x0148
+#define TC358768_D2W_CNTRL 0x014C
+#define TC358768_D3W_CNTRL 0x0150
+
+/* TX PPI (32-bit addressable) */
+#define TC358768_STARTCNTRL 0x0204
+#define TC358768_DSITXSTATUS 0x0208
+#define TC358768_LINEINITCNT 0x0210
+#define TC358768_LPTXTIMECNT 0x0214
+#define TC358768_TCLK_HEADERCNT 0x0218
+#define TC358768_TCLK_TRAILCNT 0x021C
+#define TC358768_THS_HEADERCNT 0x0220
+#define TC358768_TWAKEUP 0x0224
+#define TC358768_TCLK_POSTCNT 0x0228
+#define TC358768_THS_TRAILCNT 0x022C
+#define TC358768_HSTXVREGCNT 0x0230
+#define TC358768_HSTXVREGEN 0x0234
+#define TC358768_TXOPTIONCNTRL 0x0238
+#define TC358768_BTACNTRL1 0x023C
+
+/* TX CTRL (32-bit addressable) */
+#define TC358768_DSI_CONTROL 0x040C
+#define TC358768_DSI_STATUS 0x0410
+#define TC358768_DSI_INT 0x0414
+#define TC358768_DSI_INT_ENA 0x0418
+#define TC358768_DSICMD_RDFIFO 0x0430
+#define TC358768_DSI_ACKERR 0x0434
+#define TC358768_DSI_ACKERR_INTENA 0x0438
+#define TC358768_DSI_ACKERR_HALT 0x043c
+#define TC358768_DSI_RXERR 0x0440
+#define TC358768_DSI_RXERR_INTENA 0x0444
+#define TC358768_DSI_RXERR_HALT 0x0448
+#define TC358768_DSI_ERR 0x044C
+#define TC358768_DSI_ERR_INTENA 0x0450
+#define TC358768_DSI_ERR_HALT 0x0454
+#define TC358768_DSI_CONFW 0x0500
+#define TC358768_DSI_LPCMD 0x0500
+#define TC358768_DSI_RESET 0x0504
+#define TC358768_DSI_INT_CLR 0x050C
+#define TC358768_DSI_START 0x0518
+
+/* DSITX CTRL (16-bit addressable) */
+#define TC358768_DSICMD_TX 0x0600
+#define TC358768_DSICMD_TYPE 0x0602
+#define TC358768_DSICMD_WC 0x0604
+#define TC358768_DSICMD_WD0 0x0610
+#define TC358768_DSICMD_WD1 0x0612
+#define TC358768_DSICMD_WD2 0x0614
+#define TC358768_DSICMD_WD3 0x0616
+#define TC358768_DSI_EVENT 0x0620
+#define TC358768_DSI_VSW 0x0622
+#define TC358768_DSI_VBPR 0x0624
+#define TC358768_DSI_VACT 0x0626
+#define TC358768_DSI_HSW 0x0628
+#define TC358768_DSI_HBPR 0x062A
+#define TC358768_DSI_HACT 0x062C
+
+/* TC358768_DSI_CONTROL (0x040C) register */
+#define TC358768_DSI_CONTROL_DIS_MODE BIT(15)
+#define TC358768_DSI_CONTROL_TXMD BIT(7)
+#define TC358768_DSI_CONTROL_HSCKMD BIT(5)
+#define TC358768_DSI_CONTROL_EOTDIS BIT(0)
+
+/* TC358768_DSI_CONFW (0x0500) register */
+#define TC358768_DSI_CONFW_MODE_SET (5 << 29)
+#define TC358768_DSI_CONFW_MODE_CLR (6 << 29)
+#define TC358768_DSI_CONFW_ADDR_DSI_CONTROL (0x3 << 24)
+
+static const char * const tc358768_supplies[] = {
+ "vddc", "vddmipi", "vddio"
+};
+
+struct tc358768_dsi_output {
+ struct mipi_dsi_device *dev;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+};
+
+struct tc358768_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(tc358768_supplies)];
+ struct clk *refclk;
+ int enabled;
+ int error;
+
+ struct mipi_dsi_host dsi_host;
+ struct drm_bridge bridge;
+ struct tc358768_dsi_output output;
+
+ u32 pd_lines; /* number of Parallel Port Input Data Lines */
+ u32 dsi_lanes; /* number of DSI Lanes */
+
+ /* Parameters for PLL programming */
+ u32 fbd; /* PLL feedback divider */
+ u32 prd; /* PLL input divider */
+ u32 frs; /* PLL frequency range for HSCK (post divider) */
+
+ u32 dsiclk; /* pll_clk / 2 */
+};
+
+static inline struct tc358768_priv *dsi_host_to_tc358768(struct mipi_dsi_host
+ *host)
+{
+ return container_of(host, struct tc358768_priv, dsi_host);
+}
+
+static inline struct tc358768_priv *bridge_to_tc358768(struct drm_bridge
+ *bridge)
+{
+ return container_of(bridge, struct tc358768_priv, bridge);
+}
+
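+/*
+ * Register I/O below latches the first regmap error in priv->error; once
+ * set, later accesses are skipped, and the error is returned (and reset)
+ * by tc358768_clear_error(). Callers can thus batch register writes and
+ * check for failure once at the end.
+ */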
+static int tc358768_clear_error(struct tc358768_priv *priv)
+{
+ int ret = priv->error;
+
+ priv->error = 0;
+ return ret;
+}
+
+static void tc358768_write(struct tc358768_priv *priv, u32 reg, u32 val)
+{
+ size_t count = 2;
+
+ if (priv->error)
+ return;
+
+ /* Registers below 0x100 and from 0x600 up are 16-bit wide */
+ if (reg < 0x100 || reg >= 0x600)
+ count = 1;
+
+ priv->error = regmap_bulk_write(priv->regmap, reg, &val, count);
+}
+
+static void tc358768_read(struct tc358768_priv *priv, u32 reg, u32 *val)
+{
+ size_t count = 2;
+
+ if (priv->error)
+ return;
+
+ /* Registers below 0x100 and from 0x600 up are 16-bit wide */
+ if (reg < 0x100 || reg >= 0x600) {
+ *val = 0;
+ count = 1;
+ }
+
+ priv->error = regmap_bulk_read(priv->regmap, reg, val, count);
+}
+
+static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask,
+ u32 val)
+{
+ u32 tmp, orig;
+
+ tc358768_read(priv, reg, &orig);
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+ if (tmp != orig)
+ tc358768_write(priv, reg, tmp);
+}
+
+static int tc358768_sw_reset(struct tc358768_priv *priv)
+{
+ /* Assert Reset */
+ tc358768_write(priv, TC358768_SYSCTL, 1);
+ /* Release Reset, Exit Sleep */
+ tc358768_write(priv, TC358768_SYSCTL, 0);
+
+ return tc358768_clear_error(priv);
+}
+
+static void tc358768_hw_enable(struct tc358768_priv *priv)
+{
+ int ret;
+
+ if (priv->enabled)
+ return;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies), priv->supplies);
+ if (ret < 0)
+ dev_err(priv->dev, "error enabling regulators (%d)\n", ret);
+
+ if (priv->reset_gpio)
+ usleep_range(200, 300);
+
+ /*
+ * The RESX is active low (GPIO_ACTIVE_LOW).
+ * DEASSERT (value = 0) the reset_gpio to enable the chip
+ */
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+
+ /* wait for encoder clocks to stabilize */
+ usleep_range(1000, 2000);
+
+ priv->enabled = true;
+}
+
+static void tc358768_hw_disable(struct tc358768_priv *priv)
+{
+ int ret;
+
+ if (!priv->enabled)
+ return;
+
+ /*
+ * The RESX is active low (GPIO_ACTIVE_LOW).
+ * ASSERT (value = 1) the reset_gpio to disable the chip
+ */
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret < 0)
+ dev_err(priv->dev, "error disabling regulators (%d)\n", ret);
+
+ priv->enabled = false;
+}
+
+static u32 tc358768_pll_to_pclk(struct tc358768_priv *priv, u32 pll_clk)
+{
+ return (u32)div_u64((u64)pll_clk * priv->dsi_lanes, priv->pd_lines);
+}
+
+static u32 tc358768_pclk_to_pll(struct tc358768_priv *priv, u32 pclk)
+{
+ return (u32)div_u64((u64)pclk * priv->pd_lines, priv->dsi_lanes);
+}
+
+static int tc358768_calc_pll(struct tc358768_priv *priv,
+ const struct drm_display_mode *mode,
+ bool verify_only)
+{
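+ /* HSCK range boundaries used to pick the FRS post divider (2^FRS) */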
+ const u32 frs_limits[] = {
+ 1000000000,
+ 500000000,
+ 250000000,
+ 125000000,
+ 62500000
+ };
+ unsigned long refclk;
+ u32 prd, target_pll, i, max_pll, min_pll;
+ u32 frs, best_diff, best_pll, best_prd, best_fbd;
+
+ target_pll = tc358768_pclk_to_pll(priv, mode->clock * 1000);
+
+ /* pll_clk = RefClk * [(FBD + 1)/ (PRD + 1)] * [1 / (2^FRS)] */
+
+ for (i = 0; i < ARRAY_SIZE(frs_limits); i++)
+ if (target_pll >= frs_limits[i])
+ break;
+
+ if (i == ARRAY_SIZE(frs_limits) || i == 0)
+ return -EINVAL;
+
+ frs = i - 1;
+ max_pll = frs_limits[i - 1];
+ min_pll = frs_limits[i];
+
+ refclk = clk_get_rate(priv->refclk);
+
+ best_diff = UINT_MAX;
+ best_pll = 0;
+ best_prd = 0;
+ best_fbd = 0;
+
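+ /* Brute-force search of the 4-bit PRD and 9-bit FBD divider fields */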
+ for (prd = 0; prd < 16; ++prd) {
+ u32 divisor = (prd + 1) * (1 << frs);
+ u32 fbd;
+
+ for (fbd = 0; fbd < 512; ++fbd) {
+ u32 pll, diff;
+
+ pll = (u32)div_u64((u64)refclk * (fbd + 1), divisor);
+
+ if (pll >= max_pll || pll < min_pll)
+ continue;
+
+ diff = max(pll, target_pll) - min(pll, target_pll);
+
+ if (diff < best_diff) {
+ best_diff = diff;
+ best_pll = pll;
+ best_prd = prd;
+ best_fbd = fbd;
+
+ if (best_diff == 0)
+ goto found;
+ }
+ }
+ }
+
+ if (best_diff == UINT_MAX) {
+ dev_err(priv->dev, "could not find suitable PLL setup\n");
+ return -EINVAL;
+ }
+
+found:
+ if (verify_only)
+ return 0;
+
+ priv->fbd = best_fbd;
+ priv->prd = best_prd;
+ priv->frs = frs;
+ priv->dsiclk = best_pll / 2;
+
+ return 0;
+}
+
+static int tc358768_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dev)
+{
+ struct tc358768_priv *priv = dsi_host_to_tc358768(host);
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
+ struct device_node *ep;
+ int ret;
+
+ if (dev->lanes > 4) {
+ dev_err(priv->dev, "unsupported number of data lanes(%u)\n",
+ dev->lanes);
+ return -EINVAL;
+ }
+
+ /*
+ * tc358768 supports both Video and Pulse mode, but this driver
+ * currently implements only Video (event) mode
+ */
+ if (!(dev->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+ dev_err(priv->dev, "Only MIPI_DSI_MODE_VIDEO is supported\n");
+ return -ENOTSUPP;
+ }
+
+ /*
+ * tc358768 supports RGB888, RGB666, RGB666_PACKED and RGB565, but only
+ * RGB888 is verified.
+ */
+ if (dev->format != MIPI_DSI_FMT_RGB888) {
+ dev_warn(priv->dev, "Only MIPI_DSI_FMT_RGB888 tested!\n");
+ return -ENOTSUPP;
+ }
+
+ ret = drm_of_find_panel_or_bridge(host->dev->of_node, 1, 0, &panel,
+ &bridge);
+ if (ret)
+ return ret;
+
+ if (panel) {
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ }
+
+ priv->output.dev = dev;
+ priv->output.bridge = bridge;
+ priv->output.panel = panel;
+
+ priv->dsi_lanes = dev->lanes;
+
+ /* get input ep (port0/endpoint0) */
+ ret = -EINVAL;
+ ep = of_graph_get_endpoint_by_regs(host->dev->of_node, 0, 0);
+ if (ep) {
+ ret = of_property_read_u32(ep, "data-lines", &priv->pd_lines);
+
+ of_node_put(ep);
+ }
+
+ if (ret)
+ priv->pd_lines = mipi_dsi_pixel_format_to_bpp(dev->format);
+
+ drm_bridge_add(&priv->bridge);
+
+ return 0;
+}
+
+static int tc358768_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dev)
+{
+ struct tc358768_priv *priv = dsi_host_to_tc358768(host);
+
+ drm_bridge_remove(&priv->bridge);
+ if (priv->output.panel)
+ drm_panel_bridge_remove(priv->output.bridge);
+
+ return 0;
+}
+
+static ssize_t tc358768_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct tc358768_priv *priv = dsi_host_to_tc358768(host);
+ struct mipi_dsi_packet packet;
+ int ret;
+
+ if (!priv->enabled) {
+ dev_err(priv->dev, "Bridge is not enabled\n");
+ return -ENODEV;
+ }
+
+ if (msg->rx_len) {
+ dev_warn(priv->dev, "MIPI rx is not supported\n");
+ return -ENOTSUPP;
+ }
+
+ if (msg->tx_len > 8) {
+ dev_warn(priv->dev, "Maximum 8 byte MIPI tx is supported\n");
+ return -ENOTSUPP;
+ }
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret)
+ return ret;
+
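+ /*
+ * Short packets carry their two parameter bytes in the header;
+ * long packets stream the payload as 16-bit little-endian words.
+ */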
+ if (mipi_dsi_packet_format_is_short(msg->type)) {
+ tc358768_write(priv, TC358768_DSICMD_TYPE,
+ (0x10 << 8) | (packet.header[0] & 0x3f));
+ tc358768_write(priv, TC358768_DSICMD_WC, 0);
+ tc358768_write(priv, TC358768_DSICMD_WD0,
+ (packet.header[2] << 8) | packet.header[1]);
+ } else {
+ int i;
+
+ tc358768_write(priv, TC358768_DSICMD_TYPE,
+ (0x40 << 8) | (packet.header[0] & 0x3f));
+ tc358768_write(priv, TC358768_DSICMD_WC, packet.payload_length);
+ for (i = 0; i < packet.payload_length; i += 2) {
+ u16 val = packet.payload[i];
+
+ if (i + 1 < packet.payload_length)
+ val |= packet.payload[i + 1] << 8;
+
+ tc358768_write(priv, TC358768_DSICMD_WD0 + i, val);
+ }
+ }
+
+ /* start transfer */
+ tc358768_write(priv, TC358768_DSICMD_TX, 1);
+
+ ret = tc358768_clear_error(priv);
+ if (ret)
+ dev_warn(priv->dev, "Software disable failed: %d\n", ret);
+ else
+ ret = packet.size;
+
+ return ret;
+}
+
+static const struct mipi_dsi_host_ops tc358768_dsi_host_ops = {
+ .attach = tc358768_dsi_host_attach,
+ .detach = tc358768_dsi_host_detach,
+ .transfer = tc358768_dsi_host_transfer,
+};
+
+static int tc358768_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+
+ if (!drm_core_check_feature(bridge->dev, DRIVER_ATOMIC)) {
+ dev_err(priv->dev, "needs atomic updates support\n");
+ return -ENOTSUPP;
+ }
+
+ return drm_bridge_attach(bridge->encoder, priv->output.bridge, bridge,
+ flags);
+}
+
+static enum drm_mode_status
+tc358768_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+
+ if (tc358768_calc_pll(priv, mode, true))
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+static void tc358768_bridge_disable(struct drm_bridge *bridge)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+ int ret;
+
+ /* set FrmStop */
+ tc358768_update_bits(priv, TC358768_PP_MISC, BIT(15), BIT(15));
+
+ /* wait at least for one frame */
+ msleep(50);
+
+ /* clear PP_en */
+ tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), 0);
+
+ /* set RstPtr */
+ tc358768_update_bits(priv, TC358768_PP_MISC, BIT(14), BIT(14));
+
+ ret = tc358768_clear_error(priv);
+ if (ret)
+ dev_warn(priv->dev, "Software disable failed: %d\n", ret);
+}
+
+static void tc358768_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+
+ tc358768_hw_disable(priv);
+}
+
+static int tc358768_setup_pll(struct tc358768_priv *priv,
+ const struct drm_display_mode *mode)
+{
+ u32 fbd, prd, frs;
+ int ret;
+
+ ret = tc358768_calc_pll(priv, mode, false);
+ if (ret) {
+ dev_err(priv->dev, "PLL calculation failed: %d\n", ret);
+ return ret;
+ }
+
+ fbd = priv->fbd;
+ prd = priv->prd;
+ frs = priv->frs;
+
+ dev_dbg(priv->dev, "PLL: refclk %lu, fbd %u, prd %u, frs %u\n",
+ clk_get_rate(priv->refclk), fbd, prd, frs);
+ dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, DSIByteClk %u\n",
+ priv->dsiclk * 2, priv->dsiclk, priv->dsiclk / 4);
+ dev_dbg(priv->dev, "PLL: pclk %u (panel: %u)\n",
+ tc358768_pll_to_pclk(priv, priv->dsiclk * 2),
+ mode->clock * 1000);
+
+ /* PRD[15:12] FBD[8:0] */
+ tc358768_write(priv, TC358768_PLLCTL0, (prd << 12) | fbd);
+
+ /* FRS[11:10] LBWS[9:8] CKEN[4] RESETB[1] EN[0] */
+ tc358768_write(priv, TC358768_PLLCTL1,
+ (frs << 10) | (0x2 << 8) | BIT(1) | BIT(0));
+
+ /* wait for lock */
+ usleep_range(1000, 2000);
+
+ /* FRS[11:10] LBWS[9:8] CKEN[4] PLL_CKEN[4] RESETB[1] EN[0] */
+ tc358768_write(priv, TC358768_PLLCTL1,
+ (frs << 10) | (0x2 << 8) | BIT(4) | BIT(1) | BIT(0));
+
+ return tc358768_clear_error(priv);
+}
+
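+/*
+ * The DSI timing math below uses fixed point with 1/1000 ns resolution
+ * ("nsk" values) to keep sub-ns precision in cycle-count conversions.
+ */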
+#define TC358768_PRECISION 1000
+static u32 tc358768_ns_to_cnt(u32 ns, u32 period_nsk)
+{
+ return (ns * TC358768_PRECISION + period_nsk) / period_nsk;
+}
+
+static u32 tc358768_to_ns(u32 nsk)
+{
+ return (nsk / TC358768_PRECISION);
+}
+
+static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+ struct mipi_dsi_device *dsi_dev = priv->output.dev;
+ u32 val, val2, lptxcnt, hact, data_type;
+ const struct drm_display_mode *mode;
+ u32 dsibclk_nsk, dsiclk_nsk, ui_nsk, phy_delay_nsk;
+ u32 dsiclk, dsibclk;
+ int ret, i;
+
+ tc358768_hw_enable(priv);
+
+ ret = tc358768_sw_reset(priv);
+ if (ret) {
+ dev_err(priv->dev, "Software reset failed: %d\n", ret);
+ tc358768_hw_disable(priv);
+ return;
+ }
+
+ mode = &bridge->encoder->crtc->state->adjusted_mode;
+ ret = tc358768_setup_pll(priv, mode);
+ if (ret) {
+ dev_err(priv->dev, "PLL setup failed: %d\n", ret);
+ tc358768_hw_disable(priv);
+ return;
+ }
+
+ dsiclk = priv->dsiclk;
+ dsibclk = dsiclk / 4;
+
+ /* Data Format Control Register */
+ val = BIT(2) | BIT(1) | BIT(0); /* rdswap_en | dsitx_en | txdt_en */
+ switch (dsi_dev->format) {
+ case MIPI_DSI_FMT_RGB888:
+ val |= (0x3 << 4);
+ hact = mode->hdisplay * 3;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ val |= (0x4 << 4);
+ hact = mode->hdisplay * 3;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+ break;
+
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ val |= (0x4 << 4) | BIT(3);
+ hact = mode->hdisplay * 18 / 8;
+ data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+ break;
+
+ case MIPI_DSI_FMT_RGB565:
+ val |= (0x5 << 4);
+ hact = mode->hdisplay * 2;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+ break;
+ default:
+ dev_err(priv->dev, "Invalid data format (%u)\n",
+ dsi_dev->format);
+ tc358768_hw_disable(priv);
+ return;
+ }
+
+ /* VSDly[9:0] */
+ tc358768_write(priv, TC358768_VSDLY, 1);
+
+ tc358768_write(priv, TC358768_DATAFMT, val);
+ tc358768_write(priv, TC358768_DSITX_DT, data_type);
+
+ /* Enable D-PHY (HiZ->LP11) */
+ tc358768_write(priv, TC358768_CLW_CNTRL, 0x0000);
+ /* Enable lanes */
+ for (i = 0; i < dsi_dev->lanes; i++)
+ tc358768_write(priv, TC358768_D0W_CNTRL + i * 4, 0x0000);
+
+ /* DSI Timings */
+ dsibclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION,
+ dsibclk);
+ dsiclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION, dsiclk);
+ ui_nsk = dsiclk_nsk / 2;
+ phy_delay_nsk = dsibclk_nsk + 2 * dsiclk_nsk;
+ dev_dbg(priv->dev, "dsiclk_nsk: %u\n", dsiclk_nsk);
+ dev_dbg(priv->dev, "ui_nsk: %u\n", ui_nsk);
+ dev_dbg(priv->dev, "dsibclk_nsk: %u\n", dsibclk_nsk);
+ dev_dbg(priv->dev, "phy_delay_nsk: %u\n", phy_delay_nsk);
+
+ /* LP11 > 100us for D-PHY Rx Init */
+ val = tc358768_ns_to_cnt(100 * 1000, dsibclk_nsk) - 1;
+ dev_dbg(priv->dev, "LINEINITCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_LINEINITCNT, val);
+
+ /* LPTimeCnt > 50ns */
+ val = tc358768_ns_to_cnt(50, dsibclk_nsk) - 1;
+ lptxcnt = val;
+ dev_dbg(priv->dev, "LPTXTIMECNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_LPTXTIMECNT, val);
+
+ /* 38ns < TCLK_PREPARE < 95ns */
+ val = tc358768_ns_to_cnt(65, dsibclk_nsk) - 1;
+ /* TCLK_ZERO > 300ns */
+ val2 = tc358768_ns_to_cnt(300 + tc358768_to_ns(3 * ui_nsk),
+ dsibclk_nsk);
+ val |= (val2 - tc358768_to_ns(phy_delay_nsk - dsibclk_nsk)) << 8;
+ dev_dbg(priv->dev, "TCLK_HEADERCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TCLK_HEADERCNT, val);
+
+ /* TCLK_TRAIL > 60ns + 3*UI */
+ val = 60 + tc358768_to_ns(3 * ui_nsk);
+ val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 5;
+ dev_dbg(priv->dev, "TCLK_TRAILCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TCLK_TRAILCNT, val);
+
+ /* 40ns + 4*UI < THS_PREPARE < 85ns + 6*UI */
+ val = 50 + tc358768_to_ns(4 * ui_nsk);
+ val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
+ /* THS_ZERO > 145ns + 10*UI */
+ val2 = tc358768_ns_to_cnt(145 - tc358768_to_ns(ui_nsk), dsibclk_nsk);
+ val |= (val2 - tc358768_to_ns(phy_delay_nsk)) << 8;
+ dev_dbg(priv->dev, "THS_HEADERCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_THS_HEADERCNT, val);
+
+ /* TWAKEUP > 1ms in lptxcnt steps */
+ val = tc358768_ns_to_cnt(1020000, dsibclk_nsk);
+ val = val / (lptxcnt + 1) - 1;
+ dev_dbg(priv->dev, "TWAKEUP: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TWAKEUP, val);
+
+ /* TCLK_POSTCNT > 60ns + 52*UI */
+ val = tc358768_ns_to_cnt(60 + tc358768_to_ns(52 * ui_nsk),
+ dsibclk_nsk) - 3;
+ dev_dbg(priv->dev, "TCLK_POSTCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TCLK_POSTCNT, val);
+
+ /* 60ns + 4*UI < THS_TRAIL < 105ns + 12*UI */
+ val = tc358768_ns_to_cnt(60 + tc358768_to_ns(15 * ui_nsk),
+ dsibclk_nsk) - 5;
+ dev_dbg(priv->dev, "THS_TRAILCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_THS_TRAILCNT, val);
+
+ val = BIT(0);
+ for (i = 0; i < dsi_dev->lanes; i++)
+ val |= BIT(i + 1);
+ tc358768_write(priv, TC358768_HSTXVREGEN, val);
+
+ if (!(dsi_dev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+ tc358768_write(priv, TC358768_TXOPTIONCNTRL, 0x1);
+
+ /* TXTAGOCNT[26:16] RXTASURECNT[10:0] */
+ val = tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk * 4);
+ val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
+ val2 = tc358768_ns_to_cnt(tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk),
+ dsibclk_nsk) - 2;
+ val |= val2 << 16;
+ dev_dbg(priv->dev, "BTACNTRL1: 0x%x\n", val);
+ tc358768_write(priv, TC358768_BTACNTRL1, val);
+
+ /* START[0] */
+ tc358768_write(priv, TC358768_STARTCNTRL, 1);
+
+ /* Set event mode */
+ tc358768_write(priv, TC358768_DSI_EVENT, 1);
+
+ /* vsw (+ vbp) */
+ tc358768_write(priv, TC358768_DSI_VSW,
+ mode->vtotal - mode->vsync_start);
+ /* vbp (not used in event mode) */
+ tc358768_write(priv, TC358768_DSI_VBPR, 0);
+ /* vact */
+ tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
+
+ /* (hsw + hbp) * byteclk * ndl / pclk */
+ val = (u32)div_u64((mode->htotal - mode->hsync_start) *
+ ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+ mode->clock * 1000);
+ tc358768_write(priv, TC358768_DSI_HSW, val);
+ /* hbp (not used in event mode) */
+ tc358768_write(priv, TC358768_DSI_HBPR, 0);
+ /* hact (bytes) */
+ tc358768_write(priv, TC358768_DSI_HACT, hact);
+
+ /* VSYNC polarity */
+ if (!(mode->flags & DRM_MODE_FLAG_NVSYNC))
+ tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5), BIT(5));
+ /* HSYNC polarity */
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0), BIT(0));
+
+ /* Start DSI Tx */
+ tc358768_write(priv, TC358768_DSI_START, 0x1);
+
+ /* Configure DSI_Control register */
+ val = TC358768_DSI_CONFW_MODE_CLR | TC358768_DSI_CONFW_ADDR_DSI_CONTROL;
+ val |= TC358768_DSI_CONTROL_TXMD | TC358768_DSI_CONTROL_HSCKMD |
+ 0x3 << 1 | TC358768_DSI_CONTROL_EOTDIS;
+ tc358768_write(priv, TC358768_DSI_CONFW, val);
+
+ val = TC358768_DSI_CONFW_MODE_SET | TC358768_DSI_CONFW_ADDR_DSI_CONTROL;
+ val |= (dsi_dev->lanes - 1) << 1;
+
+ if (!(dsi_dev->mode_flags & MIPI_DSI_MODE_LPM))
+ val |= TC358768_DSI_CONTROL_TXMD;
+
+ if (!(dsi_dev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+ val |= TC358768_DSI_CONTROL_HSCKMD;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_EOT_PACKET)
+ val |= TC358768_DSI_CONTROL_EOTDIS;
+
+ tc358768_write(priv, TC358768_DSI_CONFW, val);
+
+ val = TC358768_DSI_CONFW_MODE_CLR | TC358768_DSI_CONFW_ADDR_DSI_CONTROL;
+ val |= TC358768_DSI_CONTROL_DIS_MODE; /* DSI mode */
+ tc358768_write(priv, TC358768_DSI_CONFW, val);
+
+ ret = tc358768_clear_error(priv);
+ if (ret) {
+ dev_err(priv->dev, "Bridge pre_enable failed: %d\n", ret);
+ tc358768_bridge_disable(bridge);
+ tc358768_bridge_post_disable(bridge);
+ }
+}
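
The DSI_HSW value programmed in the pre-enable path above converts horizontal timings from the pixel-clock domain into DSI byte-clock counts: hsw + hbp equals mode->htotal - mode->hsync_start in pixels, the byte clock is dsiclk / 4, and the result scales with the number of lanes. A minimal user-space sketch of that arithmetic, with illustrative numbers rather than values from a real panel:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative 720p-class horizontal timings (not a real panel). */
	uint32_t htotal = 1344, hsync_start = 1280;	/* pixels */
	uint32_t pclk_khz = 74250;			/* mode->clock */
	uint64_t dsiclk_hz = 486000000;			/* priv->dsiclk */
	uint32_t lanes = 4;				/* priv->dsi_lanes */

	/* (hsw + hbp) * byteclk * ndl / pclk, with byteclk = dsiclk / 4 */
	uint64_t hsw_hbp = htotal - hsync_start;
	uint64_t val = hsw_hbp * (dsiclk_hz / 4) * lanes /
		       ((uint64_t)pclk_khz * 1000);

	printf("DSI_HSW = %llu byte clocks\n", (unsigned long long)val);
	return 0;
}
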
+
+static void tc358768_bridge_enable(struct drm_bridge *bridge)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+ int ret;
+
+ if (!priv->enabled) {
+ dev_err(priv->dev, "Bridge is not enabled\n");
+ return;
+ }
+
+ /* clear FrmStop and RstPtr */
+ tc358768_update_bits(priv, TC358768_PP_MISC, 0x3 << 14, 0);
+
+ /* set PP_en */
+ tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), BIT(6));
+
+ ret = tc358768_clear_error(priv);
+ if (ret) {
+ dev_err(priv->dev, "Bridge enable failed: %d\n", ret);
+ tc358768_bridge_disable(bridge);
+ tc358768_bridge_post_disable(bridge);
+ }
+}
+
+static const struct drm_bridge_funcs tc358768_bridge_funcs = {
+ .attach = tc358768_bridge_attach,
+ .mode_valid = tc358768_bridge_mode_valid,
+ .pre_enable = tc358768_bridge_pre_enable,
+ .enable = tc358768_bridge_enable,
+ .disable = tc358768_bridge_disable,
+ .post_disable = tc358768_bridge_post_disable,
+};
+
+static const struct drm_bridge_timings default_tc358768_timings = {
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
+ | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_DE_HIGH,
+};
+
+static bool tc358768_is_reserved_reg(unsigned int reg)
+{
+ switch (reg) {
+ case 0x114 ... 0x13f:
+ case 0x200:
+ case 0x20c:
+ case 0x400 ... 0x408:
+ case 0x41c ... 0x42f:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool tc358768_writeable_reg(struct device *dev, unsigned int reg)
+{
+ if (tc358768_is_reserved_reg(reg))
+ return false;
+
+ switch (reg) {
+ case TC358768_CHIPID:
+ case TC358768_FIFOSTATUS:
+ case TC358768_DSITXSTATUS ... (TC358768_DSITXSTATUS + 2):
+ case TC358768_DSI_CONTROL ... (TC358768_DSI_INT_ENA + 2):
+ case TC358768_DSICMD_RDFIFO ... (TC358768_DSI_ERR_HALT + 2):
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool tc358768_readable_reg(struct device *dev, unsigned int reg)
+{
+ if (tc358768_is_reserved_reg(reg))
+ return false;
+
+ switch (reg) {
+ case TC358768_STARTCNTRL:
+ case TC358768_DSI_CONFW ... (TC358768_DSI_CONFW + 2):
+ case TC358768_DSI_INT_CLR ... (TC358768_DSI_INT_CLR + 2):
+ case TC358768_DSI_START ... (TC358768_DSI_START + 2):
+ case TC358768_DBG_DATA:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static const struct regmap_config tc358768_regmap_config = {
+ .name = "tc358768",
+ .reg_bits = 16,
+ .val_bits = 16,
+ .max_register = TC358768_DSI_HACT,
+ .cache_type = REGCACHE_NONE,
+ .writeable_reg = tc358768_writeable_reg,
+ .readable_reg = tc358768_readable_reg,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+};
+
+static const struct i2c_device_id tc358768_i2c_ids[] = {
+ { "tc358768", 0 },
+ { "tc358778", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tc358768_i2c_ids);
+
+static const struct of_device_id tc358768_of_ids[] = {
+ { .compatible = "toshiba,tc358768", },
+ { .compatible = "toshiba,tc358778", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tc358768_of_ids);
+
+static int tc358768_get_regulators(struct tc358768_priv *priv)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(priv->supplies); ++i)
+ priv->supplies[i].supply = tc358768_supplies[i];
+
+ ret = devm_regulator_bulk_get(priv->dev, ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret < 0)
+ dev_err(priv->dev, "failed to get regulators: %d\n", ret);
+
+ return ret;
+}
+
+static int tc358768_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tc358768_priv *priv;
+ struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+ priv->dev = dev;
+
+ ret = tc358768_get_regulators(priv);
+ if (ret)
+ return ret;
+
+ priv->refclk = devm_clk_get(dev, "refclk");
+ if (IS_ERR(priv->refclk))
+ return PTR_ERR(priv->refclk);
+
+ /*
+ * RESX is active low; to keep the tc358768 initially disabled (held in
+ * reset), the GPIO line must be LOW. This is the ASSERTED state of a
+ * GPIO_ACTIVE_LOW line (GPIOD_OUT_HIGH == ASSERTED).
+ */
+ priv->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset_gpio))
+ return PTR_ERR(priv->reset_gpio);
+
+ priv->regmap = devm_regmap_init_i2c(client, &tc358768_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(dev, "Failed to init regmap\n");
+ return PTR_ERR(priv->regmap);
+ }
+
+ priv->dsi_host.dev = dev;
+ priv->dsi_host.ops = &tc358768_dsi_host_ops;
+
+ priv->bridge.funcs = &tc358768_bridge_funcs;
+ priv->bridge.timings = &default_tc358768_timings;
+ priv->bridge.of_node = np;
+
+ i2c_set_clientdata(client, priv);
+
+ return mipi_dsi_host_register(&priv->dsi_host);
+}
+
+static int tc358768_i2c_remove(struct i2c_client *client)
+{
+ struct tc358768_priv *priv = i2c_get_clientdata(client);
+
+ mipi_dsi_host_unregister(&priv->dsi_host);
+
+ return 0;
+}
+
+static struct i2c_driver tc358768_driver = {
+ .driver = {
+ .name = "tc358768",
+ .of_match_table = tc358768_of_ids,
+ },
+ .id_table = tc358768_i2c_ids,
+ .probe = tc358768_i2c_probe,
+ .remove = tc358768_i2c_remove,
+};
+module_i2c_driver(tc358768_driver);
+
+MODULE_AUTHOR("Peter Ujfalusi <[email protected]>");
+MODULE_DESCRIPTION("TC358768AXBG/TC358778XBG DSI bridge");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index 3d74129b2995..97d8129760e9 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -42,11 +42,12 @@ static inline struct thc63_dev *to_thc63(struct drm_bridge *bridge)
return container_of(bridge, struct thc63_dev, bridge);
}
-static int thc63_attach(struct drm_bridge *bridge)
+static int thc63_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct thc63_dev *thc63 = to_thc63(bridge);
- return drm_bridge_attach(bridge->encoder, thc63->next, bridge);
+ return drm_bridge_attach(bridge->encoder, thc63->next, bridge, flags);
}
static enum drm_mode_status thc63_mode_valid(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 9a2dd986afa5..6ad688b320ae 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -51,6 +51,7 @@
#define SN_ENH_FRAME_REG 0x5A
#define VSTREAM_ENABLE BIT(3)
#define SN_DATA_FORMAT_REG 0x5B
+#define BPP_18_RGB BIT(0)
#define SN_HPD_DISABLE_REG 0x5C
#define HPD_DISABLE BIT(0)
#define SN_AUX_WDATA_REG(x) (0x64 + (x))
@@ -100,6 +101,7 @@ struct ti_sn_bridge {
struct drm_panel *panel;
struct gpio_desc *enable_gpio;
struct regulator_bulk_data supplies[SN_REGULATOR_SUPPLY_NUM];
+ int dp_lanes;
};
static const struct regmap_range ti_sn_bridge_volatile_ranges[] = {
@@ -264,7 +266,8 @@ static int ti_sn_bridge_parse_regulators(struct ti_sn_bridge *pdata)
pdata->supplies);
}
-static int ti_sn_bridge_attach(struct drm_bridge *bridge)
+static int ti_sn_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
int ret, val;
struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
@@ -275,6 +278,11 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge)
.node = NULL,
};
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
ret = drm_connector_init(bridge->dev, &pdata->connector,
&ti_sn_bridge_connector_funcs,
DRM_MODE_CONNECTOR_eDP);
@@ -312,7 +320,7 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge)
goto err_dsi_host;
}
- /* TODO: setting to 4 lanes always for now */
+ /* TODO: setting to 4 MIPI lanes always for now */
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
@@ -417,6 +425,32 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn_bridge *pdata)
REFCLK_FREQ(i));
}
+static void ti_sn_bridge_set_dsi_rate(struct ti_sn_bridge *pdata)
+{
+ unsigned int bit_rate_mhz, clk_freq_mhz;
+ unsigned int val;
+ struct drm_display_mode *mode =
+ &pdata->bridge.encoder->crtc->state->adjusted_mode;
+
+ /* set DSIA clk frequency */
+ bit_rate_mhz = (mode->clock / 1000) *
+ mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
+ clk_freq_mhz = bit_rate_mhz / (pdata->dsi->lanes * 2);
+
+ /* for each increment in val, frequency increases by 5MHz */
+ val = (MIN_DSI_CLK_FREQ_MHZ / 5) +
+ (((clk_freq_mhz - MIN_DSI_CLK_FREQ_MHZ) / 5) & 0xFF);
+ regmap_write(pdata->regmap, SN_DSIA_CLK_FREQ_REG, val);
+}
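
A worked example of the register encoding above, assuming MIN_DSI_CLK_FREQ_MHZ is 40 (the constant is defined earlier in the driver and not visible in this hunk) and a hypothetical 148.5 MHz pixel clock with RGB888 over four lanes:

#include <stdio.h>

int main(void)
{
	unsigned int clock_khz = 148500, bpp = 24, lanes = 4;
	unsigned int min_mhz = 40;	/* assumed MIN_DSI_CLK_FREQ_MHZ */

	unsigned int bit_rate_mhz = (clock_khz / 1000) * bpp;	/* 3552 */
	unsigned int clk_freq_mhz = bit_rate_mhz / (lanes * 2);	/* 444 */
	unsigned int val = (min_mhz / 5) +
			   (((clk_freq_mhz - min_mhz) / 5) & 0xFF);

	/* val == 88: the register steps in 5 MHz units, 88 * 5 = 440 MHz */
	printf("SN_DSIA_CLK_FREQ_REG = 0x%02x\n", val);
	return 0;
}
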
+
+static unsigned int ti_sn_bridge_get_bpp(struct ti_sn_bridge *pdata)
+{
+ if (pdata->connector.display_info.bpc <= 6)
+ return 18;
+ else
+ return 24;
+}
+
/**
* LUT index corresponds to register value and
* LUT values corresponds to dp data rate supported
@@ -426,32 +460,106 @@ static const unsigned int ti_sn_bridge_dp_rate_lut[] = {
0, 1620, 2160, 2430, 2700, 3240, 4320, 5400
};
-static void ti_sn_bridge_set_dsi_dp_rate(struct ti_sn_bridge *pdata)
+static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn_bridge *pdata)
{
- unsigned int bit_rate_mhz, clk_freq_mhz, dp_rate_mhz;
- unsigned int val, i;
+ unsigned int bit_rate_khz, dp_rate_mhz;
+ unsigned int i;
struct drm_display_mode *mode =
&pdata->bridge.encoder->crtc->state->adjusted_mode;
- /* set DSIA clk frequency */
- bit_rate_mhz = (mode->clock / 1000) *
- mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
- clk_freq_mhz = bit_rate_mhz / (pdata->dsi->lanes * 2);
+ /* Calculate minimum bit rate based on our pixel clock. */
+ bit_rate_khz = mode->clock * ti_sn_bridge_get_bpp(pdata);
- /* for each increment in val, frequency increases by 5MHz */
- val = (MIN_DSI_CLK_FREQ_MHZ / 5) +
- (((clk_freq_mhz - MIN_DSI_CLK_FREQ_MHZ) / 5) & 0xFF);
- regmap_write(pdata->regmap, SN_DSIA_CLK_FREQ_REG, val);
+ /* Calculate minimum DP data rate, taking 80% as per DP spec */
+ dp_rate_mhz = DIV_ROUND_UP(bit_rate_khz * DP_CLK_FUDGE_NUM,
+ 1000 * pdata->dp_lanes * DP_CLK_FUDGE_DEN);
- /* set DP data rate */
- dp_rate_mhz = ((bit_rate_mhz / pdata->dsi->lanes) * DP_CLK_FUDGE_NUM) /
- DP_CLK_FUDGE_DEN;
- for (i = 0; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut) - 1; i++)
+ for (i = 1; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut) - 1; i++)
if (ti_sn_bridge_dp_rate_lut[i] > dp_rate_mhz)
break;
- regmap_update_bits(pdata->regmap, SN_DATARATE_CONFIG_REG,
- DP_DATARATE_MASK, DP_DATARATE(i));
+ return i;
+}
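
To make the fudge-factor arithmetic concrete: assuming DP_CLK_FUDGE_NUM/DP_CLK_FUDGE_DEN are 10/8 (the 80% symbol-efficiency rule the comment refers to; the constants are defined elsewhere in the driver), a hypothetical 148.5 MHz, 24 bpp mode on two DP lanes lands on index 3 of the rate table:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static const unsigned int rate_lut[] = {
	0, 1620, 2160, 2430, 2700, 3240, 4320, 5400
};

int main(void)
{
	unsigned int clock_khz = 148500, bpp = 24, lanes = 2;
	unsigned long bit_rate_khz = (unsigned long)clock_khz * bpp;
	/* DP_CLK_FUDGE_NUM/DEN assumed to be 10/8 (the 80% rule). */
	unsigned long dp_rate_mhz = DIV_ROUND_UP(bit_rate_khz * 10,
						 1000UL * lanes * 8);
	unsigned int i;

	/* dp_rate_mhz == 2228; first entry above it is 2430, at index 3 */
	for (i = 1; i < sizeof(rate_lut) / sizeof(rate_lut[0]) - 1; i++)
		if (rate_lut[i] > dp_rate_mhz)
			break;

	printf("min dp_rate_idx = %u (lut value %u)\n", i, rate_lut[i]);
	return 0;
}
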
+
+static void ti_sn_bridge_read_valid_rates(struct ti_sn_bridge *pdata,
+ bool rate_valid[])
+{
+ unsigned int rate_per_200khz;
+ unsigned int rate_mhz;
+ u8 dpcd_val;
+ int ret;
+ int i, j;
+
+ ret = drm_dp_dpcd_readb(&pdata->aux, DP_EDP_DPCD_REV, &dpcd_val);
+ if (ret != 1) {
+ DRM_DEV_ERROR(pdata->dev,
+ "Can't read eDP rev (%d), assuming 1.1\n", ret);
+ dpcd_val = DP_EDP_11;
+ }
+
+ if (dpcd_val >= DP_EDP_14) {
+ /* eDP 1.4 devices must provide a custom table */
+ __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+
+ ret = drm_dp_dpcd_read(&pdata->aux, DP_SUPPORTED_LINK_RATES,
+ sink_rates, sizeof(sink_rates));
+
+ if (ret != sizeof(sink_rates)) {
+ DRM_DEV_ERROR(pdata->dev,
+ "Can't read supported rate table (%d)\n", ret);
+
+ /* By zeroing we'll fall back to DP_MAX_LINK_RATE. */
+ memset(sink_rates, 0, sizeof(sink_rates));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
+ rate_per_200khz = le16_to_cpu(sink_rates[i]);
+
+ if (!rate_per_200khz)
+ break;
+
+ rate_mhz = rate_per_200khz * 200 / 1000;
+ for (j = 0;
+ j < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
+ j++) {
+ if (ti_sn_bridge_dp_rate_lut[j] == rate_mhz)
+ rate_valid[j] = true;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut); i++) {
+ if (rate_valid[i])
+ return;
+ }
+ DRM_DEV_ERROR(pdata->dev,
+ "No matching eDP rates in table; falling back\n");
+ }
+
+ /* On older versions best we can do is use DP_MAX_LINK_RATE */
+ ret = drm_dp_dpcd_readb(&pdata->aux, DP_MAX_LINK_RATE, &dpcd_val);
+ if (ret != 1) {
+ DRM_DEV_ERROR(pdata->dev,
+ "Can't read max rate (%d); assuming 5.4 GHz\n",
+ ret);
+ dpcd_val = DP_LINK_BW_5_4;
+ }
+
+ switch (dpcd_val) {
+ default:
+ DRM_DEV_ERROR(pdata->dev,
+ "Unexpected max rate (%#x); assuming 5.4 GHz\n",
+ (int)dpcd_val);
+ /* fall through */
+ case DP_LINK_BW_5_4:
+ rate_valid[7] = 1;
+ /* fall through */
+ case DP_LINK_BW_2_7:
+ rate_valid[4] = 1;
+ /* fall through */
+ case DP_LINK_BW_1_62:
+ rate_valid[1] = 1;
+ break;
+ }
}
static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata)
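
The eDP 1.4 rate table read in ti_sn_bridge_read_valid_rates() stores each DPCD entry as a little-endian 16-bit count of 200 kHz units, so the conversion to the MHz values used by the lookup table is plain scaling. A small sketch with hypothetical sink entries:

#include <stdio.h>

int main(void)
{
	/* Hypothetical DPCD entries, already converted from __le16. */
	unsigned int sink_rates[] = { 8100, 10800, 13500 };
	unsigned int i;

	for (i = 0; i < sizeof(sink_rates) / sizeof(sink_rates[0]); i++) {
		unsigned int rate_mhz = sink_rates[i] * 200 / 1000;

		/* 8100 -> 1620, 10800 -> 2160, 13500 -> 2700 */
		printf("%u * 200 kHz = %u MHz\n", sink_rates[i], rate_mhz);
	}
	return 0;
}
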
@@ -493,24 +601,30 @@ static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata)
usleep_range(10000, 10500); /* 10ms delay recommended by spec */
}
-static void ti_sn_bridge_enable(struct drm_bridge *bridge)
+static unsigned int ti_sn_get_max_lanes(struct ti_sn_bridge *pdata)
{
- struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
- unsigned int val;
+ u8 data;
int ret;
- /* DSI_A lane config */
- val = CHA_DSI_LANES(4 - pdata->dsi->lanes);
- regmap_update_bits(pdata->regmap, SN_DSI_LANES_REG,
- CHA_DSI_LANES_MASK, val);
+ ret = drm_dp_dpcd_readb(&pdata->aux, DP_MAX_LANE_COUNT, &data);
+ if (ret != 1) {
+ DRM_DEV_ERROR(pdata->dev,
+ "Can't read lane count (%d); assuming 4\n", ret);
+ return 4;
+ }
- /* DP lane config */
- val = DP_NUM_LANES(pdata->dsi->lanes - 1);
- regmap_update_bits(pdata->regmap, SN_SSC_CONFIG_REG, DP_NUM_LANES_MASK,
- val);
+ return data & DP_LANE_COUNT_MASK;
+}
- /* set dsi/dp clk frequency value */
- ti_sn_bridge_set_dsi_dp_rate(pdata);
+static int ti_sn_link_training(struct ti_sn_bridge *pdata, int dp_rate_idx,
+ const char **last_err_str)
+{
+ unsigned int val;
+ int ret;
+
+ /* set dp clk frequency value */
+ regmap_update_bits(pdata->regmap, SN_DATARATE_CONFIG_REG,
+ DP_DATARATE_MASK, DP_DATARATE(dp_rate_idx));
/* enable DP PLL */
regmap_write(pdata->regmap, SN_PLL_ENABLE_REG, 1);
@@ -519,10 +633,62 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge)
val & DPPLL_SRC_DP_PLL_LOCK, 1000,
50 * 1000);
if (ret) {
- DRM_ERROR("DP_PLL_LOCK polling failed (%d)\n", ret);
- return;
+ *last_err_str = "DP_PLL_LOCK polling failed";
+ goto exit;
+ }
+
+ /* Semi auto link training mode */
+ regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
+ ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
+ val == ML_TX_MAIN_LINK_OFF ||
+ val == ML_TX_NORMAL_MODE, 1000,
+ 500 * 1000);
+ if (ret) {
+ *last_err_str = "Training complete polling failed";
+ } else if (val == ML_TX_MAIN_LINK_OFF) {
+ *last_err_str = "Link training failed, link is off";
+ ret = -EIO;
}
+exit:
+ /* Disable the PLL if we failed */
+ if (ret)
+ regmap_write(pdata->regmap, SN_PLL_ENABLE_REG, 0);
+
+ return ret;
+}
+
+static void ti_sn_bridge_enable(struct drm_bridge *bridge)
+{
+ struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+ bool rate_valid[ARRAY_SIZE(ti_sn_bridge_dp_rate_lut)] = { };
+ const char *last_err_str = "No supported DP rate";
+ int dp_rate_idx;
+ unsigned int val;
+ int ret = -EINVAL;
+
+ /*
+ * Run with the maximum number of lanes that the DP sink supports.
+ *
+ * Depending on use cases, we might want to revisit this later because:
+ * - It's plausible that someone may have run fewer lines to the
+ * sink than the sink actually supports, assuming that the lines
+ * will just be driven at a higher rate.
+ * - The DP spec seems to indicate that it's more important to minimize
+ * the number of lanes than the link rate.
+ *
+ * If we do revisit, it would be important to measure the power impact.
+ */
+ pdata->dp_lanes = ti_sn_get_max_lanes(pdata);
+
+ /* DSI_A lane config */
+ val = CHA_DSI_LANES(4 - pdata->dsi->lanes);
+ regmap_update_bits(pdata->regmap, SN_DSI_LANES_REG,
+ CHA_DSI_LANES_MASK, val);
+
+ /* set dsi clk frequency value */
+ ti_sn_bridge_set_dsi_rate(pdata);
+
/**
* The SN65DSI86 only supports ASSR Display Authentication method and
* this method is enabled by default. An eDP panel must support this
@@ -532,17 +698,30 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge)
drm_dp_dpcd_writeb(&pdata->aux, DP_EDP_CONFIGURATION_SET,
DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
- /* Semi auto link training mode */
- regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
- ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
- val == ML_TX_MAIN_LINK_OFF ||
- val == ML_TX_NORMAL_MODE, 1000,
- 500 * 1000);
+ /* Set the DP output format (18 bpp or 24 bpp) */
+ val = (ti_sn_bridge_get_bpp(pdata) == 18) ? BPP_18_RGB : 0;
+ regmap_update_bits(pdata->regmap, SN_DATA_FORMAT_REG, BPP_18_RGB, val);
+
+ /* DP lane config */
+ val = DP_NUM_LANES(min(pdata->dp_lanes, 3));
+ regmap_update_bits(pdata->regmap, SN_SSC_CONFIG_REG, DP_NUM_LANES_MASK,
+ val);
+
+ ti_sn_bridge_read_valid_rates(pdata, rate_valid);
+
+ /* Train until we run out of rates */
+ for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata);
+ dp_rate_idx < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
+ dp_rate_idx++) {
+ if (!rate_valid[dp_rate_idx])
+ continue;
+
+ ret = ti_sn_link_training(pdata, dp_rate_idx, &last_err_str);
+ if (!ret)
+ break;
+ }
if (ret) {
- DRM_ERROR("Training complete polling failed (%d)\n", ret);
- return;
- } else if (val == ML_TX_MAIN_LINK_OFF) {
- DRM_ERROR("Link training failed, link is off\n");
+ DRM_DEV_ERROR(pdata->dev, "%s (%d)\n", last_err_str, ret);
return;
}
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 6f6d6d1e60ae..e3eb6364c0f7 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -4,14 +4,12 @@
* Author: Jyri Sarha <[email protected]>
*/
-#include <linux/delay.h>
-#include <linux/fwnode.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
-#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#include <linux/workqueue.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
@@ -24,16 +22,13 @@
struct tfp410 {
struct drm_bridge bridge;
struct drm_connector connector;
- unsigned int connector_type;
u32 bus_format;
- struct i2c_adapter *ddc;
- struct gpio_desc *hpd;
- int hpd_irq;
struct delayed_work hpd_work;
struct gpio_desc *powerdown;
struct drm_bridge_timings timings;
+ struct drm_bridge *next_bridge;
struct device *dev;
};
@@ -56,13 +51,18 @@ static int tfp410_get_modes(struct drm_connector *connector)
struct edid *edid;
int ret;
- if (!dvi->ddc)
- goto fallback;
+ edid = drm_bridge_get_edid(dvi->next_bridge, connector);
+ if (IS_ERR_OR_NULL(edid)) {
+ if (edid != ERR_PTR(-ENOTSUPP))
+ DRM_INFO("EDID read failed. Fallback to standard modes\n");
- edid = drm_get_edid(connector, dvi->ddc);
- if (!edid) {
- DRM_INFO("EDID read failed. Fallback to standard modes\n");
- goto fallback;
+ /*
+ * No EDID; fall back to the XGA standard modes, and prefer a mode
+ * pretty much anything can handle.
+ */
+ ret = drm_add_modes_noedid(connector, 1920, 1200);
+ drm_set_preferred_mode(connector, 1024, 768);
+ return ret;
}
drm_connector_update_edid_property(connector, edid);
@@ -72,15 +72,6 @@ static int tfp410_get_modes(struct drm_connector *connector)
kfree(edid);
return ret;
-
-fallback:
- /* No EDID, fallback on the XGA standard modes */
- ret = drm_add_modes_noedid(connector, 1920, 1200);
-
- /* And prefer a mode pretty much anything can handle */
- drm_set_preferred_mode(connector, 1024, 768);
-
- return ret;
}
static const struct drm_connector_helper_funcs tfp410_con_helper_funcs = {
@@ -92,21 +83,7 @@ tfp410_connector_detect(struct drm_connector *connector, bool force)
{
struct tfp410 *dvi = drm_connector_to_tfp410(connector);
- if (dvi->hpd) {
- if (gpiod_get_value_cansleep(dvi->hpd))
- return connector_status_connected;
- else
- return connector_status_disconnected;
- }
-
- if (dvi->ddc) {
- if (drm_probe_ddc(dvi->ddc))
- return connector_status_connected;
- else
- return connector_status_disconnected;
- }
-
- return connector_status_unknown;
+ return drm_bridge_detect(dvi->next_bridge);
}
static const struct drm_connector_funcs tfp410_con_funcs = {
@@ -118,41 +95,84 @@ static const struct drm_connector_funcs tfp410_con_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int tfp410_attach(struct drm_bridge *bridge)
+static void tfp410_hpd_work_func(struct work_struct *work)
+{
+ struct tfp410 *dvi;
+
+ dvi = container_of(work, struct tfp410, hpd_work.work);
+
+ if (dvi->bridge.dev)
+ drm_helper_hpd_irq_event(dvi->bridge.dev);
+}
+
+static void tfp410_hpd_callback(void *arg, enum drm_connector_status status)
+{
+ struct tfp410 *dvi = arg;
+
+ mod_delayed_work(system_wq, &dvi->hpd_work,
+ msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
+}
+
+static int tfp410_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
int ret;
+ ret = drm_bridge_attach(bridge->encoder, dvi->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret < 0)
+ return ret;
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+ return 0;
+
if (!bridge->encoder) {
dev_err(dvi->dev, "Missing encoder\n");
return -ENODEV;
}
- if (dvi->hpd_irq >= 0)
+ if (dvi->next_bridge->ops & DRM_BRIDGE_OP_DETECT)
dvi->connector.polled = DRM_CONNECTOR_POLL_HPD;
else
dvi->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ if (dvi->next_bridge->ops & DRM_BRIDGE_OP_HPD) {
+ INIT_DELAYED_WORK(&dvi->hpd_work, tfp410_hpd_work_func);
+ drm_bridge_hpd_enable(dvi->next_bridge, tfp410_hpd_callback,
+ dvi);
+ }
+
drm_connector_helper_add(&dvi->connector,
&tfp410_con_helper_funcs);
ret = drm_connector_init_with_ddc(bridge->dev, &dvi->connector,
&tfp410_con_funcs,
- dvi->connector_type,
- dvi->ddc);
+ dvi->next_bridge->type,
+ dvi->next_bridge->ddc);
if (ret) {
- dev_err(dvi->dev, "drm_connector_init() failed: %d\n", ret);
+ dev_err(dvi->dev, "drm_connector_init_with_ddc() failed: %d\n",
+ ret);
return ret;
}
drm_display_info_set_bus_formats(&dvi->connector.display_info,
&dvi->bus_format, 1);
- drm_connector_attach_encoder(&dvi->connector,
- bridge->encoder);
+ drm_connector_attach_encoder(&dvi->connector, bridge->encoder);
return 0;
}
+static void tfp410_detach(struct drm_bridge *bridge)
+{
+ struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
+
+ if (dvi->connector.dev && dvi->next_bridge->ops & DRM_BRIDGE_OP_HPD) {
+ drm_bridge_hpd_disable(dvi->next_bridge);
+ cancel_delayed_work_sync(&dvi->hpd_work);
+ }
+}
+
static void tfp410_enable(struct drm_bridge *bridge)
{
struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
@@ -167,31 +187,25 @@ static void tfp410_disable(struct drm_bridge *bridge)
gpiod_set_value_cansleep(dvi->powerdown, 1);
}
-static const struct drm_bridge_funcs tfp410_bridge_funcs = {
- .attach = tfp410_attach,
- .enable = tfp410_enable,
- .disable = tfp410_disable,
-};
-
-static void tfp410_hpd_work_func(struct work_struct *work)
+static enum drm_mode_status tfp410_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
{
- struct tfp410 *dvi;
+ if (mode->clock < 25000)
+ return MODE_CLOCK_LOW;
- dvi = container_of(work, struct tfp410, hpd_work.work);
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
- if (dvi->bridge.dev)
- drm_helper_hpd_irq_event(dvi->bridge.dev);
+ return MODE_OK;
}
-static irqreturn_t tfp410_hpd_irq_thread(int irq, void *arg)
-{
- struct tfp410 *dvi = arg;
-
- mod_delayed_work(system_wq, &dvi->hpd_work,
- msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
-
- return IRQ_HANDLED;
-}
+static const struct drm_bridge_funcs tfp410_bridge_funcs = {
+ .attach = tfp410_attach,
+ .detach = tfp410_detach,
+ .enable = tfp410_enable,
+ .disable = tfp410_disable,
+ .mode_valid = tfp410_mode_valid,
+};
static const struct drm_bridge_timings tfp410_default_timings = {
.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
@@ -270,51 +284,9 @@ static int tfp410_parse_timings(struct tfp410 *dvi, bool i2c)
return 0;
}
-static int tfp410_get_connector_properties(struct tfp410 *dvi)
-{
- struct device_node *connector_node, *ddc_phandle;
- int ret = 0;
-
- /* port@1 is the connector node */
- connector_node = of_graph_get_remote_node(dvi->dev->of_node, 1, -1);
- if (!connector_node)
- return -ENODEV;
-
- if (of_device_is_compatible(connector_node, "hdmi-connector"))
- dvi->connector_type = DRM_MODE_CONNECTOR_HDMIA;
- else
- dvi->connector_type = DRM_MODE_CONNECTOR_DVID;
-
- dvi->hpd = fwnode_gpiod_get_index(&connector_node->fwnode,
- "hpd", 0, GPIOD_IN, "hpd");
- if (IS_ERR(dvi->hpd)) {
- ret = PTR_ERR(dvi->hpd);
- dvi->hpd = NULL;
- if (ret == -ENOENT)
- ret = 0;
- else
- goto fail;
- }
-
- ddc_phandle = of_parse_phandle(connector_node, "ddc-i2c-bus", 0);
- if (!ddc_phandle)
- goto fail;
-
- dvi->ddc = of_get_i2c_adapter_by_node(ddc_phandle);
- if (dvi->ddc)
- dev_info(dvi->dev, "Connector's ddc i2c bus found\n");
- else
- ret = -EPROBE_DEFER;
-
- of_node_put(ddc_phandle);
-
-fail:
- of_node_put(connector_node);
- return ret;
-}
-
static int tfp410_init(struct device *dev, bool i2c)
{
+ struct device_node *node;
struct tfp410 *dvi;
int ret;
@@ -326,21 +298,31 @@ static int tfp410_init(struct device *dev, bool i2c)
dvi = devm_kzalloc(dev, sizeof(*dvi), GFP_KERNEL);
if (!dvi)
return -ENOMEM;
+
+ dvi->dev = dev;
dev_set_drvdata(dev, dvi);
dvi->bridge.funcs = &tfp410_bridge_funcs;
dvi->bridge.of_node = dev->of_node;
dvi->bridge.timings = &dvi->timings;
- dvi->dev = dev;
+ dvi->bridge.type = DRM_MODE_CONNECTOR_DVID;
ret = tfp410_parse_timings(dvi, i2c);
if (ret)
- goto fail;
+ return ret;
- ret = tfp410_get_connector_properties(dvi);
- if (ret)
- goto fail;
+ /* Get the next bridge, connected to port@1. */
+ node = of_graph_get_remote_node(dev->of_node, 1, -1);
+ if (!node)
+ return -ENODEV;
+
+ dvi->next_bridge = of_drm_find_bridge(node);
+ of_node_put(node);
+ if (!dvi->next_bridge)
+ return -EPROBE_DEFER;
+
+ /* Get the powerdown GPIO. */
dvi->powerdown = devm_gpiod_get_optional(dev, "powerdown",
GPIOD_OUT_HIGH);
if (IS_ERR(dvi->powerdown)) {
@@ -348,48 +330,18 @@ static int tfp410_init(struct device *dev, bool i2c)
return PTR_ERR(dvi->powerdown);
}
- if (dvi->hpd)
- dvi->hpd_irq = gpiod_to_irq(dvi->hpd);
- else
- dvi->hpd_irq = -ENXIO;
-
- if (dvi->hpd_irq >= 0) {
- INIT_DELAYED_WORK(&dvi->hpd_work, tfp410_hpd_work_func);
-
- ret = devm_request_threaded_irq(dev, dvi->hpd_irq,
- NULL, tfp410_hpd_irq_thread, IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "hdmi-hpd", dvi);
- if (ret) {
- DRM_ERROR("failed to register hpd interrupt\n");
- goto fail;
- }
- }
-
+ /* Register the DRM bridge. */
drm_bridge_add(&dvi->bridge);
return 0;
-fail:
- i2c_put_adapter(dvi->ddc);
- if (dvi->hpd)
- gpiod_put(dvi->hpd);
- return ret;
}
static int tfp410_fini(struct device *dev)
{
struct tfp410 *dvi = dev_get_drvdata(dev);
- if (dvi->hpd_irq >= 0)
- cancel_delayed_work_sync(&dvi->hpd_work);
-
drm_bridge_remove(&dvi->bridge);
- if (dvi->ddc)
- i2c_put_adapter(dvi->ddc);
- if (dvi->hpd)
- gpiod_put(dvi->hpd);
-
return 0;
}
diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
new file mode 100644
index 000000000000..514cbf0eac75
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TPD12S015 HDMI ESD protection & level shifter chip driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated
+ *
+ * Based on the omapdrm-specific encoder-opa362 driver
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_bridge.h>
+
+struct tpd12s015_device {
+ struct drm_bridge bridge;
+
+ struct gpio_desc *ct_cp_hpd_gpio;
+ struct gpio_desc *ls_oe_gpio;
+ struct gpio_desc *hpd_gpio;
+ int hpd_irq;
+
+ struct drm_bridge *next_bridge;
+};
+
+static inline struct tpd12s015_device *to_tpd12s015(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct tpd12s015_device, bridge);
+}
+
+static int tpd12s015_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+ int ret;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ ret = drm_bridge_attach(bridge->encoder, tpd->next_bridge,
+ bridge, flags);
+ if (ret < 0)
+ return ret;
+
+ gpiod_set_value_cansleep(tpd->ls_oe_gpio, 1);
+
+ /* DC-DC converter needs at most 300us to get to 90% of 5V. */
+ usleep_range(300, 1000);
+
+ return 0;
+}
+
+static void tpd12s015_detach(struct drm_bridge *bridge)
+{
+ struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+
+ gpiod_set_value_cansleep(tpd->ls_oe_gpio, 0);
+}
+
+static enum drm_connector_status tpd12s015_detect(struct drm_bridge *bridge)
+{
+ struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+
+ if (gpiod_get_value_cansleep(tpd->hpd_gpio))
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static void tpd12s015_hpd_enable(struct drm_bridge *bridge)
+{
+ struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+
+ gpiod_set_value_cansleep(tpd->ct_cp_hpd_gpio, 1);
+}
+
+static void tpd12s015_hpd_disable(struct drm_bridge *bridge)
+{
+ struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+
+ gpiod_set_value_cansleep(tpd->ct_cp_hpd_gpio, 0);
+}
+
+static const struct drm_bridge_funcs tpd12s015_bridge_funcs = {
+ .attach = tpd12s015_attach,
+ .detach = tpd12s015_detach,
+ .detect = tpd12s015_detect,
+ .hpd_enable = tpd12s015_hpd_enable,
+ .hpd_disable = tpd12s015_hpd_disable,
+};
+
+static irqreturn_t tpd12s015_hpd_isr(int irq, void *data)
+{
+ struct tpd12s015_device *tpd = data;
+ struct drm_bridge *bridge = &tpd->bridge;
+
+ drm_bridge_hpd_notify(bridge, tpd12s015_detect(bridge));
+
+ return IRQ_HANDLED;
+}
+
+static int tpd12s015_probe(struct platform_device *pdev)
+{
+ struct tpd12s015_device *tpd;
+ struct device_node *node;
+ struct gpio_desc *gpio;
+ int ret;
+
+ tpd = devm_kzalloc(&pdev->dev, sizeof(*tpd), GFP_KERNEL);
+ if (!tpd)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, tpd);
+
+ tpd->bridge.funcs = &tpd12s015_bridge_funcs;
+ tpd->bridge.of_node = pdev->dev.of_node;
+ tpd->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+ tpd->bridge.ops = DRM_BRIDGE_OP_DETECT;
+
+ /* Get the next bridge, connected to port@1. */
+ node = of_graph_get_remote_node(pdev->dev.of_node, 1, -1);
+ if (!node)
+ return -ENODEV;
+
+ tpd->next_bridge = of_drm_find_bridge(node);
+ of_node_put(node);
+
+ if (!tpd->next_bridge)
+ return -EPROBE_DEFER;
+
+ /* Get the control and HPD GPIOs. */
+ gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 0,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ tpd->ct_cp_hpd_gpio = gpio;
+
+ gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 1,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ tpd->ls_oe_gpio = gpio;
+
+ gpio = devm_gpiod_get_index(&pdev->dev, NULL, 2, GPIOD_IN);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ tpd->hpd_gpio = gpio;
+
+ /* Register the IRQ if the HPD GPIO is IRQ-capable. */
+ tpd->hpd_irq = gpiod_to_irq(tpd->hpd_gpio);
+ if (tpd->hpd_irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, tpd->hpd_irq, NULL,
+ tpd12s015_hpd_isr,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "tpd12s015 hpd", tpd);
+ if (ret)
+ return ret;
+
+ tpd->bridge.ops |= DRM_BRIDGE_OP_HPD;
+ }
+
+ /* Register the DRM bridge. */
+ drm_bridge_add(&tpd->bridge);
+
+ return 0;
+}
+
+static int __exit tpd12s015_remove(struct platform_device *pdev)
+{
+ struct tpd12s015_device *tpd = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&tpd->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id tpd12s015_of_match[] = {
+ { .compatible = "ti,tpd12s015", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, tpd12s015_of_match);
+
+static struct platform_driver tpd12s015_driver = {
+ .probe = tpd12s015_probe,
+ .remove = __exit_p(tpd12s015_remove),
+ .driver = {
+ .name = "tpd12s015",
+ .of_match_table = tpd12s015_of_match,
+ },
+};
+
+module_platform_driver(tpd12s015_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
+MODULE_DESCRIPTION("TPD12S015 HDMI level shifter and ESD protection driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/cirrus/cirrus.c
index 248c9f765c45..d2ff63ce8eaf 100644
--- a/drivers/gpu/drm/cirrus/cirrus.c
+++ b/drivers/gpu/drm/cirrus/cirrus.c
@@ -38,7 +38,6 @@
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <drm/drm_vblank.h>
#define DRIVER_NAME "cirrus"
#define DRIVER_DESC "qemu cirrus vga"
@@ -152,9 +151,13 @@ static int cirrus_pitch(struct drm_framebuffer *fb)
static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
{
+ int idx;
u32 addr;
u8 tmp;
+ if (!drm_dev_enter(&cirrus->dev, &idx))
+ return;
+
addr = offset >> 2;
wreg_crt(cirrus, 0x0c, (u8)((addr >> 8) & 0xff));
wreg_crt(cirrus, 0x0d, (u8)(addr & 0xff));
@@ -169,6 +172,8 @@ static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
tmp &= 0x7f;
tmp |= (addr >> 12) & 0x80;
wreg_crt(cirrus, 0x1d, tmp);
+
+ drm_dev_exit(idx);
}
static int cirrus_mode_set(struct cirrus_device *cirrus,
@@ -177,9 +182,12 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
{
int hsyncstart, hsyncend, htotal, hdispend;
int vtotal, vdispend;
- int tmp;
+ int tmp, idx;
int sr07 = 0, hdr = 0;
+ if (!drm_dev_enter(&cirrus->dev, &idx))
+ return -1;
+
htotal = mode->htotal / 8;
hsyncend = mode->hsync_end / 8;
hsyncstart = mode->hsync_start / 8;
@@ -265,6 +273,7 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
hdr = 0xc5;
break;
default:
+ drm_dev_exit(idx);
return -1;
}
@@ -293,6 +302,8 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
/* Unblank (needed on S3 resume, vgabios doesn't do it then) */
outb(0x20, 0x3c0);
+
+ drm_dev_exit(idx);
return 0;
}
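
The pattern applied throughout this file is the standard drm_dev_enter()/drm_dev_exit() guard: once drm_dev_unplug() has run (see cirrus_pci_remove() below), drm_dev_enter() returns false and the hardware access is skipped. A generic sketch of the pattern, with hypothetical my_* names and assuming struct my_device embeds struct drm_device drm:

static void my_update_hw(struct my_device *mdev)
{
	int idx;

	/* Fails once drm_dev_unplug() has been called on the device. */
	if (!drm_dev_enter(&mdev->drm, &idx))
		return;

	/* The device is guaranteed not to vanish until drm_dev_exit(). */
	my_hw_write(mdev, MY_REG, MY_VAL);

	drm_dev_exit(idx);
}
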
@@ -301,10 +312,16 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
{
struct cirrus_device *cirrus = fb->dev->dev_private;
void *vmap;
+ int idx, ret;
+
+ ret = -ENODEV;
+ if (!drm_dev_enter(&cirrus->dev, &idx))
+ goto out;
+ ret = -ENOMEM;
vmap = drm_gem_shmem_vmap(fb->obj[0]);
if (!vmap)
- return -ENOMEM;
+ goto out_dev_exit;
if (cirrus->cpp == fb->format->cpp[0])
drm_fb_memcpy_dstclip(cirrus->vram,
@@ -324,7 +341,12 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
WARN_ON_ONCE("cpp mismatch");
drm_gem_shmem_vunmap(fb->obj[0], vmap);
- return 0;
+ ret = 0;
+
+out_dev_exit:
+ drm_dev_exit(idx);
+out:
+ return ret;
}
static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb)
@@ -434,13 +456,6 @@ static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
cirrus_fb_blit_rect(pipe->plane.state->fb, &rect);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- crtc->state->event = NULL;
- spin_unlock_irq(&crtc->dev->event_lock);
- }
}
static const struct drm_simple_display_pipe_funcs cirrus_pipe_funcs = {
@@ -510,6 +525,14 @@ static void cirrus_mode_config_init(struct cirrus_device *cirrus)
/* ------------------------------------------------------------------ */
+static void cirrus_release(struct drm_device *dev)
+{
+ struct cirrus_device *cirrus = dev->dev_private;
+
+ drm_mode_config_cleanup(dev);
+ kfree(cirrus);
+}
+
DEFINE_DRM_GEM_FOPS(cirrus_fops);
static struct drm_driver cirrus_driver = {
@@ -523,6 +546,7 @@ static struct drm_driver cirrus_driver = {
.fops = &cirrus_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
+ .release = cirrus_release,
};
static int cirrus_pci_probe(struct pci_dev *pdev,
@@ -606,12 +630,11 @@ static void cirrus_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct cirrus_device *cirrus = dev->dev_private;
- drm_dev_unregister(dev);
- drm_mode_config_cleanup(dev);
+ drm_dev_unplug(dev);
+ drm_atomic_helper_shutdown(dev);
iounmap(cirrus->mmio);
iounmap(cirrus->vram);
drm_dev_put(dev);
- kfree(cirrus);
pci_release_regions(pdev);
}
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index d33691512a8e..9ccfbf213d72 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -30,6 +30,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
@@ -1018,6 +1019,122 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
}
/**
+ * drm_atomic_get_bridge_state - get bridge state
+ * @state: global atomic state object
+ * @bridge: bridge to get state object for
+ *
+ * This function returns the bridge state for the given bridge, allocating it
+ * if needed. It will also grab the relevant bridge lock to make sure that the
+ * state is consistent.
+ *
+ * Returns:
+ *
+ * Either the allocated state or the error code encoded into the pointer. When
+ * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
+ * entire atomic sequence must be restarted.
+ */
+struct drm_bridge_state *
+drm_atomic_get_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge)
+{
+ struct drm_private_state *obj_state;
+
+ obj_state = drm_atomic_get_private_obj_state(state, &bridge->base);
+ if (IS_ERR(obj_state))
+ return ERR_CAST(obj_state);
+
+ return drm_priv_to_bridge_state(obj_state);
+}
+EXPORT_SYMBOL(drm_atomic_get_bridge_state);
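
A hedged usage sketch (my_* names are hypothetical): a driver's encoder atomic check hook fetching the bridge state, taking care to propagate -EDEADLK so the atomic core can back off and restart the sequence:

static int my_encoder_atomic_check(struct drm_encoder *encoder,
				   struct drm_crtc_state *crtc_state,
				   struct drm_connector_state *conn_state)
{
	struct drm_bridge *bridge =
		drm_bridge_chain_get_first_bridge(encoder);
	struct drm_bridge_state *bridge_state;

	bridge_state = drm_atomic_get_bridge_state(crtc_state->state,
						   bridge);
	if (IS_ERR(bridge_state))
		return PTR_ERR(bridge_state);	/* may be -EDEADLK */

	/* ... validate or adjust bridge_state here ... */

	return 0;
}
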
+
+/**
+ * drm_atomic_get_old_bridge_state - get old bridge state, if it exists
+ * @state: global atomic state object
+ * @bridge: bridge to grab
+ *
+ * This function returns the old bridge state for the given bridge, or NULL if
+ * the bridge is not part of the global atomic state.
+ */
+struct drm_bridge_state *
+drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge)
+{
+ struct drm_private_state *obj_state;
+
+ obj_state = drm_atomic_get_old_private_obj_state(state, &bridge->base);
+ if (!obj_state)
+ return NULL;
+
+ return drm_priv_to_bridge_state(obj_state);
+}
+EXPORT_SYMBOL(drm_atomic_get_old_bridge_state);
+
+/**
+ * drm_atomic_get_new_bridge_state - get new bridge state, if it exists
+ * @state: global atomic state object
+ * @bridge: bridge to grab
+ *
+ * This function returns the new bridge state for the given bridge, or NULL if
+ * the bridge is not part of the global atomic state.
+ */
+struct drm_bridge_state *
+drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge)
+{
+ struct drm_private_state *obj_state;
+
+ obj_state = drm_atomic_get_new_private_obj_state(state, &bridge->base);
+ if (!obj_state)
+ return NULL;
+
+ return drm_priv_to_bridge_state(obj_state);
+}
+EXPORT_SYMBOL(drm_atomic_get_new_bridge_state);
+
+/**
+ * drm_atomic_add_encoder_bridges - add bridges attached to an encoder
+ * @state: atomic state
+ * @encoder: DRM encoder
+ *
+ * This function adds all bridges attached to @encoder. This is needed to add
+ * bridge states to @state and make them available when
+ * &drm_bridge_funcs.atomic_check(), &drm_bridge_funcs.atomic_pre_enable(),
+ * &drm_bridge_funcs.atomic_enable(),
+ * &drm_bridge_funcs.atomic_disable() and
+ * &drm_bridge_funcs.atomic_post_disable() are called.
+ *
+ * Returns:
+ * 0 on success, or -EDEADLK or -ENOMEM on failure. When the error is
+ * -EDEADLK, the w/w mutex code has detected a deadlock and the entire atomic
+ * sequence must be restarted. All other errors are fatal.
+ */
+int
+drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
+ struct drm_encoder *encoder)
+{
+ struct drm_bridge_state *bridge_state;
+ struct drm_bridge *bridge;
+
+ if (!encoder)
+ return 0;
+
+ DRM_DEBUG_ATOMIC("Adding all bridges for [encoder:%d:%s] to %p\n",
+ encoder->base.id, encoder->name, state);
+
+ drm_for_each_bridge_in_chain(encoder, bridge) {
+ /* Skip bridges that don't implement the atomic state hooks. */
+ if (!bridge->funcs->atomic_duplicate_state)
+ continue;
+
+ bridge_state = drm_atomic_get_bridge_state(state, bridge);
+ if (IS_ERR(bridge_state))
+ return PTR_ERR(bridge_state);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_add_encoder_bridges);
+
+/**
* drm_atomic_add_affected_connectors - add connectors for CRTC
* @state: atomic state
* @crtc: DRM CRTC
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4511c2e07bb9..85d163f16801 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -437,12 +437,12 @@ mode_fixup(struct drm_atomic_state *state)
funcs = encoder->helper_private;
bridge = drm_bridge_chain_get_first_bridge(encoder);
- ret = drm_bridge_chain_mode_fixup(bridge,
- &new_crtc_state->mode,
- &new_crtc_state->adjusted_mode);
- if (!ret) {
- DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
- return -EINVAL;
+ ret = drm_atomic_bridge_chain_check(bridge,
+ new_crtc_state,
+ new_conn_state);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("Bridge atomic check failed\n");
+ return ret;
}
if (funcs && funcs->atomic_check) {
@@ -583,6 +583,7 @@ mode_valid(struct drm_atomic_state *state)
* &drm_crtc_state.connectors_changed is set when a connector is added or
* removed from the CRTC. &drm_crtc_state.active_changed is set when
* &drm_crtc_state.active changes, which is used for DPMS.
+ * &drm_crtc_state.no_vblank is set from the result of drm_dev_has_vblank().
* See also: drm_atomic_crtc_needs_modeset()
*
* IMPORTANT:
@@ -649,6 +650,11 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
return -EINVAL;
}
+
+ if (drm_dev_has_vblank(dev))
+ new_crtc_state->no_vblank = false;
+ else
+ new_crtc_state->no_vblank = true;
}
ret = handle_conflicting_encoders(state, false);
@@ -730,6 +736,26 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
return ret;
}
+ /*
+ * Iterate over all connectors again, and add all affected bridges to
+ * the state.
+ */
+ for_each_oldnew_connector_in_state(state, connector,
+ old_connector_state,
+ new_connector_state, i) {
+ struct drm_encoder *encoder;
+
+ encoder = old_connector_state->best_encoder;
+ ret = drm_atomic_add_encoder_bridges(state, encoder);
+ if (ret)
+ return ret;
+
+ encoder = new_connector_state->best_encoder;
+ ret = drm_atomic_add_encoder_bridges(state, encoder);
+ if (ret)
+ return ret;
+ }
+
ret = mode_valid(state);
if (ret)
return ret;
@@ -2215,7 +2241,9 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
* when a job is queued, and any change to the pipeline that does not touch the
* connector is leading to timeouts when calling
* drm_atomic_helper_wait_for_vblanks() or
- * drm_atomic_helper_wait_for_flip_done().
+ * drm_atomic_helper_wait_for_flip_done(). In addition to writeback
+ * connectors, this function can also fake VBLANK events for CRTCs without
+ * VBLANK interrupt.
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
@@ -3508,3 +3536,44 @@ fail:
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
+
+/**
+ * drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
+ * the input end of a bridge
+ * @bridge: bridge control structure
+ * @bridge_state: new bridge state
+ * @crtc_state: new CRTC state
+ * @conn_state: new connector state
+ * @output_fmt: tested output bus format
+ * @num_input_fmts: will contain the size of the returned array
+ *
+ * This helper is a pluggable implementation of the
+ * &drm_bridge_funcs.atomic_get_input_bus_fmts operation for bridges that don't
+ * modify the bus configuration between their input and their output. It
+ * returns an array of input formats with a single element set to @output_fmt.
+ *
+ * RETURNS:
+ * a valid format array of size @num_input_fmts, or NULL if the allocation
+ * failed
+ */
+u32 *
+drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+
+ input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts) {
+ *num_input_fmts = 0;
+ return NULL;
+ }
+
+ *num_input_fmts = 1;
+ input_fmts[0] = output_fmt;
+ return input_fmts;
+}
+EXPORT_SYMBOL(drm_atomic_helper_bridge_propagate_bus_fmt);
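
A usage sketch: a bridge that does not modify the bus format between its input and output can point its atomic_get_input_bus_fmts hook at the helper directly. The my_* hooks are hypothetical and assumed to be implemented elsewhere in the driver:

static const struct drm_bridge_funcs my_passthrough_bridge_funcs = {
	.attach = my_bridge_attach,
	.atomic_pre_enable = my_bridge_atomic_pre_enable,
	.atomic_post_disable = my_bridge_atomic_post_disable,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	.atomic_reset = drm_atomic_helper_bridge_reset,
	/* The returned input format array is just { output_fmt }. */
	.atomic_get_input_bus_fmts =
		drm_atomic_helper_bridge_propagate_bus_fmt,
};
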
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 7cf3cf936547..8fce6a115dfe 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -26,6 +26,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
@@ -551,3 +552,104 @@ void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj
memcpy(state, obj->state, sizeof(*state));
}
EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);
+
+/**
+ * __drm_atomic_helper_bridge_duplicate_state() - Copy atomic bridge state
+ * @bridge: bridge object
+ * @state: atomic bridge state
+ *
+ * Copies atomic state from a bridge's current state and resets inferred values.
+ * This is useful for drivers that subclass the bridge state.
+ */
+void __drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ __drm_atomic_helper_private_obj_duplicate_state(&bridge->base,
+ &state->base);
+ state->bridge = bridge;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_bridge_duplicate_state);
+
+/**
+ * drm_atomic_helper_bridge_duplicate_state() - Duplicate a bridge state object
+ * @bridge: bridge object
+ *
+ * Allocates a new bridge state and initializes it with the current bridge
+ * state values. This helper is meant to be used as a bridge
+ * &drm_bridge_funcs.atomic_duplicate_state hook for bridges that don't
+ * subclass the bridge state.
+ */
+struct drm_bridge_state *
+drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge)
+{
+ struct drm_bridge_state *new;
+
+ if (WARN_ON(!bridge->base.state))
+ return NULL;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (new)
+ __drm_atomic_helper_bridge_duplicate_state(bridge, new);
+
+ return new;
+}
+EXPORT_SYMBOL(drm_atomic_helper_bridge_duplicate_state);
+
+/**
+ * drm_atomic_helper_bridge_destroy_state() - Destroy a bridge state object
+ * @bridge: the bridge this state refers to
+ * @state: bridge state to destroy
+ *
+ * Destroys a bridge state previously created by
+ * &drm_atomic_helper_bridge_reset() or
+ * &drm_atomic_helper_bridge_duplicate_state(). This helper is meant to be
+ * used as a bridge &drm_bridge_funcs.atomic_destroy_state hook for bridges
+ * that don't subclass the bridge state.
+ */
+void drm_atomic_helper_bridge_destroy_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_bridge_destroy_state);
+
+/**
+ * __drm_atomic_helper_bridge_reset() - Initialize a bridge state to its
+ * default
+ * @bridge: the bridge this state refers to
+ * @state: bridge state to initialize
+ *
+ * Initializes the bridge state to default values. This is meant to be called
+ * by the bridge &drm_bridge_funcs.atomic_reset hook for bridges that subclass
+ * the bridge state.
+ */
+void __drm_atomic_helper_bridge_reset(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ memset(state, 0, sizeof(*state));
+ state->bridge = bridge;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_bridge_reset);
+
+/**
+ * drm_atomic_helper_bridge_reset() - Allocate and initialize a bridge state
+ * to its default
+ * @bridge: the bridge this state refers to
+ *
+ * Allocates the bridge state and initializes it to default values. This helper
+ * is meant to be used as a bridge &drm_bridge_funcs.atomic_reset hook for
+ * bridges that don't subclass the bridge state.
+ */
+struct drm_bridge_state *
+drm_atomic_helper_bridge_reset(struct drm_bridge *bridge)
+{
+ struct drm_bridge_state *bridge_state;
+
+ bridge_state = kzalloc(sizeof(*bridge_state), GFP_KERNEL);
+ if (!bridge_state)
+ return ERR_PTR(-ENOMEM);
+
+ __drm_atomic_helper_bridge_reset(bridge, bridge_state);
+ return bridge_state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_bridge_reset);
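
For drivers that need to carry extra per-commit data, the double-underscore variants above support subclassing the bridge state. A sketch with a hypothetical struct and field:

struct my_bridge_state {
	struct drm_bridge_state base;
	u32 negotiated_fmt;	/* driver-specific extra state */
};

static struct drm_bridge_state *
my_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
{
	struct my_bridge_state *state;

	if (WARN_ON(!bridge->base.state))
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	/* Common bookkeeping, then copy the driver-specific part. */
	__drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);
	state->negotiated_fmt =
		container_of(bridge->base.state, struct my_bridge_state,
			     base.base)->negotiated_fmt;

	return &state->base;
}
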
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index cc9acd986c68..531b876d0ed8 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -153,11 +153,6 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
return -ENOMEM;
}
- if (dev->driver->master_create) {
- ret = dev->driver->master_create(dev, fpriv->master);
- if (ret)
- goto out_err;
- }
fpriv->is_master = 1;
fpriv->authenticated = 1;
@@ -332,9 +327,6 @@ static void drm_master_destroy(struct kref *kref)
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_lease_destroy(master);
- if (dev->driver->master_destroy)
- dev->driver->master_destroy(dev, master);
-
drm_legacy_master_rmmaps(dev, master);
idr_destroy(&master->magic_map);
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index c2cf0c90fa26..afdec8e5fc68 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
+#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
@@ -38,26 +39,56 @@
* encoder chain.
*
* A bridge is always attached to a single &drm_encoder at a time, but can be
- * either connected to it directly, or through an intermediate bridge::
+ * either connected to it directly, or through a chain of bridges::
*
- * encoder ---> bridge B ---> bridge A
+ * [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B
*
- * Here, the output of the encoder feeds to bridge B, and that furthers feeds to
- * bridge A.
+ * Here, the output of the encoder feeds to bridge A, and that further feeds to
+ * bridge B. Bridge chains can be arbitrarily long, and shall be fully linear:
+ * Chaining multiple bridges to the output of a bridge, or the same bridge to
+ * the output of different bridges, is not supported.
*
- * The driver using the bridge is responsible to make the associations between
- * the encoder and bridges. Once these links are made, the bridges will
- * participate along with encoder functions to perform mode_set/enable/disable
- * through the ops provided in &drm_bridge_funcs.
+ * Display drivers are responsible for linking encoders with the first bridge
+ * in the chain. This is done by acquiring the appropriate bridge with
+ * of_drm_find_bridge() or drm_of_find_panel_or_bridge(), or creating it for a
+ * panel with drm_panel_bridge_add_typed() (or the managed version
+ * devm_drm_panel_bridge_add_typed()). Once acquired, the bridge shall be
+ * attached to the encoder with a call to drm_bridge_attach().
*
- * drm_bridge, like drm_panel, aren't drm_mode_object entities like planes,
+ * Bridges are responsible for linking themselves with the next bridge in the
+ * chain, if any. This is done the same way as for encoders, with the call to
+ * drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation.
+ *
+ * Once these links are created, the bridges can participate along with encoder
+ * functions to perform mode validation and fixup (through
+ * drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode
+ * setting (through drm_bridge_chain_mode_set()), enable (through
+ * drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable())
+ * and disable (through drm_atomic_bridge_chain_disable() and
+ * drm_atomic_bridge_chain_post_disable()). Those functions call the
+ * corresponding operations provided in &drm_bridge_funcs in sequence for all
+ * bridges in the chain.
+ *
+ * For display drivers that use the atomic helpers
+ * drm_atomic_helper_check_modeset(),
+ * drm_atomic_helper_commit_modeset_enables() and
+ * drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled
+ * commit check and commit tail handlers, or through the higher-level
+ * drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or
+ * drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently and
+ * requires no intervention from the driver. For other drivers, the relevant
+ * DRM bridge chain functions shall be called manually.
+ *
+ * Bridges also participate in implementing the &drm_connector at the end of
+ * the bridge chain. Display drivers may use the drm_bridge_connector_init()
+ * helper to create the &drm_connector, or implement it manually on top of the
+ * connector-related operations exposed by the bridge (see the overview
+ * documentation of bridge operations for more details).
+ *
+ * &drm_bridge, like &drm_panel, aren't &drm_mode_object entities like planes,
* CRTCs, encoders or connectors and hence are not visible to userspace. They
* just provide additional hooks to get the desired output at the end of the
* encoder chain.
- *
- * Bridges can also be chained up using the &drm_bridge.chain_node field.
- *
- * Both legacy CRTC helpers and the new atomic modeset helpers support bridges.
*/
static DEFINE_MUTEX(bridge_lock);
@@ -70,6 +101,8 @@ static LIST_HEAD(bridge_list);
*/
void drm_bridge_add(struct drm_bridge *bridge)
{
+ mutex_init(&bridge->hpd_mutex);
+
mutex_lock(&bridge_lock);
list_add_tail(&bridge->list, &bridge_list);
mutex_unlock(&bridge_lock);
@@ -86,15 +119,43 @@ void drm_bridge_remove(struct drm_bridge *bridge)
mutex_lock(&bridge_lock);
list_del_init(&bridge->list);
mutex_unlock(&bridge_lock);
+
+ mutex_destroy(&bridge->hpd_mutex);
}
EXPORT_SYMBOL(drm_bridge_remove);
+static struct drm_private_state *
+drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj)
+{
+ struct drm_bridge *bridge = drm_priv_to_bridge(obj);
+ struct drm_bridge_state *state;
+
+ state = bridge->funcs->atomic_duplicate_state(bridge);
+ return state ? &state->base : NULL;
+}
+
+static void
+drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj,
+ struct drm_private_state *s)
+{
+ struct drm_bridge_state *state = drm_priv_to_bridge_state(s);
+ struct drm_bridge *bridge = drm_priv_to_bridge(obj);
+
+ bridge->funcs->atomic_destroy_state(bridge, state);
+}
+
+static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
+ .atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state,
+ .atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
+};
+
/**
* drm_bridge_attach - attach the bridge to an encoder's chain
*
* @encoder: DRM encoder
* @bridge: bridge to attach
* @previous: previous bridge in the chain (optional)
+ * @flags: DRM_BRIDGE_ATTACH_* flags
*
* Called by a kms driver to link the bridge to an encoder's chain. The previous
* argument specifies the previous bridge in the chain. If NULL, the bridge is
@@ -112,7 +173,8 @@ EXPORT_SYMBOL(drm_bridge_remove);
* Zero on success, error code on failure
*/
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
- struct drm_bridge *previous)
+ struct drm_bridge *previous,
+ enum drm_bridge_attach_flags flags)
{
int ret;
@@ -134,16 +196,36 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
list_add(&bridge->chain_node, &encoder->bridge_chain);
if (bridge->funcs->attach) {
- ret = bridge->funcs->attach(bridge);
- if (ret < 0) {
- list_del(&bridge->chain_node);
- bridge->dev = NULL;
- bridge->encoder = NULL;
- return ret;
+ ret = bridge->funcs->attach(bridge, flags);
+ if (ret < 0)
+ goto err_reset_bridge;
+ }
+
+ if (bridge->funcs->atomic_reset) {
+ struct drm_bridge_state *state;
+
+ state = bridge->funcs->atomic_reset(bridge);
+ if (IS_ERR(state)) {
+ ret = PTR_ERR(state);
+ goto err_detach_bridge;
}
+
+ drm_atomic_private_obj_init(bridge->dev, &bridge->base,
+ &state->base,
+ &drm_bridge_priv_state_funcs);
}
return 0;
+
+err_detach_bridge:
+ if (bridge->funcs->detach)
+ bridge->funcs->detach(bridge);
+
+err_reset_bridge:
+ bridge->dev = NULL;
+ bridge->encoder = NULL;
+ list_del(&bridge->chain_node);
+ return ret;
}
EXPORT_SYMBOL(drm_bridge_attach);
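
A usage sketch from the display-driver side (hypothetical names): find the bridge via the OF graph and attach it as the first element of the encoder's chain, requesting that the bridge not create its own connector:

static int my_encoder_attach_bridge(struct drm_device *drm,
				    struct drm_encoder *encoder,
				    struct device_node *remote)
{
	struct drm_bridge *bridge = of_drm_find_bridge(remote);

	if (!bridge)
		return -EPROBE_DEFER;

	/* First bridge in the chain: previous is NULL. */
	return drm_bridge_attach(encoder, bridge, NULL,
				 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
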
@@ -155,6 +237,9 @@ void drm_bridge_detach(struct drm_bridge *bridge)
if (WARN_ON(!bridge->dev))
return;
+ if (bridge->funcs->atomic_reset)
+ drm_atomic_private_obj_fini(&bridge->base);
+
if (bridge->funcs->detach)
bridge->funcs->detach(bridge);
@@ -163,14 +248,92 @@ void drm_bridge_detach(struct drm_bridge *bridge)
}
/**
- * DOC: bridge callbacks
+ * DOC: bridge operations
+ *
+ * Bridge drivers expose operations through the &drm_bridge_funcs structure.
+ * The DRM internals (atomic and CRTC helpers) use the helpers defined in
+ * drm_bridge.c to call bridge operations. Those operations are divided into
+ * three categories, each supporting a different part of bridge usage.
+ *
+ * - The encoder-related operations support control of the bridges in the
+ * chain, and are roughly counterparts to the &drm_encoder_helper_funcs
+ * operations. They are used by the legacy CRTC and the atomic modeset
+ * helpers to perform mode validation, fixup and setting, and to enable and
+ * disable the bridge automatically.
+ *
+ * The enable and disable operations are split in
+ * &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable,
+ * &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide
+ * finer-grained control.
+ *
+ * Bridge drivers may implement the legacy version of those operations, or
+ * the atomic version (prefixed with atomic\_), in which case they shall also
+ * implement the atomic state bookkeeping operations
+ * (&drm_bridge_funcs.atomic_duplicate_state,
+ * &drm_bridge_funcs.atomic_destroy_state and &drm_bridge_funcs.atomic_reset).
+ * Mixing atomic and non-atomic versions of the operations is not supported.
+ *
+ * - The bus format negotiation operations
+ * &drm_bridge_funcs.atomic_get_output_bus_fmts and
+ * &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to
+ * negotiate the formats transmitted between bridges in the chain when
+ * multiple formats are supported. Negotiation for formats is performed
+ * transparently for display drivers by the atomic modeset helpers. Only
+ * atomic versions of those operations exist; bridge drivers that need to
+ * implement them shall thus also implement the atomic version of the
+ * encoder-related operations. This feature is not supported by the legacy
+ * CRTC helpers.
+ *
+ * - The connector-related operations support implementing a &drm_connector
+ * based on a chain of bridges. Bridge drivers traditionally create a
+ * &drm_connector when the bridge is meant to be used at the end of the
+ * chain. This puts an additional burden on bridge drivers, especially for bridges that may
+ * be used in the middle of a chain or at the end of it. Furthermore, it
+ * requires all operations of the &drm_connector to be handled by a single
+ * bridge, which doesn't always match the hardware architecture.
+ *
+ * To simplify bridge drivers and make the connector implementation more
+ * flexible, a new model allows bridges to unconditionally skip creation of
+ * &drm_connector and instead expose &drm_bridge_funcs operations to support
+ * an externally-implemented &drm_connector. Those operations are
+ * &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes,
+ * &drm_bridge_funcs.get_edid, &drm_bridge_funcs.hpd_notify,
+ * &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When
+ * implemented, display drivers shall create a &drm_connector instance for
+ * each chain of bridges, and implement those connector instances based on
+ * the bridge connector operations.
*
- * The &drm_bridge_funcs ops are populated by the bridge driver. The DRM
- * internals (atomic and CRTC helpers) use the helpers defined in drm_bridge.c
- * These helpers call a specific &drm_bridge_funcs op for all the bridges
- * during encoder configuration.
+ * Bridge drivers shall implement the connector-related operations for all
+ * the features that the bridge hardware supports. For instance, if a bridge
+ * supports reading EDID, the &drm_bridge_funcs.get_edid operation shall be
+ * implemented. This however doesn't mean that the DDC lines are wired to the
+ * bridge on a particular platform, as they could also be connected to an I2C
+ * controller of the SoC. Support for the connector-related operations on the
+ * running platform is reported through the &drm_bridge.ops flags. Bridge
+ * drivers shall detect which operations they can support on the platform
+ * (usually this information is provided by ACPI or DT), and set the
+ * &drm_bridge.ops flags for all supported operations. A flag shall only be
+ * set if the corresponding &drm_bridge_funcs operation is implemented, but
+ * an implemented operation doesn't necessarily imply that the corresponding
+ * flag will be set. Display drivers shall use the &drm_bridge.ops flags to
+ * decide which bridge to delegate a connector operation to. This mechanism
+ * allows providing a single static const &drm_bridge_funcs instance in
+ * bridge drivers, improving security by storing function pointers in
+ * read-only memory.
*
- * For detailed specification of the bridge callbacks see &drm_bridge_funcs.
+ * In order to ease the transition, bridge drivers may support both the old
+ * and new models by making connector creation optional and implementing the
+ * connector-related bridge operations. Connector creation is then controlled
+ * by the flags argument to the drm_bridge_attach() function. Display drivers
+ * that support the new model and create connectors themselves shall set the
+ * %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge drivers shall then skip
+ * connector creation. For intermediate bridges in the chain, the flag shall
+ * be passed to the drm_bridge_attach() call for the downstream bridge.
+ * Bridge drivers that implement only the new model shall return an error
+ * from their &drm_bridge_funcs.attach handler when the
+ * %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers
+ * should use the new model, and convert the bridge drivers they use if
+ * needed, in order to gradually phase out the old model.
*/
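
As an editorial illustration of the connector model described above (not part
of this patch), a display driver supporting the new model could attach its
bridge chain and delegate the connector to the drm_bridge_connector helper
added later in this series. The foo_output_init() name is a hypothetical
placeholder:

#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_connector.h>

static int foo_output_init(struct drm_device *drm,
                           struct drm_encoder *encoder,
                           struct drm_bridge *bridge)
{
        struct drm_connector *connector;
        int ret;

        /* Ask the bridge chain not to create a connector of its own. */
        ret = drm_bridge_attach(encoder, bridge, NULL,
                                DRM_BRIDGE_ATTACH_NO_CONNECTOR);
        if (ret < 0)
                return ret;

        /* Create a connector backed by the whole bridge chain. */
        connector = drm_bridge_connector_init(drm, encoder);
        if (IS_ERR(connector))
                return PTR_ERR(connector);

        return drm_connector_attach_encoder(connector, encoder);
}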
/**
@@ -409,10 +572,19 @@ void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
- if (iter->funcs->atomic_disable)
- iter->funcs->atomic_disable(iter, old_state);
- else if (iter->funcs->disable)
+ if (iter->funcs->atomic_disable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ iter);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ iter->funcs->atomic_disable(iter, old_bridge_state);
+ } else if (iter->funcs->disable) {
iter->funcs->disable(iter);
+ }
if (iter == bridge)
break;
@@ -443,10 +615,20 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
- if (bridge->funcs->atomic_post_disable)
- bridge->funcs->atomic_post_disable(bridge, old_state);
- else if (bridge->funcs->post_disable)
+ if (bridge->funcs->atomic_post_disable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ bridge);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ bridge->funcs->atomic_post_disable(bridge,
+ old_bridge_state);
+ } else if (bridge->funcs->post_disable) {
bridge->funcs->post_disable(bridge);
+ }
}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
@@ -475,10 +657,19 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
- if (iter->funcs->atomic_pre_enable)
- iter->funcs->atomic_pre_enable(iter, old_state);
- else if (iter->funcs->pre_enable)
+ if (iter->funcs->atomic_pre_enable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ iter);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ iter->funcs->atomic_pre_enable(iter, old_bridge_state);
+ } else if (iter->funcs->pre_enable) {
iter->funcs->pre_enable(iter);
+ }
if (iter == bridge)
break;
@@ -508,14 +699,498 @@ void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
- if (bridge->funcs->atomic_enable)
- bridge->funcs->atomic_enable(bridge, old_state);
- else if (bridge->funcs->enable)
+ if (bridge->funcs->atomic_enable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ bridge);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ bridge->funcs->atomic_enable(bridge, old_bridge_state);
+ } else if (bridge->funcs->enable) {
bridge->funcs->enable(bridge);
+ }
}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);
+static int drm_atomic_bridge_check(struct drm_bridge *bridge,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ if (bridge->funcs->atomic_check) {
+ struct drm_bridge_state *bridge_state;
+ int ret;
+
+ bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ bridge);
+ if (WARN_ON(!bridge_state))
+ return -EINVAL;
+
+ ret = bridge->funcs->atomic_check(bridge, bridge_state,
+ crtc_state, conn_state);
+ if (ret)
+ return ret;
+ } else if (bridge->funcs->mode_fixup) {
+ if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode,
+ &crtc_state->adjusted_mode))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
+ struct drm_bridge *cur_bridge,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 out_bus_fmt)
+{
+ struct drm_bridge_state *cur_state;
+ unsigned int num_in_bus_fmts, i;
+ struct drm_bridge *prev_bridge;
+ u32 *in_bus_fmts;
+ int ret;
+
+ prev_bridge = drm_bridge_get_prev_bridge(cur_bridge);
+ cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ cur_bridge);
+
+ /*
+ * If bus format negotiation is not supported by this bridge, let's
+ * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and
+ * hope that it can handle this situation gracefully (by providing
+ * appropriate default values).
+ */
+ if (!cur_bridge->funcs->atomic_get_input_bus_fmts) {
+ if (cur_bridge != first_bridge) {
+ ret = select_bus_fmt_recursive(first_bridge,
+ prev_bridge, crtc_state,
+ conn_state,
+ MEDIA_BUS_FMT_FIXED);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Driver does not implement the atomic state hooks, but that's
+ * fine, as long as it does not access the bridge state.
+ */
+ if (cur_state) {
+ cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED;
+ cur_state->output_bus_cfg.format = out_bus_fmt;
+ }
+
+ return 0;
+ }
+
+ /*
+ * If the driver implements ->atomic_get_input_bus_fmts() it
+ * should also implement the atomic state hooks.
+ */
+ if (WARN_ON(!cur_state))
+ return -EINVAL;
+
+ in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge,
+ cur_state,
+ crtc_state,
+ conn_state,
+ out_bus_fmt,
+ &num_in_bus_fmts);
+ if (!num_in_bus_fmts)
+ return -ENOTSUPP;
+ else if (!in_bus_fmts)
+ return -ENOMEM;
+
+ if (first_bridge == cur_bridge) {
+ cur_state->input_bus_cfg.format = in_bus_fmts[0];
+ cur_state->output_bus_cfg.format = out_bus_fmt;
+ kfree(in_bus_fmts);
+ return 0;
+ }
+
+ for (i = 0; i < num_in_bus_fmts; i++) {
+ ret = select_bus_fmt_recursive(first_bridge, prev_bridge,
+ crtc_state, conn_state,
+ in_bus_fmts[i]);
+ if (ret != -ENOTSUPP)
+ break;
+ }
+
+ if (!ret) {
+ cur_state->input_bus_cfg.format = in_bus_fmts[i];
+ cur_state->output_bus_cfg.format = out_bus_fmt;
+ }
+
+ kfree(in_bus_fmts);
+ return ret;
+}
+
+/*
+ * This function is called by &drm_atomic_bridge_chain_check() just before
+ * calling &drm_bridge_funcs.atomic_check() on all elements of the chain.
+ * It performs bus format negotiation between bridge elements. The negotiation
+ * happens in reverse order, starting from the last element in the chain up to
+ * @bridge.
+ *
+ * Negotiation starts by retrieving supported output bus formats on the last
+ * bridge element and testing them one by one. The test is recursive, meaning
+ * that for each tested output format, the whole chain will be walked backward,
+ * and each element will have to choose an input bus format that can be
+ * transcoded to the requested output format. When a bridge element does not
+ * support transcoding into a specific output format -ENOTSUPP is returned and
+ * the next bridge element will have to try a different format. If none of the
+ * combinations worked, -ENOTSUPP is returned and the atomic modeset will fail.
+ *
+ * This implementation relies on
+ * &drm_bridge_funcs.atomic_get_output_bus_fmts() and
+ * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported
+ * input/output formats.
+ *
+ * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by
+ * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts()
+ * tries a single format: &drm_connector.display_info.bus_formats[0] if
+ * available, MEDIA_BUS_FMT_FIXED otherwise.
+ *
+ * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented,
+ * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the
+ * bridge element that lacks this hook and asks the previous element in the
+ * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what
+ * to do in that case (fail if they want to enforce bus format negotiation, or
+ * provide a reasonable default if they need to support pipelines where not
+ * all elements support bus format negotiation).
+ */
+static int
+drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_connector *conn = conn_state->connector;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_bridge_state *last_bridge_state;
+ unsigned int i, num_out_bus_fmts;
+ struct drm_bridge *last_bridge;
+ u32 *out_bus_fmts;
+ int ret = 0;
+
+ last_bridge = list_last_entry(&encoder->bridge_chain,
+ struct drm_bridge, chain_node);
+ last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ last_bridge);
+
+ if (last_bridge->funcs->atomic_get_output_bus_fmts) {
+ const struct drm_bridge_funcs *funcs = last_bridge->funcs;
+
+ /*
+ * If the driver implements ->atomic_get_output_bus_fmts() it
+ * should also implement the atomic state hooks.
+ */
+ if (WARN_ON(!last_bridge_state))
+ return -EINVAL;
+
+ out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge,
+ last_bridge_state,
+ crtc_state,
+ conn_state,
+ &num_out_bus_fmts);
+ if (!num_out_bus_fmts)
+ return -ENOTSUPP;
+ else if (!out_bus_fmts)
+ return -ENOMEM;
+ } else {
+ num_out_bus_fmts = 1;
+ out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL);
+ if (!out_bus_fmts)
+ return -ENOMEM;
+
+ if (conn->display_info.num_bus_formats &&
+ conn->display_info.bus_formats)
+ out_bus_fmts[0] = conn->display_info.bus_formats[0];
+ else
+ out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
+ }
+
+ for (i = 0; i < num_out_bus_fmts; i++) {
+ ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state,
+ conn_state, out_bus_fmts[i]);
+ if (ret != -ENOTSUPP)
+ break;
+ }
+
+ kfree(out_bus_fmts);
+
+ return ret;
+}
+
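
For illustration (an editorial sketch, not part of this patch), a bridge
taking part in this negotiation could implement
&drm_bridge_funcs.atomic_get_input_bus_fmts as below. The foo_ prefix and the
format choices are assumptions; the return conventions match the caller above
(NULL with *num_input_fmts == 0 means -ENOTSUPP, NULL with a non-zero count
means -ENOMEM):

static u32 *
foo_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
                              struct drm_bridge_state *bridge_state,
                              struct drm_crtc_state *crtc_state,
                              struct drm_connector_state *conn_state,
                              u32 output_fmt,
                              unsigned int *num_input_fmts)
{
        u32 *input_fmts;

        /* This hypothetical bridge can only output RGB888. */
        if (output_fmt != MEDIA_BUS_FMT_RGB888_1X24) {
                *num_input_fmts = 0;
                return NULL;
        }

        *num_input_fmts = 2;
        input_fmts = kcalloc(*num_input_fmts, sizeof(*input_fmts),
                             GFP_KERNEL);
        if (!input_fmts)
                return NULL;

        /* Preferred format first, fallback second. */
        input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
        input_fmts[1] = MEDIA_BUS_FMT_RGB666_1X18;

        return input_fmts;
}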
+static void
+drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
+ struct drm_connector *conn,
+ struct drm_atomic_state *state)
+{
+ struct drm_bridge_state *bridge_state, *next_bridge_state;
+ struct drm_bridge *next_bridge;
+ u32 output_flags = 0;
+
+ bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
+
+ /* No bridge state attached to this bridge => nothing to propagate. */
+ if (!bridge_state)
+ return;
+
+ next_bridge = drm_bridge_get_next_bridge(bridge);
+
+ /*
+ * Let's try to apply the most common case here, that is, propagate
+ * display_info flags for the last bridge, and propagate the input
+ * flags of the next bridge element to the output end of the current
+ * bridge when the bridge is not the last one.
+ * There are exceptions to this rule, like when signal inversion is
+ * happening at the board level, but that's something drivers can deal
+ * with from their &drm_bridge_funcs.atomic_check() implementation by
+ * simply overriding the flags value we've set here.
+ */
+ if (!next_bridge) {
+ output_flags = conn->display_info.bus_flags;
+ } else {
+ next_bridge_state = drm_atomic_get_new_bridge_state(state,
+ next_bridge);
+ /*
+ * No bridge state attached to the next bridge, just leave the
+ * flags to 0.
+ */
+ if (next_bridge_state)
+ output_flags = next_bridge_state->input_bus_cfg.flags;
+ }
+
+ bridge_state->output_bus_cfg.flags = output_flags;
+
+ /*
+ * Propagate the output flags to the input end of the bridge. Again, it's
+ * not necessarily what all bridges want, but that's what most of them
+ * do, and by doing that by default we avoid forcing drivers to
+ * duplicate the "dummy propagation" logic.
+ */
+ bridge_state->input_bus_cfg.flags = output_flags;
+}
+
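
As a hypothetical example of the override mentioned above (editorial, not part
of this patch), a bridge whose board-level wiring inverts the pixel sampling
edge could correct the propagated flags from its
&drm_bridge_funcs.atomic_check() implementation:

static int foo_bridge_atomic_check(struct drm_bridge *bridge,
                                   struct drm_bridge_state *bridge_state,
                                   struct drm_crtc_state *crtc_state,
                                   struct drm_connector_state *conn_state)
{
        /* Override the default propagation: sample on the falling edge. */
        bridge_state->input_bus_cfg.flags &=
                ~DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE;
        bridge_state->input_bus_cfg.flags |=
                DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;

        return 0;
}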
+/**
+ * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain
+ * @bridge: bridge control structure
+ * @crtc_state: new CRTC state
+ * @conn_state: new connector state
+ *
+ * First trigger a bus format negotiation, then call the
+ * &drm_bridge_funcs.atomic_check() operation (falling back to
+ * &drm_bridge_funcs.mode_fixup() when the atomic operation is not
+ * implemented) for all the bridges in the encoder chain, starting from the
+ * last bridge to the first. These operations are called before
+ * &drm_encoder_helper_funcs.atomic_check().
+ *
+ * RETURNS:
+ * 0 on success, a negative error code on failure
+ */
+int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_connector *conn = conn_state->connector;
+ struct drm_encoder *encoder;
+ struct drm_bridge *iter;
+ int ret;
+
+ if (!bridge)
+ return 0;
+
+ ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state,
+ conn_state);
+ if (ret)
+ return ret;
+
+ encoder = bridge->encoder;
+ list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
+ /*
+ * Bus flags are propagated by default. If a bridge needs to
+ * tweak the input bus flags for any reason, it should happen
+ * in its &drm_bridge_funcs.atomic_check() implementation such
+ * that preceding bridges in the chain can propagate the new
+ * bus flags.
+ */
+ drm_atomic_bridge_propagate_bus_flags(iter, conn,
+ crtc_state->state);
+
+ ret = drm_atomic_bridge_check(iter, crtc_state, conn_state);
+ if (ret)
+ return ret;
+
+ if (iter == bridge)
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
+
+/**
+ * drm_bridge_detect - check if anything is attached to the bridge output
+ * @bridge: bridge control structure
+ *
+ * If the bridge supports output detection, as reported by the
+ * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
+ * bridge and return the connection status. Otherwise return
+ * connector_status_unknown.
+ *
+ * RETURNS:
+ * The detection status on success, or connector_status_unknown if the bridge
+ * doesn't support output detection.
+ */
+enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
+ return connector_status_unknown;
+
+ return bridge->funcs->detect(bridge);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_detect);
+
+/**
+ * drm_bridge_get_modes - fill all modes currently valid for the sink into the
+ * connector
+ * @bridge: bridge control structure
+ * @connector: the connector to fill with modes
+ *
+ * If the bridge supports output modes retrieval, as reported by the
+ * DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to
+ * fill the connector with all valid modes and return the number of modes
+ * added. Otherwise return 0.
+ *
+ * RETURNS:
+ * The number of modes added to the connector.
+ */
+int drm_bridge_get_modes(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_MODES))
+ return 0;
+
+ return bridge->funcs->get_modes(bridge, connector);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
+
+/**
+ * drm_bridge_get_edid - get the EDID data of the connected display
+ * @bridge: bridge control structure
+ * @connector: the connector to read EDID for
+ *
+ * If the bridge supports output EDID retrieval, as reported by the
+ * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.get_edid to
+ * get the EDID and return it. Otherwise return ERR_PTR(-ENOTSUPP).
+ *
+ * RETURNS:
+ * The retrieved EDID on success, or an error pointer otherwise.
+ */
+struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
+ return ERR_PTR(-ENOTSUPP);
+
+ return bridge->funcs->get_edid(bridge, connector);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_get_edid);
+
+/**
+ * drm_bridge_hpd_enable - enable hot plug detection for the bridge
+ * @bridge: bridge control structure
+ * @cb: hot-plug detection callback
+ * @data: data to be passed to the hot-plug detection callback
+ *
+ * Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb
+ * and @data as hot plug notification callback. From now on the @cb will be
+ * called with @data when an output status change is detected by the bridge,
+ * until hot plug notification gets disabled with drm_bridge_hpd_disable().
+ *
+ * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
+ * bridge->ops. This function shall not be called when the flag is not set.
+ *
+ * Only one hot plug detection callback can be registered at a time; it is an
+ * error to call this function when hot plug detection is already enabled for
+ * the bridge.
+ */
+void drm_bridge_hpd_enable(struct drm_bridge *bridge,
+ void (*cb)(void *data,
+ enum drm_connector_status status),
+ void *data)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
+ return;
+
+ mutex_lock(&bridge->hpd_mutex);
+
+ if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n"))
+ goto unlock;
+
+ bridge->hpd_cb = cb;
+ bridge->hpd_data = data;
+
+ if (bridge->funcs->hpd_enable)
+ bridge->funcs->hpd_enable(bridge);
+
+unlock:
+ mutex_unlock(&bridge->hpd_mutex);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable);
+
+/**
+ * drm_bridge_hpd_disable - disable hot plug detection for the bridge
+ * @bridge: bridge control structure
+ *
+ * Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot
+ * plug detection callback previously registered with drm_bridge_hpd_enable().
+ * Once this function returns, the callback will not be called by the bridge
+ * when an output status change occurs.
+ *
+ * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
+ * bridge->ops. This function shall not be called when the flag is not set.
+ */
+void drm_bridge_hpd_disable(struct drm_bridge *bridge)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
+ return;
+
+ mutex_lock(&bridge->hpd_mutex);
+ if (bridge->funcs->hpd_disable)
+ bridge->funcs->hpd_disable(bridge);
+
+ bridge->hpd_cb = NULL;
+ bridge->hpd_data = NULL;
+ mutex_unlock(&bridge->hpd_mutex);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable);
+
+/**
+ * drm_bridge_hpd_notify - notify hot plug detection events
+ * @bridge: bridge control structure
+ * @status: output connection status
+ *
+ * Bridge drivers shall call this function to report hot plug events when they
+ * detect a change in the output status, provided hot plug detection has been
+ * enabled by drm_bridge_hpd_enable().
+ *
+ * This function shall be called in a context that can sleep.
+ */
+void drm_bridge_hpd_notify(struct drm_bridge *bridge,
+ enum drm_connector_status status)
+{
+ mutex_lock(&bridge->hpd_mutex);
+ if (bridge->hpd_cb)
+ bridge->hpd_cb(bridge->hpd_data, status);
+ mutex_unlock(&bridge->hpd_mutex);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify);
+
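
An editorial sketch of the reporting side (not part of this patch): a bridge
driver with an HPD interrupt would typically call drm_bridge_hpd_notify()
from a threaded handler, since the callback may sleep. struct foo_bridge and
foo_hpd_level() are placeholders:

struct foo_bridge {
        struct drm_bridge bridge;
        void __iomem *regs;
};

static irqreturn_t foo_hpd_irq_thread(int irq, void *arg)
{
        struct foo_bridge *foo = arg;
        enum drm_connector_status status;

        /* foo_hpd_level() stands in for reading the HPD pin state. */
        status = foo_hpd_level(foo) ? connector_status_connected
                                    : connector_status_disconnected;

        drm_bridge_hpd_notify(&foo->bridge, status);

        return IRQ_HANDLED;
}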
#ifdef CONFIG_OF
/**
* of_drm_find_bridge - find the bridge corresponding to the device node in
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
new file mode 100644
index 000000000000..c6994fe673f3
--- /dev/null
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Laurent Pinchart <[email protected]>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * The DRM bridge connector helper object provides a DRM connector
+ * implementation that wraps a chain of &struct drm_bridge. The connector
+ * operations are fully implemented based on the operations of the bridges in
+ * the chain, and don't require any intervention from the display controller
+ * driver at runtime.
+ *
+ * To use the helper, display controller drivers create a bridge connector with
+ * a call to drm_bridge_connector_init(). This associates the newly created
+ * connector with the chain of bridges passed to the function and registers it
+ * with the DRM device. At that point the connector becomes fully usable, no
+ * further operation is needed.
+ *
+ * The DRM bridge connector operations are implemented based on the operations
+ * provided by the bridges in the chain. Each connector operation is delegated
+ * to the bridge closest to the connector (at the end of the chain) that
+ * provides the relevant functionality.
+ *
+ * To make use of this helper, all bridges in the chain shall report bridge
+ * operation flags (&drm_bridge->ops) and bridge output type
+ * (&drm_bridge->type), and shall support the DRM_BRIDGE_ATTACH_NO_CONNECTOR
+ * attach flag (none of the bridges shall create a DRM connector directly).
+ */
+
+/**
+ * struct drm_bridge_connector - A connector backed by a chain of bridges
+ */
+struct drm_bridge_connector {
+ /**
+ * @base: The base DRM connector
+ */
+ struct drm_connector base;
+ /**
+ * @encoder:
+ *
+ * The encoder at the start of the bridges chain.
+ */
+ struct drm_encoder *encoder;
+ /**
+ * @bridge_edid:
+ *
+ * The last bridge in the chain (closest to the connector) that provides
+ * EDID read support, if any (see &DRM_BRIDGE_OP_EDID).
+ */
+ struct drm_bridge *bridge_edid;
+ /**
+ * @bridge_hpd:
+ *
+ * The last bridge in the chain (closest to the connector) that provides
+ * hot-plug detection notification, if any (see &DRM_BRIDGE_OP_HPD).
+ */
+ struct drm_bridge *bridge_hpd;
+ /**
+ * @bridge_detect:
+ *
+ * The last bridge in the chain (closest to the connector) that provides
+ * connector detection, if any (see &DRM_BRIDGE_OP_DETECT).
+ */
+ struct drm_bridge *bridge_detect;
+ /**
+ * @bridge_modes:
+ *
+ * The last bridge in the chain (closest to the connector) that provides
+ * connector modes detection, if any (see &DRM_BRIDGE_OP_MODES).
+ */
+ struct drm_bridge *bridge_modes;
+};
+
+#define to_drm_bridge_connector(x) \
+ container_of(x, struct drm_bridge_connector, base)
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Hot-Plug Handling
+ */
+
+static void drm_bridge_connector_hpd_notify(struct drm_connector *connector,
+ enum drm_connector_status status)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ /* Notify all bridges in the pipeline of hotplug events. */
+ drm_for_each_bridge_in_chain(bridge_connector->encoder, bridge) {
+ if (bridge->funcs->hpd_notify)
+ bridge->funcs->hpd_notify(bridge, status);
+ }
+}
+
+static void drm_bridge_connector_hpd_cb(void *cb_data,
+ enum drm_connector_status status)
+{
+ struct drm_bridge_connector *drm_bridge_connector = cb_data;
+ struct drm_connector *connector = &drm_bridge_connector->base;
+ struct drm_device *dev = connector->dev;
+ enum drm_connector_status old_status;
+
+ mutex_lock(&dev->mode_config.mutex);
+ old_status = connector->status;
+ connector->status = status;
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (old_status == status)
+ return;
+
+ drm_bridge_connector_hpd_notify(connector, status);
+
+ drm_kms_helper_hotplug_event(dev);
+}
+
+/**
+ * drm_bridge_connector_enable_hpd - Enable hot-plug detection for the connector
+ * @connector: The DRM bridge connector
+ *
+ * This function enables hot-plug detection for the given bridge connector.
+ * This is typically used by display drivers in their resume handler.
+ */
+void drm_bridge_connector_enable_hpd(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *hpd = bridge_connector->bridge_hpd;
+
+ if (hpd)
+ drm_bridge_hpd_enable(hpd, drm_bridge_connector_hpd_cb,
+ bridge_connector);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_connector_enable_hpd);
+
+/**
+ * drm_bridge_connector_disable_hpd - Disable hot-plug detection for the
+ * connector
+ * @connector: The DRM bridge connector
+ *
+ * This function disables hot-plug detection for the given bridge connector.
+ * This is typically used by display drivers in their suspend handler.
+ */
+void drm_bridge_connector_disable_hpd(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *hpd = bridge_connector->bridge_hpd;
+
+ if (hpd)
+ drm_bridge_hpd_disable(hpd);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_connector_disable_hpd);
+
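+/*
+ * An editorial illustration (not part of this patch): a display driver would
+ * typically hook the two functions above into its suspend and resume paths,
+ * roughly as follows. The foo_* names are hypothetical placeholders.
+ *
+ * static int foo_display_suspend(struct foo_drm *priv)
+ * {
+ *         // Stop HPD processing while the hardware is powered down.
+ *         drm_bridge_connector_disable_hpd(priv->connector);
+ *
+ *         return drm_mode_config_helper_suspend(priv->drm);
+ * }
+ *
+ * static int foo_display_resume(struct foo_drm *priv)
+ * {
+ *         int ret;
+ *
+ *         ret = drm_mode_config_helper_resume(priv->drm);
+ *
+ *         // Re-arm HPD once the pipeline is back up.
+ *         drm_bridge_connector_enable_hpd(priv->connector);
+ *
+ *         return ret;
+ * }
+ */
+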
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Functions
+ */
+
+static enum drm_connector_status
+drm_bridge_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *detect = bridge_connector->bridge_detect;
+ enum drm_connector_status status;
+
+ if (detect) {
+ status = detect->funcs->detect(detect);
+
+ drm_bridge_connector_hpd_notify(connector, status);
+ } else {
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DPI:
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_DSI:
+ status = connector_status_connected;
+ break;
+ default:
+ status = connector_status_unknown;
+ break;
+ }
+ }
+
+ return status;
+}
+
+static void drm_bridge_connector_destroy(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+
+ if (bridge_connector->bridge_hpd) {
+ struct drm_bridge *hpd = bridge_connector->bridge_hpd;
+
+ drm_bridge_hpd_disable(hpd);
+ }
+
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+
+ kfree(bridge_connector);
+}
+
+static const struct drm_connector_funcs drm_bridge_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .detect = drm_bridge_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_bridge_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Helper Functions
+ */
+
+static int drm_bridge_connector_get_modes_edid(struct drm_connector *connector,
+ struct drm_bridge *bridge)
+{
+ enum drm_connector_status status;
+ struct edid *edid;
+ int n;
+
+ status = drm_bridge_connector_detect(connector, false);
+ if (status != connector_status_connected)
+ goto no_edid;
+
+ edid = bridge->funcs->get_edid(bridge, connector);
+ if (!edid || !drm_edid_is_valid(edid)) {
+ kfree(edid);
+ goto no_edid;
+ }
+
+ drm_connector_update_edid_property(connector, edid);
+ n = drm_add_edid_modes(connector, edid);
+
+ kfree(edid);
+ return n;
+
+no_edid:
+ drm_connector_update_edid_property(connector, NULL);
+ return 0;
+}
+
+static int drm_bridge_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ /*
+ * If the display exposes an EDID, parse it in the normal way to build
+ * the table of supported modes.
+ */
+ bridge = bridge_connector->bridge_edid;
+ if (bridge)
+ return drm_bridge_connector_get_modes_edid(connector, bridge);
+
+ /*
+ * Otherwise if the display pipeline reports modes (e.g. with a fixed
+ * resolution panel or an analog TV output), query it.
+ */
+ bridge = bridge_connector->bridge_modes;
+ if (bridge)
+ return bridge->funcs->get_modes(bridge, connector);
+
+ /*
+ * We can't retrieve modes, which can happen for instance for a DVI or
+ * VGA output with the DDC bus unconnected. The KMS core will add the
+ * default modes.
+ */
+ return 0;
+}
+
+static const struct drm_connector_helper_funcs drm_bridge_connector_helper_funcs = {
+ .get_modes = drm_bridge_connector_get_modes,
+ /* No need for .mode_valid(), the bridges are checked by the core. */
+};
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Initialisation
+ */
+
+/**
+ * drm_bridge_connector_init - Initialise a connector for a chain of bridges
+ * @drm: the DRM device
+ * @encoder: the encoder where the bridge chain starts
+ *
+ * Allocate, initialise and register a &drm_bridge_connector with the @drm
+ * device. The connector is associated with a chain of bridges that starts at
+ * the @encoder. All bridges in the chain shall report bridge operation flags
+ * (&drm_bridge->ops) and bridge output type (&drm_bridge->type), and none of
+ * them may create a DRM connector directly.
+ *
+ * Returns a pointer to the new connector on success, or an error pointer
+ * otherwise.
+ */
+struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
+ struct drm_encoder *encoder)
+{
+ struct drm_bridge_connector *bridge_connector;
+ struct drm_connector *connector;
+ struct i2c_adapter *ddc = NULL;
+ struct drm_bridge *bridge;
+ int connector_type;
+ int ret;
+
+ bridge_connector = kzalloc(sizeof(*bridge_connector), GFP_KERNEL);
+ if (!bridge_connector)
+ return ERR_PTR(-ENOMEM);
+
+ bridge_connector->encoder = encoder;
+
+ /*
+ * TODO: Handle doublescan_allowed, stereo_allowed and
+ * ycbcr_420_allowed.
+ */
+ connector = &bridge_connector->base;
+ connector->interlace_allowed = true;
+
+ /*
+ * Initialise connector status handling. First locate the furthest
+ * bridges in the pipeline that support HPD and output detection. Then
+ * initialise the connector polling mode, using HPD if available and
+ * falling back to polling if supported. If neither HPD nor output
+ * detection are available, we don't support hotplug detection at all.
+ */
+ connector_type = DRM_MODE_CONNECTOR_Unknown;
+ drm_for_each_bridge_in_chain(encoder, bridge) {
+ if (!bridge->interlace_allowed)
+ connector->interlace_allowed = false;
+
+ if (bridge->ops & DRM_BRIDGE_OP_EDID)
+ bridge_connector->bridge_edid = bridge;
+ if (bridge->ops & DRM_BRIDGE_OP_HPD)
+ bridge_connector->bridge_hpd = bridge;
+ if (bridge->ops & DRM_BRIDGE_OP_DETECT)
+ bridge_connector->bridge_detect = bridge;
+ if (bridge->ops & DRM_BRIDGE_OP_MODES)
+ bridge_connector->bridge_modes = bridge;
+
+ if (!drm_bridge_get_next_bridge(bridge))
+ connector_type = bridge->type;
+
+ if (bridge->ddc)
+ ddc = bridge->ddc;
+ }
+
+ if (connector_type == DRM_MODE_CONNECTOR_Unknown) {
+ kfree(bridge_connector);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = drm_connector_init_with_ddc(drm, connector,
+ &drm_bridge_connector_funcs,
+ connector_type, ddc);
+ if (ret) {
+ kfree(bridge_connector);
+ return ERR_PTR(ret);
+ }
+ drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
+
+ if (bridge_connector->bridge_hpd)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else if (bridge_connector->bridge_detect)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT
+ | DRM_CONNECTOR_POLL_DISCONNECT;
+
+ return connector;
+}
+EXPORT_SYMBOL_GPL(drm_bridge_connector_init);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 8ce9d73fab4f..dcabf5698333 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -134,7 +134,7 @@ static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
shift, add);
}
-/**
+/*
* Core function to create a range of memory available for mapping by a
* non-root process.
*
@@ -149,7 +149,6 @@ static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
{
struct drm_local_map *map;
struct drm_map_list *list;
- drm_dma_handle_t *dmah;
unsigned long user_token;
int ret;
@@ -324,14 +323,14 @@ static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
* As we're limiting the address to 2^32-1 (or less),
* casting it down to 32 bits is no problem, but we
* need to point to a 64bit variable first. */
- dmah = drm_pci_alloc(dev, map->size, map->size);
- if (!dmah) {
+ map->handle = dma_alloc_coherent(&dev->pdev->dev,
+ map->size,
+ &map->offset,
+ GFP_KERNEL);
+ if (!map->handle) {
kfree(map);
return -ENOMEM;
}
- map->handle = dmah->vaddr;
- map->offset = (unsigned long)dmah->busaddr;
- kfree(dmah);
break;
default:
kfree(map);
@@ -399,7 +398,7 @@ struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_legacy_findmap);
-/**
+/*
* Ioctl to specify a range of memory that is available for mapping by a
* non-root process.
*
@@ -500,7 +499,7 @@ int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Remove a map private from list and deallocate resources if the mapping
* isn't in use.
*
@@ -513,7 +512,6 @@ int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
struct drm_map_list *r_list = NULL, *list_t;
- drm_dma_handle_t dmah;
int found = 0;
struct drm_master *master;
@@ -554,10 +552,10 @@ int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
case _DRM_SCATTER_GATHER:
break;
case _DRM_CONSISTENT:
- dmah.vaddr = map->handle;
- dmah.busaddr = map->offset;
- dmah.size = map->size;
- __drm_legacy_pci_free(dev, &dmah);
+ dma_free_coherent(&dev->pdev->dev,
+ map->size,
+ map->handle,
+ map->offset);
break;
}
kfree(map);
@@ -661,7 +659,7 @@ int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
return ret;
}
-/**
+/*
* Cleanup after an error on one of the addbufs() functions.
*
* \param dev DRM device.
@@ -696,7 +694,7 @@ static void drm_cleanup_buf_error(struct drm_device *dev,
}
#if IS_ENABLED(CONFIG_AGP)
-/**
+/*
* Add AGP buffers for DMA transfers.
*
* \param dev struct drm_device to which the buffers are to be added.
@@ -1232,7 +1230,7 @@ static int drm_legacy_addbufs_sg(struct drm_device *dev,
return 0;
}
-/**
+/*
* Add buffers for DMA transfers (ioctl).
*
* \param inode device inode.
@@ -1273,7 +1271,7 @@ int drm_legacy_addbufs(struct drm_device *dev, void *data,
return ret;
}
-/**
+/*
* Get information about the buffer mappings.
*
* This was originally meant for debugging purposes, or by a sophisticated
@@ -1364,7 +1362,7 @@ int drm_legacy_infobufs(struct drm_device *dev, void *data,
return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
}
-/**
+/*
* Specifies a low and high water mark for buffer allocation
*
* \param inode device inode.
@@ -1413,7 +1411,7 @@ int drm_legacy_markbufs(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Unreserve the buffers in list, previously reserved using drmDMA.
*
* \param inode device inode.
@@ -1465,7 +1463,7 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Maps all of the DMA buffers into client-virtual space (ioctl).
*
* \param inode device inode.
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index b031b45aa8ef..6b0c6ef8b9b3 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0 or MIT
/*
* Copyright 2018 Noralf Trønnes
*/
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 6d4a29e99ae2..7443114bd713 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -951,7 +951,8 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation)
* depending on the hardware this may require the framebuffer
* to be in a specific tiling format.
*/
- if ((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180 ||
+ if (((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0 &&
+ (*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180) ||
!plane->rotation_property)
return false;
@@ -1094,15 +1095,17 @@ out:
}
/**
- * drm_client_modeset_commit_force() - Force commit CRTC configuration
+ * drm_client_modeset_commit_locked() - Force commit CRTC configuration
* @client: DRM client
*
- * Commit modeset configuration to crtcs without checking if there is a DRM master.
+ * Commit modeset configuration to crtcs without checking if there is a DRM
+ * master. The assumption is that the caller already holds an internal DRM
+ * master reference acquired with drm_master_internal_acquire().
*
* Returns:
* Zero on success or negative error code on failure.
*/
-int drm_client_modeset_commit_force(struct drm_client_dev *client)
+int drm_client_modeset_commit_locked(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
int ret;
@@ -1116,7 +1119,7 @@ int drm_client_modeset_commit_force(struct drm_client_dev *client)
return ret;
}
-EXPORT_SYMBOL(drm_client_modeset_commit_force);
+EXPORT_SYMBOL(drm_client_modeset_commit_locked);
/**
* drm_client_modeset_commit() - Commit CRTC configuration
@@ -1135,7 +1138,7 @@ int drm_client_modeset_commit(struct drm_client_dev *client)
if (!drm_master_internal_acquire(dev))
return -EBUSY;
- ret = drm_client_modeset_commit_force(client);
+ ret = drm_client_modeset_commit_locked(client);
drm_master_internal_release(dev);
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 2166000ed057..644f0ad10671 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -112,6 +112,21 @@ void drm_connector_ida_destroy(void)
}
/**
+ * drm_get_connector_type_name - return a string for connector type
+ * @type: The connector type (DRM_MODE_CONNECTOR_*)
+ *
+ * Returns: the name of the connector type, or NULL if the type is not valid.
+ */
+const char *drm_get_connector_type_name(unsigned int type)
+{
+ if (type < ARRAY_SIZE(drm_connector_enum_list))
+ return drm_connector_enum_list[type].name;
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_get_connector_type_name);
+
+/**
* drm_connector_get_cmdline_mode - reads the user's cmdline mode
* @connector: connector to query
*
@@ -140,6 +155,13 @@ static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
connector->force = mode->force;
}
+ if (mode->panel_orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) {
+ DRM_INFO("cmdline forces connector %s panel_orientation to %d\n",
+ connector->name, mode->panel_orientation);
+ drm_connector_set_panel_orientation(connector,
+ mode->panel_orientation);
+ }
+
DRM_DEBUG_KMS("cmdline mode for connector %s %s %dx%d@%dHz%s%s%s\n",
connector->name, mode->name,
mode->xres, mode->yres,
@@ -1139,7 +1161,8 @@ static const struct drm_prop_enum_list dp_colorspaces[] = {
* coordinates, so if userspace rotates the picture to adjust for
* the orientation it must also apply the same transformation to the
* touchscreen input coordinates. This property is initialized by calling
- * drm_connector_init_panel_orientation_property().
+ * drm_connector_set_panel_orientation() or
+ * drm_connector_set_panel_orientation_with_quirk().
*
* scaling mode:
* This property defines how a non-native mode is upscaled to the native
@@ -2046,38 +2069,41 @@ void drm_connector_set_vrr_capable_property(
EXPORT_SYMBOL(drm_connector_set_vrr_capable_property);
/**
- * drm_connector_init_panel_orientation_property -
- * initialize the connecters panel_orientation property
- * @connector: connector for which to init the panel-orientation property.
- * @width: width in pixels of the panel, used for panel quirk detection
- * @height: height in pixels of the panel, used for panel quirk detection
+ * drm_connector_set_panel_orientation - sets the connector's panel_orientation
+ * @connector: connector for which to set the panel-orientation property.
+ * @panel_orientation: drm_panel_orientation value to set
+ *
+ * This function sets the connector's panel_orientation and attaches
+ * a "panel orientation" property to the connector.
*
- * This function should only be called for built-in panels, after setting
- * connector->display_info.panel_orientation first (if known).
+ * Calling this function on a connector where the panel_orientation has
+ * already been set is a no-op (e.g. the orientation has been overridden with
+ * a kernel commandline option).
*
- * This function will check for platform specific (e.g. DMI based) quirks
- * overriding display_info.panel_orientation first, then if panel_orientation
- * is not DRM_MODE_PANEL_ORIENTATION_UNKNOWN it will attach the
- * "panel orientation" property to the connector.
+ * It is allowed to call this function with a panel_orientation of
+ * DRM_MODE_PANEL_ORIENTATION_UNKNOWN, in which case it is a no-op.
*
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_connector_init_panel_orientation_property(
- struct drm_connector *connector, int width, int height)
+int drm_connector_set_panel_orientation(
+ struct drm_connector *connector,
+ enum drm_panel_orientation panel_orientation)
{
struct drm_device *dev = connector->dev;
struct drm_display_info *info = &connector->display_info;
struct drm_property *prop;
- int orientation_quirk;
- orientation_quirk = drm_get_panel_orientation_quirk(width, height);
- if (orientation_quirk != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
- info->panel_orientation = orientation_quirk;
+ /* Already set? */
+ if (info->panel_orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ return 0;
- if (info->panel_orientation == DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ /* Don't attach the property if the orientation is unknown */
+ if (panel_orientation == DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return 0;
+ info->panel_orientation = panel_orientation;
+
prop = dev->mode_config.panel_orientation_property;
if (!prop) {
prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
@@ -2094,7 +2120,37 @@ int drm_connector_init_panel_orientation_property(
info->panel_orientation);
return 0;
}
-EXPORT_SYMBOL(drm_connector_init_panel_orientation_property);
+EXPORT_SYMBOL(drm_connector_set_panel_orientation);
+
+/**
+ * drm_connector_set_panel_orientation_with_quirk -
+ * set the connector's panel_orientation after checking for quirks
+ * @connector: connector for which to set the panel-orientation property.
+ * @panel_orientation: drm_panel_orientation value to set
+ * @width: width in pixels of the panel, used for panel quirk detection
+ * @height: height in pixels of the panel, used for panel quirk detection
+ *
+ * Like drm_connector_set_panel_orientation(), but with a check for platform
+ * specific (e.g. DMI based) quirks overriding the passed in panel_orientation.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_set_panel_orientation_with_quirk(
+ struct drm_connector *connector,
+ enum drm_panel_orientation panel_orientation,
+ int width, int height)
+{
+ int orientation_quirk;
+
+ orientation_quirk = drm_get_panel_orientation_quirk(width, height);
+ if (orientation_quirk != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ panel_orientation = orientation_quirk;
+
+ return drm_connector_set_panel_orientation(connector,
+ panel_orientation);
+}
+EXPORT_SYMBOL(drm_connector_set_panel_orientation_with_quirk);
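
As a usage illustration (editorial, not part of this patch), a driver for a
built-in portrait panel could register its orientation as follows; the
orientation value and the 1080x1920 mode size are assumptions:

static void foo_panel_set_orientation(struct drm_connector *connector)
{
        /* Quirk detection may still override the passed-in value. */
        drm_connector_set_panel_orientation_with_quirk(connector,
                        DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
                        1080, 1920);
}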
int drm_connector_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 1f802d8e5681..c99be950bf17 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -47,7 +47,7 @@ struct drm_ctx_list {
/** \name Context bitmap support */
/*@{*/
-/**
+/*
* Free a handle from the context bitmap.
*
* \param dev DRM device.
@@ -68,7 +68,7 @@ void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
mutex_unlock(&dev->struct_mutex);
}
-/**
+/*
* Context bitmap allocation.
*
* \param dev DRM device.
@@ -88,7 +88,7 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
return ret;
}
-/**
+/*
* Context bitmap initialization.
*
* \param dev DRM device.
@@ -104,7 +104,7 @@ void drm_legacy_ctxbitmap_init(struct drm_device * dev)
idr_init(&dev->ctx_idr);
}
-/**
+/*
* Context bitmap cleanup.
*
* \param dev DRM device.
@@ -163,7 +163,7 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
/** \name Per Context SAREA Support */
/*@{*/
-/**
+/*
* Get per-context SAREA.
*
* \param inode device inode.
@@ -211,7 +211,7 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Set per-context SAREA.
*
* \param inode device inode.
@@ -263,7 +263,7 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data,
/** \name The actual DRM context handling routines */
/*@{*/
-/**
+/*
* Switch context.
*
* \param dev DRM device.
@@ -290,7 +290,7 @@ static int drm_context_switch(struct drm_device * dev, int old, int new)
return 0;
}
-/**
+/*
* Complete context switch.
*
* \param dev DRM device.
@@ -318,7 +318,7 @@ static int drm_context_switch_complete(struct drm_device *dev,
return 0;
}
-/**
+/*
* Reserve contexts.
*
* \param inode device inode.
@@ -351,7 +351,7 @@ int drm_legacy_resctx(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Add context.
*
* \param inode device inode.
@@ -404,7 +404,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Get context.
*
* \param inode device inode.
@@ -428,7 +428,7 @@ int drm_legacy_getctx(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Switch context.
*
* \param inode device inode.
@@ -452,7 +452,7 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data,
return drm_context_switch(dev, dev->last_context, ctx->handle);
}
-/**
+/*
* New context.
*
* \param inode device inode.
@@ -478,7 +478,7 @@ int drm_legacy_newctx(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Remove context.
*
* \param inode device inode.
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 93a4eec429e8..a4d36aca45ea 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -244,10 +244,6 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
/* Disable unused encoders */
if (encoder->crtc == NULL)
drm_encoder_disable(encoder);
- /* Disable encoders whose CRTC is about to change */
- if (encoder_funcs->get_crtc &&
- encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
- drm_encoder_disable(encoder);
}
}
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index c7d5e4c21423..16f2413403aa 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -216,6 +216,8 @@ int drm_mode_rmfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
+int drm_mode_getfb2_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index eab0f2687cd6..4e673d318503 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -182,8 +182,7 @@ int drm_debugfs_create_files(const struct drm_info_list *files, int count,
for (i = 0; i < count; i++) {
u32 features = files[i].driver_features;
- if (features != 0 &&
- (dev->driver->driver_features & features) != features)
+ if (features && !drm_core_check_all_features(dev, features))
continue;
tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index e22b812c4b80..5d67a41f7c3a 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -372,7 +372,7 @@ void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
crc_ent = debugfs_create_dir("crc", crtc->debugfs_entry);
- debugfs_create_file("control", S_IRUGO, crc_ent, crtc,
+ debugfs_create_file("control", S_IRUGO | S_IWUSR, crc_ent, crtc,
&drm_crtc_crc_control_fops);
debugfs_create_file("data", S_IRUGO, crc_ent, crtc,
&drm_crtc_crc_data_fops);
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index e45b07890c5a..a7add55a85b4 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -42,10 +42,10 @@
#include "drm_legacy.h"
/**
- * Initialize the DMA data.
+ * drm_legacy_dma_setup() - Initialize the DMA data.
*
- * \param dev DRM device.
- * \return zero on success or a negative value on failure.
+ * @dev: DRM device.
+ * Return: zero on success or a negative value on failure.
*
* Allocate and initialize a drm_device_dma structure.
*/
@@ -71,9 +71,9 @@ int drm_legacy_dma_setup(struct drm_device *dev)
}
/**
- * Cleanup the DMA resources.
+ * drm_legacy_dma_takedown() - Cleanup the DMA resources.
*
- * \param dev DRM device.
+ * @dev: DRM device.
*
* Free all pages associated with DMA buffers, the buffers and pages lists, and
* finally the drm_device::dma structure itself.
@@ -120,10 +120,10 @@ void drm_legacy_dma_takedown(struct drm_device *dev)
}
/**
- * Free a buffer.
+ * drm_legacy_free_buffer() - Free a buffer.
*
- * \param dev DRM device.
- * \param buf buffer to free.
+ * @dev: DRM device.
+ * @buf: buffer to free.
*
* Resets the fields of \p buf.
*/
@@ -139,9 +139,10 @@ void drm_legacy_free_buffer(struct drm_device *dev, struct drm_buf * buf)
}
/**
- * Reclaim the buffers.
+ * drm_legacy_reclaim_buffers() - Reclaim the buffers.
*
- * \param file_priv DRM file private.
+ * @dev: DRM device.
+ * @file_priv: DRM file private.
*
* Frees each buffer associated with \p file_priv not already on the hardware.
*/
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index a5364b5192b8..c6fbe6e6bc9d 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -362,6 +362,65 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
/**
+ * drm_dp_send_real_edid_checksum() - send back real edid checksum value
+ * @aux: DisplayPort AUX channel
+ * @real_edid_checksum: real edid checksum for the last block
+ *
+ * Returns:
+ * True on success
+ */
+bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
+ u8 real_edid_checksum)
+{
+ u8 link_edid_read = 0, auto_test_req = 0, test_resp = 0;
+
+ if (drm_dp_dpcd_read(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ &auto_test_req, 1) < 1) {
+ DRM_ERROR("DPCD failed read at register 0x%x\n",
+ DP_DEVICE_SERVICE_IRQ_VECTOR);
+ return false;
+ }
+ auto_test_req &= DP_AUTOMATED_TEST_REQUEST;
+
+ if (drm_dp_dpcd_read(aux, DP_TEST_REQUEST, &link_edid_read, 1) < 1) {
+ DRM_ERROR("DPCD failed read at register 0x%x\n",
+ DP_TEST_REQUEST);
+ return false;
+ }
+ link_edid_read &= DP_TEST_LINK_EDID_READ;
+
+ if (!auto_test_req || !link_edid_read) {
+ DRM_DEBUG_KMS("Source DUT does not support TEST_EDID_READ\n");
+ return false;
+ }
+
+ if (drm_dp_dpcd_write(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ &auto_test_req, 1) < 1) {
+ DRM_ERROR("DPCD failed write at register 0x%x\n",
+ DP_DEVICE_SERVICE_IRQ_VECTOR);
+ return false;
+ }
+
+ /* send back checksum for the last edid extension block data */
+ if (drm_dp_dpcd_write(aux, DP_TEST_EDID_CHECKSUM,
+ &real_edid_checksum, 1) < 1) {
+ DRM_ERROR("DPCD failed write at register 0x%x\n",
+ DP_TEST_EDID_CHECKSUM);
+ return false;
+ }
+
+ test_resp |= DP_TEST_EDID_CHECKSUM_WRITE;
+ if (drm_dp_dpcd_write(aux, DP_TEST_RESPONSE, &test_resp, 1) < 1) {
+ DRM_ERROR("DPCD failed write at register 0x%x\n",
+ DP_TEST_RESPONSE);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_send_real_edid_checksum);
+
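
For illustration (an editorial sketch, not part of this patch), a source
driver answering an automated TEST_EDID_READ request could reply with the
checksum byte stored in the last EDID block it read; edid is assumed to point
to that block:

static void foo_handle_edid_test_request(struct drm_dp_aux *aux,
                                         const struct edid *edid)
{
        /* The last byte of an EDID block is its checksum. */
        if (!edid || !drm_dp_send_real_edid_checksum(aux, edid->checksum))
                DRM_DEBUG_KMS("failed to reply to TEST_EDID_READ\n");
}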
+/**
* drm_dp_downstream_max_clock() - extract branch device max
* pixel rate for legacy VGA
* converter or max TMDS clock
@@ -470,8 +529,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
int len;
uint8_t rev[2];
int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
- bool branch_device = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
- DP_DWN_STRM_PORT_PRESENT;
+ bool branch_device = drm_dp_is_branch(dpcd);
seq_printf(m, "\tDP branch device present: %s\n",
branch_device ? "yes" : "no");
@@ -1222,6 +1280,85 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
#undef DEVICE_ID_ANY
#undef DEVICE_ID
+struct edid_quirk {
+ u8 mfg_id[2];
+ u8 prod_id[2];
+ u32 quirks;
+};
+
+#define MFG(first, second) { (first), (second) }
+#define PROD_ID(first, second) { (first), (second) }
+
+/*
+ * Some devices have unreliable OUIDs where they don't set the device ID
+ * correctly, and as a result we need to use the EDID for finding additional
+ * DP quirks in such cases.
+ */
+static const struct edid_quirk edid_quirk_list[] = {
+ /* Optional 4K AMOLED panel in the ThinkPad X1 Extreme 2nd Generation
+ * only supports DPCD backlight controls
+ */
+ { MFG(0x4c, 0x83), PROD_ID(0x41, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ /*
+ * Some Dell CML 2020 systems have panels that support both AUX and PWM
+ * backlight control, and some only support AUX backlight control. All
+ * said panels start up in AUX mode by default, and we don't have any
+ * support for disabling HDR mode on these panels which would be
+ * required to switch to PWM backlight control mode (plus, I'm not
+ * even sure we want PWM backlight controls over DPCD backlight
+ * controls anyway...). Until we have a better way of detecting these,
+ * force DPCD backlight mode on all of them.
+ */
+ { MFG(0x06, 0xaf), PROD_ID(0x9b, 0x32), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ { MFG(0x06, 0xaf), PROD_ID(0xeb, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ { MFG(0x4d, 0x10), PROD_ID(0xc7, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ { MFG(0x4d, 0x10), PROD_ID(0xe6, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+};
+
+#undef MFG
+#undef PROD_ID
+
+/**
+ * drm_dp_get_edid_quirks() - Check the EDID of a DP device to find additional
+ * DP-specific quirks
+ * @edid: The EDID to check
+ *
+ * While OUIDs are meant to be used to recognize a DisplayPort device, a lot
+ * of manufacturers don't seem to like following standards and neglect to fill
+ * the dev-ID in, making it impossible to only use OUIDs for determining
+ * quirks in some cases. This function can be used to check the EDID and look
+ * up any additional DP quirks. The bits returned by this function correspond
+ * to the quirk bits in &drm_dp_quirk.
+ *
+ * Returns: a bitmask of quirks, if any. The driver can check this using
+ * drm_dp_has_quirk().
+ */
+u32 drm_dp_get_edid_quirks(const struct edid *edid)
+{
+ const struct edid_quirk *quirk;
+ u32 quirks = 0;
+ int i;
+
+ if (!edid)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
+ quirk = &edid_quirk_list[i];
+ if (memcmp(quirk->mfg_id, edid->mfg_id,
+ sizeof(edid->mfg_id)) == 0 &&
+ memcmp(quirk->prod_id, edid->prod_code,
+ sizeof(edid->prod_code)) == 0)
+ quirks |= quirk->quirks;
+ }
+
+ DRM_DEBUG_KMS("DP sink: EDID mfg %*phD prod-ID %*phD quirks: 0x%04x\n",
+ (int)sizeof(edid->mfg_id), edid->mfg_id,
+ (int)sizeof(edid->prod_code), edid->prod_code, quirks);
+
+ return quirks;
+}
+EXPORT_SYMBOL(drm_dp_get_edid_quirks);
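For illustration, a sketch of how a caller could combine the OUI-based descriptor quirks with the new EDID-based ones. The wrapper name is hypothetical; drm_dp_read_desc() and the widened drm_dp_has_quirk() (which takes the EDID quirk mask as its second argument, see the MST hunk below) are real.

static bool example_wants_dpcd_backlight(struct drm_dp_aux *aux,
					 const struct edid *edid)
{
	struct drm_dp_desc desc;

	/* OUI/device-ID based quirks from the DPCD descriptor */
	if (drm_dp_read_desc(aux, &desc, false))
		return false;

	/* ... merged with the EDID-based quirks added above */
	return drm_dp_has_quirk(&desc, drm_dp_get_edid_quirks(edid),
				DP_QUIRK_FORCE_DPCD_BACKLIGHT);
}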
+
/**
* drm_dp_read_desc - read sink/branch descriptor from DPCD
* @aux: DisplayPort AUX channel
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 38bf111e5f9b..4b255e25e4a1 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -736,6 +736,10 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
if (msg->curchunk_idx >= msg->curchunk_len) {
/* do CRC */
crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
+ if (crc4 != msg->chunk[msg->curchunk_len - 1])
+ print_hex_dump(KERN_DEBUG, "wrong crc",
+ DUMP_PREFIX_NONE, 16, 1,
+ msg->chunk, msg->curchunk_len, false);
/* copy chunk into bigger msg */
memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
msg->curlen += msg->curchunk_len - 1;
@@ -1035,7 +1039,8 @@ static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
}
}
-static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
+static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
+ u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
struct drm_dp_sideband_msg_req_body req;
@@ -1045,17 +1050,14 @@ static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32
req.u.dpcd_write.num_bytes = num_bytes;
req.u.dpcd_write.bytes = bytes;
drm_dp_encode_sideband_req(&req, msg);
-
- return 0;
}
-static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
+static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
struct drm_dp_sideband_msg_req_body req;
req.req_type = DP_LINK_ADDRESS;
drm_dp_encode_sideband_req(&req, msg);
- return 0;
}
static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
@@ -1067,7 +1069,8 @@ static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
return 0;
}
-static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
+static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
+ int port_num)
{
struct drm_dp_sideband_msg_req_body req;
@@ -1078,10 +1081,11 @@ static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int por
return 0;
}
-static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
- u8 vcpi, uint16_t pbn,
- u8 number_sdp_streams,
- u8 *sdp_stream_sink)
+static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
+ int port_num,
+ u8 vcpi, uint16_t pbn,
+ u8 number_sdp_streams,
+ u8 *sdp_stream_sink)
{
struct drm_dp_sideband_msg_req_body req;
memset(&req, 0, sizeof(req));
@@ -1094,11 +1098,10 @@ static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_n
number_sdp_streams);
drm_dp_encode_sideband_req(&req, msg);
msg->path_msg = true;
- return 0;
}
-static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
- int port_num, bool power_up)
+static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
+ int port_num, bool power_up)
{
struct drm_dp_sideband_msg_req_body req;
@@ -1110,7 +1113,6 @@ static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
req.u.port_num.port_number = port_num;
drm_dp_encode_sideband_req(&req, msg);
msg->path_msg = true;
- return 0;
}
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
@@ -1935,73 +1937,90 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
return parent_lct + 1;
}
-static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt)
+static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
+{
+ switch (pdt) {
+ case DP_PEER_DEVICE_DP_LEGACY_CONV:
+ case DP_PEER_DEVICE_SST_SINK:
+ return true;
+ case DP_PEER_DEVICE_MST_BRANCHING:
+ /* An MST branch without the message capability status
+ * bit set is an SST-only branch device, i.e. an end
+ * device for our purposes.
+ */
+ return !mcs;
+ }
+ return true;
+}
+
+static int
+drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
+ bool new_mcs)
{
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
struct drm_dp_mst_branch *mstb;
u8 rad[8], lct;
int ret = 0;
- if (port->pdt == new_pdt)
+ if (port->pdt == new_pdt && port->mcs == new_mcs)
return 0;
/* Teardown the old pdt, if there is one */
- switch (port->pdt) {
- case DP_PEER_DEVICE_DP_LEGACY_CONV:
- case DP_PEER_DEVICE_SST_SINK:
- /*
- * If the new PDT would also have an i2c bus, don't bother
- * with reregistering it
- */
- if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
- new_pdt == DP_PEER_DEVICE_SST_SINK) {
- port->pdt = new_pdt;
- return 0;
- }
+ if (port->pdt != DP_PEER_DEVICE_NONE) {
+ if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+ /*
+ * If the new PDT would also have an i2c bus,
+ * don't bother with reregistering it
+ */
+ if (new_pdt != DP_PEER_DEVICE_NONE &&
+ drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
+ port->pdt = new_pdt;
+ port->mcs = new_mcs;
+ return 0;
+ }
- /* remove i2c over sideband */
- drm_dp_mst_unregister_i2c_bus(&port->aux);
- break;
- case DP_PEER_DEVICE_MST_BRANCHING:
- mutex_lock(&mgr->lock);
- drm_dp_mst_topology_put_mstb(port->mstb);
- port->mstb = NULL;
- mutex_unlock(&mgr->lock);
- break;
+ /* remove i2c over sideband */
+ drm_dp_mst_unregister_i2c_bus(&port->aux);
+ } else {
+ mutex_lock(&mgr->lock);
+ drm_dp_mst_topology_put_mstb(port->mstb);
+ port->mstb = NULL;
+ mutex_unlock(&mgr->lock);
+ }
}
port->pdt = new_pdt;
- switch (port->pdt) {
- case DP_PEER_DEVICE_DP_LEGACY_CONV:
- case DP_PEER_DEVICE_SST_SINK:
- /* add i2c over sideband */
- ret = drm_dp_mst_register_i2c_bus(&port->aux);
- break;
+ port->mcs = new_mcs;
- case DP_PEER_DEVICE_MST_BRANCHING:
- lct = drm_dp_calculate_rad(port, rad);
- mstb = drm_dp_add_mst_branch_device(lct, rad);
- if (!mstb) {
- ret = -ENOMEM;
- DRM_ERROR("Failed to create MSTB for port %p", port);
- goto out;
- }
+ if (port->pdt != DP_PEER_DEVICE_NONE) {
+ if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+ /* add i2c over sideband */
+ ret = drm_dp_mst_register_i2c_bus(&port->aux);
+ } else {
+ lct = drm_dp_calculate_rad(port, rad);
+ mstb = drm_dp_add_mst_branch_device(lct, rad);
+ if (!mstb) {
+ ret = -ENOMEM;
+ DRM_ERROR("Failed to create MSTB for port %p",
+ port);
+ goto out;
+ }
- mutex_lock(&mgr->lock);
- port->mstb = mstb;
- mstb->mgr = port->mgr;
- mstb->port_parent = port;
+ mutex_lock(&mgr->lock);
+ port->mstb = mstb;
+ mstb->mgr = port->mgr;
+ mstb->port_parent = port;
- /*
- * Make sure this port's memory allocation stays
- * around until its child MSTB releases it
- */
- drm_dp_mst_get_port_malloc(port);
- mutex_unlock(&mgr->lock);
+ /*
+ * Make sure this port's memory allocation stays
+ * around until its child MSTB releases it
+ */
+ drm_dp_mst_get_port_malloc(port);
+ mutex_unlock(&mgr->lock);
- /* And make sure we send a link address for this */
- ret = 1;
- break;
+ /* And make sure we send a link address for this */
+ ret = 1;
+ }
}
out:
@@ -2044,7 +2063,7 @@ ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
* sideband messaging as drm_dp_dpcd_write() does for local
* devices via actual AUX CH.
*
- * Return: 0 on success, negative error code on failure.
+ * Return: number of bytes written on success, negative error code on failure.
*/
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
unsigned int offset, void *buffer, size_t size)
@@ -2056,29 +2075,27 @@ ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
offset, size, buffer);
}
-static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
+static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
- int ret;
+ int ret = 0;
memcpy(mstb->guid, guid, 16);
if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
if (mstb->port_parent) {
- ret = drm_dp_send_dpcd_write(
- mstb->mgr,
- mstb->port_parent,
- DP_GUID,
- 16,
- mstb->guid);
+ ret = drm_dp_send_dpcd_write(mstb->mgr,
+ mstb->port_parent,
+ DP_GUID, 16, mstb->guid);
} else {
-
- ret = drm_dp_dpcd_write(
- mstb->mgr->aux,
- DP_GUID,
- mstb->guid,
- 16);
+ ret = drm_dp_dpcd_write(mstb->mgr->aux,
+ DP_GUID, mstb->guid, 16);
}
}
+
+ if (ret < 16 && ret > 0)
+ return -EPROTO;
+
+ return ret == 16 ? 0 : ret;
}
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
@@ -2154,15 +2171,14 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
goto error;
}
- if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
- port->pdt == DP_PEER_DEVICE_SST_SINK) &&
- port->port_num >= DP_MST_LOGICAL_PORT_0) {
+ if (port->pdt != DP_PEER_DEVICE_NONE &&
+ drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
port->cached_edid = drm_get_edid(port->connector,
&port->aux.ddc);
drm_connector_set_tile_property(port->connector);
}
- mgr->cbs->register_connector(port->connector);
+ drm_connector_register(port->connector);
return;
error:
@@ -2224,6 +2240,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port;
int old_ddps = 0, ret;
u8 new_pdt = DP_PEER_DEVICE_NONE;
+ bool new_mcs = false;
bool created = false, send_link_addr = false, changed = false;
port = drm_dp_get_port(mstb, port_msg->port_number);
@@ -2268,7 +2285,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
port->input = port_msg->input_port;
if (!port->input)
new_pdt = port_msg->peer_device_type;
- port->mcs = port_msg->mcs;
+ new_mcs = port_msg->mcs;
port->ddps = port_msg->ddps;
port->ldps = port_msg->legacy_device_plug_status;
port->dpcd_rev = port_msg->dpcd_revision;
@@ -2296,7 +2313,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
}
}
- ret = drm_dp_port_set_pdt(port, new_pdt);
+ ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
if (ret == 1) {
send_link_addr = true;
} else if (ret < 0) {
@@ -2310,7 +2327,8 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
* we're coming out of suspend. In this case, always resend the link
* address if there's an MSTB on this port
*/
- if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING)
+ if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
+ port->mcs)
send_link_addr = true;
if (port->connector)
@@ -2347,6 +2365,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port;
int old_ddps, old_input, ret, i;
u8 new_pdt;
+ bool new_mcs;
bool dowork = false, create_connector = false;
port = drm_dp_get_port(mstb, conn_stat->port_number);
@@ -2378,7 +2397,6 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
old_ddps = port->ddps;
old_input = port->input;
port->input = conn_stat->input_port;
- port->mcs = conn_stat->message_capability_status;
port->ldps = conn_stat->legacy_device_plug_status;
port->ddps = conn_stat->displayport_device_plug_status;
@@ -2391,8 +2409,8 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
}
new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
-
- ret = drm_dp_port_set_pdt(port, new_pdt);
+ new_mcs = conn_stat->message_capability_status;
+ ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
if (ret == 1) {
dowork = true;
} else if (ret < 0) {
@@ -2627,7 +2645,8 @@ static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
return false;
}
-static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
+static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
+ u8 port_num, u32 offset, u8 num_bytes)
{
struct drm_dp_sideband_msg_req_body req;
@@ -2636,8 +2655,6 @@ static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32
req.u.dpcd_read.dpcd_address = offset;
req.u.dpcd_read.num_bytes = num_bytes;
drm_dp_encode_sideband_req(&req, msg);
-
- return 0;
}
static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
@@ -2863,7 +2880,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_link_address_ack_reply *reply;
struct drm_dp_mst_port *port, *tmp;
- int i, len, ret, port_mask = 0;
+ int i, ret, port_mask = 0;
bool changed = false;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -2871,7 +2888,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
return -ENOMEM;
txmsg->dst = mstb;
- len = build_link_address(txmsg);
+ build_link_address(txmsg);
mstb->link_address_sent = true;
drm_dp_queue_down_tx(mgr, txmsg);
@@ -2892,7 +2909,15 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
drm_dp_dump_link_address(reply);
- drm_dp_check_mstb_guid(mstb, reply->guid);
+ ret = drm_dp_check_mstb_guid(mstb, reply->guid);
+ if (ret) {
+ char buf[64];
+
+ drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
+ DRM_ERROR("GUID check on %s failed: %d\n",
+ buf, ret);
+ goto out;
+ }
for (i = 0; i < reply->nports; i++) {
port_mask |= BIT(reply->ports[i].port_number);
@@ -2933,14 +2958,14 @@ void drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
struct drm_dp_sideband_msg_tx *txmsg;
- int len, ret;
+ int ret;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
return;
txmsg->dst = mstb;
- len = build_clear_payload_id_table(txmsg);
+ build_clear_payload_id_table(txmsg);
drm_dp_queue_down_tx(mgr, txmsg);
@@ -2958,7 +2983,6 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
{
struct drm_dp_enum_path_resources_ack_reply *path_res;
struct drm_dp_sideband_msg_tx *txmsg;
- int len;
int ret;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -2966,7 +2990,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
return -ENOMEM;
txmsg->dst = mstb;
- len = build_enum_path_resources(txmsg, port->port_num);
+ build_enum_path_resources(txmsg, port->port_num);
drm_dp_queue_down_tx(mgr, txmsg);
@@ -3050,7 +3074,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
{
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
- int len, ret, port_num;
+ int ret, port_num;
u8 sinks[DRM_DP_MAX_SDP_STREAMS];
int i;
@@ -3075,9 +3099,9 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
sinks[i] = i;
txmsg->dst = mstb;
- len = build_allocate_payload(txmsg, port_num,
- id,
- pbn, port->num_sdp_streams, sinks);
+ build_allocate_payload(txmsg, port_num,
+ id,
+ pbn, port->num_sdp_streams, sinks);
drm_dp_queue_down_tx(mgr, txmsg);
@@ -3106,7 +3130,7 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, bool power_up)
{
struct drm_dp_sideband_msg_tx *txmsg;
- int len, ret;
+ int ret;
port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
@@ -3119,7 +3143,7 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
}
txmsg->dst = port->parent;
- len = build_power_updown_phy(txmsg, port->port_num, power_up);
+ build_power_updown_phy(txmsg, port->port_num, power_up);
drm_dp_queue_down_tx(mgr, txmsg);
ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
@@ -3341,7 +3365,6 @@ static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes)
{
- int len;
int ret = 0;
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
@@ -3356,7 +3379,7 @@ static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
goto fail_put;
}
- len = build_dpcd_read(txmsg, port->port_num, offset, size);
+ build_dpcd_read(txmsg, port->port_num, offset, size);
txmsg->dst = port->parent;
drm_dp_queue_down_tx(mgr, txmsg);
@@ -3394,7 +3417,6 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes)
{
- int len;
int ret;
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
@@ -3409,18 +3431,15 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
goto fail_put;
}
- len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
+ build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
txmsg->dst = mstb;
drm_dp_queue_down_tx(mgr, txmsg);
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
- if (ret > 0) {
- if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
- ret = -EIO;
- else
- ret = 0;
- }
+ if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+ ret = -EIO;
+
kfree(txmsg);
fail_put:
drm_dp_mst_topology_put_mstb(mstb);
@@ -3481,9 +3500,9 @@ static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
int ret = 0;
- int i = 0;
struct drm_dp_mst_branch *mstb = NULL;
+ mutex_lock(&mgr->payload_lock);
mutex_lock(&mgr->lock);
if (mst_state == mgr->mst_state)
goto out_unlock;
@@ -3491,6 +3510,8 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mgr->mst_state = mst_state;
/* set the device into MST mode */
if (mst_state) {
+ struct drm_dp_payload reset_pay;
+
WARN_ON(mgr->mst_primary);
/* get dpcd info */
@@ -3520,17 +3541,15 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
drm_dp_mst_topology_get_mstb(mgr->mst_primary);
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
- if (ret < 0) {
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
+ if (ret < 0)
goto out_unlock;
- }
- {
- struct drm_dp_payload reset_pay;
- reset_pay.start_slot = 0;
- reset_pay.num_slots = 0x3f;
- drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
- }
+ reset_pay.start_slot = 0;
+ reset_pay.num_slots = 0x3f;
+ drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
queue_work(system_long_wq, &mgr->work);
@@ -3542,27 +3561,19 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
/* this can fail if the device is gone */
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
- mutex_lock(&mgr->payload_lock);
- memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
+ memset(mgr->payloads, 0,
+ mgr->max_payloads * sizeof(mgr->payloads[0]));
+ memset(mgr->proposed_vcpis, 0,
+ mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
mgr->payload_mask = 0;
set_bit(0, &mgr->payload_mask);
- for (i = 0; i < mgr->max_payloads; i++) {
- struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
-
- if (vcpi) {
- vcpi->vcpi = 0;
- vcpi->num_slots = 0;
- }
- mgr->proposed_vcpis[i] = NULL;
- }
mgr->vcpi_mask = 0;
- mutex_unlock(&mgr->payload_lock);
-
mgr->payload_id_table_cleared = false;
}
out_unlock:
mutex_unlock(&mgr->lock);
+ mutex_unlock(&mgr->payload_lock);
if (mstb)
drm_dp_mst_topology_put_mstb(mstb);
return ret;
@@ -3663,7 +3674,12 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
- drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+
+ ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+ if (ret) {
+ DRM_DEBUG_KMS("check mstb failed - undocked during suspend?\n");
+ goto out_fail;
+ }
/*
* For the final step of resuming the topology, we need to bring the
@@ -3690,7 +3706,7 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
int len;
u8 replyblock[32];
- int replylen, origlen, curreply;
+ int replylen, curreply;
int ret;
struct drm_dp_sideband_msg_rx *msg;
int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
@@ -3710,7 +3726,6 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
}
replylen = msg->curchunk_len + msg->curchunk_hdrlen;
- origlen = replylen;
replylen -= len;
curreply = len;
while (replylen > 0) {
@@ -3820,7 +3835,8 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
guid = msg->u.resource_stat.guid;
- mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
+ if (guid)
+ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
} else {
mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
}
@@ -4007,6 +4023,8 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
switch (port->pdt) {
case DP_PEER_DEVICE_NONE:
case DP_PEER_DEVICE_MST_BRANCHING:
+ if (!port->mcs)
+ ret = connector_status_connected;
break;
case DP_PEER_DEVICE_SST_SINK:
@@ -4603,15 +4621,34 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
int ret;
ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
+ if (ret != DP_RECEIVER_CAP_SIZE) {
+ seq_printf(m, "dpcd read failed\n");
+ goto out;
+ }
seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
+
ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
+ if (ret != 2) {
+ seq_printf(m, "faux/mst read failed\n");
+ goto out;
+ }
seq_printf(m, "faux/mst: %*ph\n", 2, buf);
+
ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
+ if (ret != 1) {
+ seq_printf(m, "mst ctrl read failed\n");
+ goto out;
+ }
seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
/* dump the standard OUI branch header */
ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
+ if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
+ seq_printf(m, "branch oui read failed\n");
+ goto out;
+ }
seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
+
for (i = 0x3; i < 0x8 && buf[i]; i++)
seq_printf(m, "%c", buf[i]);
seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
@@ -4620,6 +4657,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
}
+out:
mutex_unlock(&mgr->lock);
}
@@ -4635,13 +4673,25 @@ static void drm_dp_tx_work(struct work_struct *work)
mutex_unlock(&mgr->qlock);
}
+static inline void drm_dp_destroy_connector(struct drm_dp_mst_port *port)
+{
+ if (!port->connector)
+ return;
+
+ if (port->mgr->cbs->destroy_connector) {
+ port->mgr->cbs->destroy_connector(port->mgr, port->connector);
+ } else {
+ drm_connector_unregister(port->connector);
+ drm_connector_put(port->connector);
+ }
+}
+
static inline void
drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
{
- if (port->connector)
- port->mgr->cbs->destroy_connector(port->mgr, port->connector);
+ drm_dp_destroy_connector(port);
- drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE);
+ drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
drm_dp_mst_put_port_malloc(port);
}
@@ -5440,7 +5490,8 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
if (drm_dp_read_desc(port->mgr->aux, &desc, true))
return NULL;
- if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
+ if (drm_dp_has_quirk(&desc, 0,
+ DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
port->parent == port->mgr->mst_primary) {
u8 downstreamport;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 7c18a980cd4b..7b1a628d1f6e 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -946,7 +946,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
struct drm_driver *driver = dev->driver;
int ret;
- mutex_lock(&drm_global_mutex);
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_lock(&drm_global_mutex);
ret = drm_minor_register(dev, DRM_MINOR_RENDER);
if (ret)
@@ -986,7 +987,8 @@ err_minors:
drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
drm_minor_unregister(dev, DRM_MINOR_RENDER);
out_unlock:
- mutex_unlock(&drm_global_mutex);
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_unlock(&drm_global_mutex);
return ret;
}
EXPORT_SYMBOL(drm_dev_register);
@@ -1079,17 +1081,14 @@ static int drm_stub_open(struct inode *inode, struct file *filp)
DRM_DEBUG("\n");
- mutex_lock(&drm_global_mutex);
minor = drm_minor_acquire(iminor(inode));
- if (IS_ERR(minor)) {
- err = PTR_ERR(minor);
- goto out_unlock;
- }
+ if (IS_ERR(minor))
+ return PTR_ERR(minor);
new_fops = fops_get(minor->dev->driver->fops);
if (!new_fops) {
err = -ENODEV;
- goto out_release;
+ goto out;
}
replace_fops(filp, new_fops);
@@ -1098,10 +1097,9 @@ static int drm_stub_open(struct inode *inode, struct file *filp)
else
err = 0;
-out_release:
+out:
drm_minor_release(minor);
-out_unlock:
- mutex_unlock(&drm_global_mutex);
+
return err;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 99769d6c9f84..116451101426 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1590,11 +1590,22 @@ static int validate_displayid(u8 *displayid, int length, int idx);
static int drm_edid_block_checksum(const u8 *raw_edid)
{
int i;
- u8 csum = 0;
- for (i = 0; i < EDID_LENGTH; i++)
+ u8 csum = 0, crc = 0;
+
+ for (i = 0; i < EDID_LENGTH - 1; i++)
csum += raw_edid[i];
- return csum;
+ crc = 0x100 - csum;
+
+ return crc;
+}
+
+static bool drm_edid_block_checksum_diff(const u8 *raw_edid, u8 real_checksum)
+{
+ return raw_edid[EDID_LENGTH - 1] != real_checksum;
}
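To make the new split concrete: drm_edid_block_checksum() now returns the checksum byte a valid block should carry (the value that makes all 128 bytes sum to 0 mod 256), while drm_edid_block_checksum_diff() compares that against the byte actually stored at offset 127. A minimal sketch of a caller inside drm_edid.c (the helper name is hypothetical):

static bool example_edid_block_ok(const u8 *raw_edid)
{
	/* The checksum byte the block should carry */
	u8 expected = drm_edid_block_checksum(raw_edid);

	return !drm_edid_block_checksum_diff(raw_edid, expected);
}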
static bool drm_edid_is_zero(const u8 *in_edid, int length)
@@ -1652,7 +1663,7 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
}
csum = drm_edid_block_checksum(raw_edid);
- if (csum) {
+ if (drm_edid_block_checksum_diff(raw_edid, csum)) {
if (edid_corrupt)
*edid_corrupt = true;
@@ -1793,6 +1804,11 @@ static void connector_bad_edid(struct drm_connector *connector,
u8 *edid, int num_blocks)
{
int i;
+ u8 num_of_ext = edid[0x7e];
+
+ /* Calculate real checksum for the last edid extension block data */
+ connector->real_edid_checksum =
+ drm_edid_block_checksum(edid + num_of_ext * EDID_LENGTH);
if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
return;
@@ -2196,15 +2212,29 @@ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_mode_find_dmt);
+static bool is_display_descriptor(const u8 d[18], u8 tag)
+{
+ return d[0] == 0x00 && d[1] == 0x00 &&
+ d[2] == 0x00 && d[3] == tag;
+}
+
+static bool is_detailed_timing_descriptor(const u8 d[18])
+{
+ return d[0] != 0x00 || d[1] != 0x00;
+}
+
typedef void detailed_cb(struct detailed_timing *timing, void *closure);
static void
cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
{
- int i, n = 0;
+ int i, n;
u8 d = ext[0x02];
u8 *det_base = ext + d;
+ if (d < 4 || d > 127)
+ return;
+
n = (127 - d) / 18;
for (i = 0; i < n; i++)
cb((struct detailed_timing *)(det_base + 18 * i), closure);
@@ -2254,9 +2284,12 @@ static void
is_rb(struct detailed_timing *t, void *data)
{
u8 *r = (u8 *)t;
- if (r[3] == EDID_DETAIL_MONITOR_RANGE)
- if (r[15] & 0x10)
- *(bool *)data = true;
+
+ if (!is_display_descriptor(r, EDID_DETAIL_MONITOR_RANGE))
+ return;
+
+ if (r[15] & 0x10)
+ *(bool *)data = true;
}
/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
@@ -2276,7 +2309,11 @@ static void
find_gtf2(struct detailed_timing *t, void *data)
{
u8 *r = (u8 *)t;
- if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
+
+ if (!is_display_descriptor(r, EDID_DETAIL_MONITOR_RANGE))
+ return;
+
+ if (r[10] == 0x02)
*(u8 **)data = r;
}
@@ -2815,13 +2852,13 @@ do_inferred_modes(struct detailed_timing *timing, void *c)
struct detailed_non_pixel *data = &timing->data.other_data;
struct detailed_data_monitor_range *range = &data->data.range;
- if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_MONITOR_RANGE))
return;
closure->modes += drm_dmt_modes_for_range(closure->connector,
closure->edid,
timing);
-
+
if (!version_greater(closure->edid, 1, 1))
return; /* GTF not defined yet */
@@ -2894,10 +2931,11 @@ static void
do_established_modes(struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
- struct detailed_non_pixel *data = &timing->data.other_data;
- if (data->type == EDID_DETAIL_EST_TIMINGS)
- closure->modes += drm_est3_modes(closure->connector, timing);
+ if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_EST_TIMINGS))
+ return;
+
+ closure->modes += drm_est3_modes(closure->connector, timing);
}
/**
@@ -2946,19 +2984,19 @@ do_standard_modes(struct detailed_timing *timing, void *c)
struct detailed_non_pixel *data = &timing->data.other_data;
struct drm_connector *connector = closure->connector;
struct edid *edid = closure->edid;
+ int i;
- if (data->type == EDID_DETAIL_STD_MODES) {
- int i;
- for (i = 0; i < 6; i++) {
- struct std_timing *std;
- struct drm_display_mode *newmode;
+ if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_STD_MODES))
+ return;
- std = &data->data.timings[i];
- newmode = drm_mode_std(connector, edid, std);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- closure->modes++;
- }
+ for (i = 0; i < 6; i++) {
+ struct std_timing *std = &data->data.timings[i];
+ struct drm_display_mode *newmode;
+
+ newmode = drm_mode_std(connector, edid, std);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ closure->modes++;
}
}
}
@@ -3053,15 +3091,16 @@ static void
do_cvt_mode(struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
- struct detailed_non_pixel *data = &timing->data.other_data;
- if (data->type == EDID_DETAIL_CVT_3BYTE)
- closure->modes += drm_cvt_modes(closure->connector, timing);
+ if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_CVT_3BYTE))
+ return;
+
+ closure->modes += drm_cvt_modes(closure->connector, timing);
}
static int
add_cvt_modes(struct drm_connector *connector, struct edid *edid)
-{
+{
struct detailed_mode_closure closure = {
.connector = connector,
.edid = edid,
@@ -3083,27 +3122,28 @@ do_detailed_mode(struct detailed_timing *timing, void *c)
struct detailed_mode_closure *closure = c;
struct drm_display_mode *newmode;
- if (timing->pixel_clock) {
- newmode = drm_mode_detailed(closure->connector->dev,
- closure->edid, timing,
- closure->quirks);
- if (!newmode)
- return;
+ if (!is_detailed_timing_descriptor((const u8 *)timing))
+ return;
- if (closure->preferred)
- newmode->type |= DRM_MODE_TYPE_PREFERRED;
+ newmode = drm_mode_detailed(closure->connector->dev,
+ closure->edid, timing,
+ closure->quirks);
+ if (!newmode)
+ return;
- /*
- * Detailed modes are limited to 10kHz pixel clock resolution,
- * so fix up anything that looks like CEA/HDMI mode, but the clock
- * is just slightly off.
- */
- fixup_detailed_cea_mode_clock(newmode);
+ if (closure->preferred)
+ newmode->type |= DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(closure->connector, newmode);
- closure->modes++;
- closure->preferred = false;
- }
+ /*
+ * Detailed modes are limited to 10kHz pixel clock resolution,
+ * so fix up anything that looks like CEA/HDMI mode, but the clock
+ * is just slightly off.
+ */
+ fixup_detailed_cea_mode_clock(newmode);
+
+ drm_mode_probed_add(closure->connector, newmode);
+ closure->modes++;
+ closure->preferred = false;
}
/*
@@ -3211,7 +3251,7 @@ static u8 *drm_find_cea_extension(const struct edid *edid)
return cea;
}
-static const struct drm_display_mode *cea_mode_for_vic(u8 vic)
+static __always_inline const struct drm_display_mode *cea_mode_for_vic(u8 vic)
{
BUILD_BUG_ON(1 + ARRAY_SIZE(edid_cea_modes_1) - 1 != 127);
BUILD_BUG_ON(193 + ARRAY_SIZE(edid_cea_modes_193) - 1 != 219);
@@ -3953,6 +3993,13 @@ cea_db_tag(const u8 *db)
static int
cea_revision(const u8 *cea)
{
+ /*
+ * FIXME is this correct for the DispID variant?
+ * The DispID spec doesn't really specify whether
+ * this is the revision of the CEA extension or
+ * the DispID CEA data block. And the only value
+ * given as an example is 0.
+ */
return cea[1];
}
@@ -3977,6 +4024,10 @@ cea_db_offsets(const u8 *cea, int *start, int *end)
* no non-DTD data.
*/
if (cea[0] == DATA_BLOCK_CTA) {
+ /*
+ * for_each_displayid_db() has already verified
+ * that these stay within expected bounds.
+ */
*start = 3;
*end = *start + cea[2];
} else if (cea[0] == CEA_EXT) {
@@ -4282,8 +4333,10 @@ drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db)
static void
monitor_name(struct detailed_timing *t, void *data)
{
- if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
- *(u8 **)data = t->data.other_data.data.str.str;
+ if (!is_display_descriptor((const u8 *)t, EDID_DETAIL_MONITOR_NAME))
+ return;
+
+ *(u8 **)data = t->data.other_data.data.str.str;
}
static int get_monitor_name(struct edid *edid, char name[13])
@@ -4316,7 +4369,7 @@ void drm_edid_get_monitor_name(struct edid *edid, char *name, int bufsize)
{
int name_length;
char buf[13];
-
+
if (bufsize <= 0)
return;
@@ -4381,6 +4434,7 @@ static void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
if (cea_revision(cea) >= 3) {
int i, start, end;
+ int sad_count;
if (cea_db_offsets(cea, &start, &end)) {
start = 0;
@@ -4392,8 +4446,6 @@ static void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
dbl = cea_db_payload_len(db);
switch (cea_db_tag(db)) {
- int sad_count;
-
case AUDIO_BLOCK:
/* Audio Data Block, contains SADs */
sad_count = min(dbl / 3, 15 - total_sad_count);
@@ -4594,6 +4646,9 @@ EXPORT_SYMBOL(drm_av_sync_delay);
*
* Parse the CEA extension according to CEA-861-B.
*
+ * Drivers that have added the modes parsed from EDID to drm_display_info
+ * should use &drm_display_info.is_hdmi instead of calling this function.
+ *
* Return: True if the monitor is HDMI, false if not or unknown.
*/
bool drm_detect_hdmi_monitor(struct edid *edid)
@@ -4828,6 +4883,8 @@ drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
struct drm_display_info *info = &connector->display_info;
u8 len = cea_db_payload_len(db);
+ info->is_hdmi = true;
+
if (len >= 6)
info->dvi_dual = db[6] & 1;
if (len >= 7)
@@ -4880,6 +4937,47 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
}
}
+static
+void get_monitor_range(struct detailed_timing *timing,
+ void *info_monitor_range)
+{
+ struct drm_monitor_range_info *monitor_range = info_monitor_range;
+ const struct detailed_non_pixel *data = &timing->data.other_data;
+ const struct detailed_data_monitor_range *range = &data->data.range;
+
+ if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_MONITOR_RANGE))
+ return;
+
+ /*
+ * Check for flag range limits only. If flag == 1 then
+ * no additional timing information provided.
+ * Default GTF, GTF Secondary curve and CVT are not
+ * supported
+ */
+ if (range->flags != DRM_EDID_RANGE_LIMITS_ONLY_FLAG)
+ return;
+
+ monitor_range->min_vfreq = range->min_vfreq;
+ monitor_range->max_vfreq = range->max_vfreq;
+}
+
+static
+void drm_get_monitor_range(struct drm_connector *connector,
+ const struct edid *edid)
+{
+ struct drm_display_info *info = &connector->display_info;
+
+ if (!version_greater(edid, 1, 1))
+ return;
+
+ drm_for_each_detailed_block((u8 *)edid, get_monitor_range,
+ &info->monitor_range);
+
+ DRM_DEBUG_KMS("Supported Monitor Refresh rate range is %d Hz - %d Hz\n",
+ info->monitor_range.min_vfreq,
+ info->monitor_range.max_vfreq);
+}
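Once drm_add_display_info() has run (see the hunk below), drivers can consume the parsed range. A hypothetical consumer might look like this; only drm_display_info.monitor_range and its min_vfreq/max_vfreq fields come from this series.

static bool example_panel_reports_vfreq_range(struct drm_connector *connector)
{
	const struct drm_monitor_range_info *range =
		&connector->display_info.monitor_range;

	/* Non-degenerate range, e.g. a candidate for adaptive sync */
	return range->max_vfreq > range->min_vfreq;
}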
+
/* A connector has no EDID information, so we've got no EDID to compute quirks from. Reset
* all of the values which would have been set from EDID
*/
@@ -4896,11 +4994,13 @@ drm_reset_display_info(struct drm_connector *connector)
info->cea_rev = 0;
info->max_tmds_clock = 0;
info->dvi_dual = false;
+ info->is_hdmi = false;
info->has_hdmi_infoframe = false;
info->rgb_quant_range_selectable = false;
memset(&info->hdmi, 0, sizeof(info->hdmi));
info->non_desktop = 0;
+ memset(&info->monitor_range, 0, sizeof(info->monitor_range));
}
u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
@@ -4916,6 +5016,8 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
+ drm_get_monitor_range(connector, edid);
+
DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
if (edid->revision < 3)
@@ -5396,14 +5498,11 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
{
enum hdmi_picture_aspect picture_aspect;
u8 vic, hdmi_vic;
- int err;
if (!frame || !mode)
return -EINVAL;
- err = hdmi_avi_infoframe_init(frame);
- if (err < 0)
- return err;
+ hdmi_avi_infoframe_init(frame);
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
frame->pixel_repeat = 1;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4c7cbce7bae7..a9771de4d17e 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -250,17 +250,7 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
return 0;
mutex_lock(&fb_helper->lock);
- /*
- * TODO:
- * We should bail out here if there is a master by dropping _force.
- * Currently these igt tests fail if we do that:
- * - kms_fbcon_fbt@psr
- * - kms_fbcon_fbt@psr-suspend
- *
- * So first these tests need to be fixed so they drop master or don't
- * have an fd open.
- */
- ret = drm_client_modeset_commit_force(&fb_helper->client);
+ ret = drm_client_modeset_commit(&fb_helper->client);
do_delayed = fb_helper->delayed_hotplug;
if (do_delayed)
@@ -294,7 +284,7 @@ static bool drm_fb_helper_force_kernel_mode(void)
continue;
mutex_lock(&helper->lock);
- ret = drm_client_modeset_commit_force(&helper->client);
+ ret = drm_client_modeset_commit_locked(&helper->client);
if (ret)
error = true;
mutex_unlock(&helper->lock);
@@ -460,7 +450,6 @@ EXPORT_SYMBOL(drm_fb_helper_prepare);
* drm_fb_helper_init - initialize a &struct drm_fb_helper
* @dev: drm device
* @fb_helper: driver-allocated fbdev helper structure to initialize
- * @max_conn_count: max connector count (not used)
*
* This allocates the structures for the fbdev helper with the given limits.
* Note that this won't yet touch the hardware (through the driver interfaces)
@@ -473,8 +462,7 @@ EXPORT_SYMBOL(drm_fb_helper_prepare);
* Zero if everything went ok, nonzero otherwise.
*/
int drm_fb_helper_init(struct drm_device *dev,
- struct drm_fb_helper *fb_helper,
- int max_conn_count)
+ struct drm_fb_helper *fb_helper)
{
int ret;
@@ -1357,7 +1345,7 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
pan_set(fb_helper, var->xoffset, var->yoffset);
- ret = drm_client_modeset_commit_force(&fb_helper->client);
+ ret = drm_client_modeset_commit_locked(&fb_helper->client);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
@@ -2135,7 +2123,7 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs);
- ret = drm_fb_helper_init(dev, fb_helper, 0);
+ ret = drm_fb_helper_init(dev, fb_helper);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 92d16724f949..c4c704e01961 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -51,6 +51,37 @@
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
+bool drm_dev_needs_global_mutex(struct drm_device *dev)
+{
+ /*
+ * Legacy drivers rely on all kinds of BKL locking semantics, don't
+ * bother. They also still need BKL locking for their ioctls, so better
+ * safe than sorry.
+ */
+ if (drm_core_check_feature(dev, DRIVER_LEGACY))
+ return true;
+
+ /*
+ * The deprecated ->load callback must be called after the driver is
+ * already registered. This means such drivers rely on the BKL to make
+ * sure an open can't proceed until the driver is actually fully set up.
+ * Similar hilarity holds for the unload callback.
+ */
+ if (dev->driver->load || dev->driver->unload)
+ return true;
+
+ /*
+ * Drivers with the lastclose callback assume that it's synchronized
+ * against concurrent opens, which again needs the BKL. The proper fix
+ * is to use the drm_client infrastructure with proper locking for each
+ * client.
+ */
+ if (dev->driver->lastclose)
+ return true;
+
+ return false;
+}
+
/**
* DOC: file operations
*
@@ -220,7 +251,7 @@ void drm_file_free(struct drm_file *file)
DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
task_pid_nr(current),
(long)old_encode_dev(file->minor->kdev->devt),
- dev->open_count);
+ atomic_read(&dev->open_count));
if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
dev->driver->preclose)
@@ -379,7 +410,10 @@ int drm_open(struct inode *inode, struct file *filp)
return PTR_ERR(minor);
dev = minor->dev;
- if (!dev->open_count++)
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_lock(&drm_global_mutex);
+
+ if (!atomic_fetch_inc(&dev->open_count))
need_setup = 1;
/* share address_space across all char-devs of a single device */
@@ -395,10 +429,16 @@ int drm_open(struct inode *inode, struct file *filp)
goto err_undo;
}
}
+
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_unlock(&drm_global_mutex);
+
return 0;
err_undo:
- dev->open_count--;
+ atomic_dec(&dev->open_count);
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_unlock(&drm_global_mutex);
drm_minor_release(minor);
return retcode;
}
@@ -438,16 +478,18 @@ int drm_release(struct inode *inode, struct file *filp)
struct drm_minor *minor = file_priv->minor;
struct drm_device *dev = minor->dev;
- mutex_lock(&drm_global_mutex);
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_lock(&drm_global_mutex);
- DRM_DEBUG("open_count = %d\n", dev->open_count);
+ DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));
drm_close_helper(filp);
- if (!--dev->open_count)
+ if (atomic_dec_and_test(&dev->open_count))
drm_lastclose(dev);
- mutex_unlock(&drm_global_mutex);
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_unlock(&drm_global_mutex);
drm_minor_release(minor);
@@ -456,6 +498,40 @@ int drm_release(struct inode *inode, struct file *filp)
EXPORT_SYMBOL(drm_release);
/**
+ * drm_release_noglobal - release method for DRM file
+ * @inode: device inode
+ * @filp: file pointer.
+ *
+ * This function may be used by drivers as their &file_operations.release
+ * method. It frees any resources associated with the open file, including
+ * calling the &drm_driver.postclose driver callback, prior to taking the
+ * drm_global_mutex. If this is the last open file for the DRM device, it
+ * also proceeds to call the &drm_driver.lastclose driver callback.
+ *
+ * RETURNS:
+ *
+ * Always succeeds and returns 0.
+ */
+int drm_release_noglobal(struct inode *inode, struct file *filp)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_minor *minor = file_priv->minor;
+ struct drm_device *dev = minor->dev;
+
+ drm_close_helper(filp);
+
+ if (atomic_dec_and_mutex_lock(&dev->open_count, &drm_global_mutex)) {
+ drm_lastclose(dev);
+ mutex_unlock(&drm_global_mutex);
+ }
+
+ drm_minor_release(minor);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_release_noglobal);
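A sketch of a driver fops table opting in to the new lockless release path; the table name is hypothetical, the callbacks are the stock DRM file operations.

static const struct file_operations example_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release_noglobal,	/* skips drm_global_mutex */
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
	.mmap		= drm_gem_mmap,
};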
+
+/**
* drm_read - read method for DRM file
* @filp: file pointer
* @buffer: userspace destination pointer for the read
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 0897cb9aeaff..3b818f2b2392 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0 or MIT
/*
* Copyright (C) 2016 Noralf Trønnes
*
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 57564318ceea..57ac94ce9b9e 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -31,6 +31,7 @@
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_util.h>
@@ -548,7 +549,128 @@ int drm_mode_getfb(struct drm_device *dev,
out:
drm_framebuffer_put(fb);
+ return ret;
+}
+
+/**
+ * drm_mode_getfb2 - get extended FB info
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Lookup the FB given its ID and return info about it.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_getfb2_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_fb_cmd2 *r = data;
+ struct drm_framebuffer *fb;
+ unsigned int i;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
+ if (!fb)
+ return -ENOENT;
+
+ /* For multi-plane framebuffers, we require the driver to place the
+ * GEM objects directly in the drm_framebuffer. For single-plane
+ * framebuffers, we can fall back to create_handle.
+ */
+ if (!fb->obj[0] &&
+ (fb->format->num_planes > 1 || !fb->funcs->create_handle)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ r->height = fb->height;
+ r->width = fb->width;
+ r->pixel_format = fb->format->format;
+
+ r->flags = 0;
+ if (dev->mode_config.allow_fb_modifiers)
+ r->flags |= DRM_MODE_FB_MODIFIERS;
+
+ for (i = 0; i < ARRAY_SIZE(r->handles); i++) {
+ r->handles[i] = 0;
+ r->pitches[i] = 0;
+ r->offsets[i] = 0;
+ r->modifier[i] = 0;
+ }
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ r->pitches[i] = fb->pitches[i];
+ r->offsets[i] = fb->offsets[i];
+ if (dev->mode_config.allow_fb_modifiers)
+ r->modifier[i] = fb->modifier;
+ }
+
+ /* GET_FB2() is an unprivileged ioctl so we must not return a
+ * buffer-handle to non master/root processes! To match GET_FB()
+ * just return invalid handles (0) for non masters/root
+ * rather than making GET_FB2() privileged.
+ */
+ if (!drm_is_current_master(file_priv) && !capable(CAP_SYS_ADMIN)) {
+ ret = 0;
+ goto out;
+ }
+ for (i = 0; i < fb->format->num_planes; i++) {
+ int j;
+
+ /* If we reuse the same object for multiple planes, also
+ * return the same handle.
+ */
+ for (j = 0; j < i; j++) {
+ if (fb->obj[i] == fb->obj[j]) {
+ r->handles[i] = r->handles[j];
+ break;
+ }
+ }
+
+ if (r->handles[i])
+ continue;
+
+ if (fb->obj[i]) {
+ ret = drm_gem_handle_create(file_priv, fb->obj[i],
+ &r->handles[i]);
+ } else {
+ WARN_ON(i > 0);
+ ret = fb->funcs->create_handle(fb, file_priv,
+ &r->handles[i]);
+ }
+
+ if (ret != 0)
+ goto out;
+ }
+
+out:
+ if (ret != 0) {
+ /* Delete any previously-created handles on failure. */
+ for (i = 0; i < ARRAY_SIZE(r->handles); i++) {
+ int j;
+
+ if (r->handles[i])
+ drm_gem_handle_delete(file_priv, r->handles[i]);
+
+ /* Zero out any handles identical to the one we just
+ * deleted.
+ */
+ for (j = i + 1; j < ARRAY_SIZE(r->handles); j++) {
+ if (r->handles[j] == r->handles[i])
+ r->handles[j] = 0;
+ }
+ }
+ }
+
+ drm_framebuffer_put(fb);
return ret;
}
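From the userspace side, the new ioctl is reached through the matching uapi addition (DRM_IOCTL_MODE_GETFB2, defined in the corresponding include/uapi change, not shown in this hunk). A minimal, hypothetical libdrm-based caller:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>

/*
 * Fetch extended FB info for fb_id. Per the privilege check above,
 * r->handles[] stay 0 unless the caller is master or CAP_SYS_ADMIN.
 */
static int example_getfb2(int fd, uint32_t fb_id, struct drm_mode_fb_cmd2 *r)
{
	memset(r, 0, sizeof(*r));
	r->fb_id = fb_id;

	return drmIoctl(fd, DRM_IOCTL_MODE_GETFB2, r);
}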
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index a9e4a610445a..37627d06fb06 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -218,7 +218,7 @@ drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
bool final = false;
- if (WARN_ON(obj->handle_count == 0))
+ if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
return;
/*
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index a421a2eed48a..df31e5782eed 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -254,11 +254,16 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
if (ret)
goto err_zero_use;
- if (obj->import_attach)
+ if (obj->import_attach) {
shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
- else
+ } else {
+ pgprot_t prot = PAGE_KERNEL;
+
+ if (!shmem->map_cached)
+ prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
- VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ VM_MAP, prot);
+ }
if (!shmem->vaddr) {
DRM_DEBUG_KMS("Failed to vmap pages\n");
@@ -540,8 +545,9 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
}
vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ if (!shmem->map_cached)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma->vm_ops = &drm_gem_shmem_vm_ops;
return 0;
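A hypothetical driver fragment showing how the new per-object flag would be used: request cached CPU mappings right after creating the object, before it is ever vmapped or mmapped.

static struct drm_gem_shmem_object *
example_create_cached_bo(struct drm_device *dev, size_t size)
{
	struct drm_gem_shmem_object *shmem = drm_gem_shmem_create(dev, size);

	if (!IS_ERR(shmem))
		shmem->map_cached = true;	/* new per-object flag */

	return shmem;
}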
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index a4863326061a..92a11bb42365 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -1141,3 +1141,64 @@ void drm_vram_helper_release_mm(struct drm_device *dev)
dev->vram_mm = NULL;
}
EXPORT_SYMBOL(drm_vram_helper_release_mm);
+
+/*
+ * Mode-config helpers
+ */
+
+static enum drm_mode_status
+drm_vram_helper_mode_valid_internal(struct drm_device *dev,
+ const struct drm_display_mode *mode,
+ unsigned long max_bpp)
+{
+ struct drm_vram_mm *vmm = dev->vram_mm;
+ unsigned long fbsize, fbpages, max_fbpages;
+
+ if (WARN_ON(!dev->vram_mm))
+ return MODE_BAD;
+
+ max_fbpages = (vmm->vram_size / 2) >> PAGE_SHIFT;
+
+ fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
+ fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
+
+ if (fbpages > max_fbpages)
+ return MODE_MEM;
+
+ return MODE_OK;
+}
+
+/**
+ * drm_vram_helper_mode_valid - Tests if a display mode's
+ * framebuffer fits into the available video memory.
+ * @dev: the DRM device
+ * @mode: the mode to test
+ *
+ * This function tests if enough video memory is available for using the
+ * specified display mode. Atomic modesetting requires importing the
+ * designated framebuffer into video memory before evicting the active
+ * one. Hence, any framebuffer may consume at most half of the available
+ * VRAM. Display modes that require a larger framebuffer cannot be used,
+ * even if the CRTC does support them. Each framebuffer is assumed to
+ * have 32-bit color depth.
+ *
+ * Note:
+ * The function can only test if the display mode is supported in
+ * general. If there are too many framebuffers pinned to video memory,
+ * a display mode may still not be usable in practice. The color depth of
+ * 32-bit fits all current use cases. A more flexible test can be added
+ * when necessary.
+ *
+ * Returns:
+ * MODE_OK if the display mode is supported, or an error code of type
+ * enum drm_mode_status otherwise.
+ */
+enum drm_mode_status
+drm_vram_helper_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode)
+{
+ static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
+
+ return drm_vram_helper_mode_valid_internal(dev, mode, max_bpp);
+}
+EXPORT_SYMBOL(drm_vram_helper_mode_valid);
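Wiring the helper into a driver is a one-liner in the mode-config callbacks. A hedged sketch follows; the funcs table is hypothetical, the helpers are real and match the &drm_mode_config_funcs.mode_valid signature.

static const struct drm_mode_config_funcs example_mode_config_funcs = {
	.fb_create	= drm_gem_fb_create,
	.mode_valid	= drm_vram_helper_mode_valid,
	.atomic_check	= drm_atomic_helper_check,
	.atomic_commit	= drm_atomic_helper_commit,
};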
diff --git a/drivers/gpu/drm/drm_hdcp.c b/drivers/gpu/drm/drm_hdcp.c
index 9191633a3c43..7f386adcf872 100644
--- a/drivers/gpu/drm/drm_hdcp.c
+++ b/drivers/gpu/drm/drm_hdcp.c
@@ -23,14 +23,6 @@
#include "drm_internal.h"
-static struct hdcp_srm {
- u32 revoked_ksv_cnt;
- u8 *revoked_ksv_list;
-
- /* Mutex to protect above struct member */
- struct mutex mutex;
-} *srm_data;
-
static inline void drm_hdcp_print_ksv(const u8 *ksv)
{
DRM_DEBUG("\t%#02x, %#02x, %#02x, %#02x, %#02x\n",
@@ -60,11 +52,11 @@ static u32 drm_hdcp_get_revoked_ksv_count(const u8 *buf, u32 vrls_length)
return ksv_count;
}
-static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 *revoked_ksv_list,
+static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 **revoked_ksv_list,
u32 vrls_length)
{
- u32 parsed_bytes = 0, ksv_count = 0;
u32 vrl_ksv_cnt, vrl_ksv_sz, vrl_idx = 0;
+ u32 parsed_bytes = 0, ksv_count = 0;
do {
vrl_ksv_cnt = *buf;
@@ -74,10 +66,10 @@ static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 *revoked_ksv_list,
DRM_DEBUG("vrl: %d, Revoked KSVs: %d\n", vrl_idx++,
vrl_ksv_cnt);
- memcpy(revoked_ksv_list, buf, vrl_ksv_sz);
+ memcpy((*revoked_ksv_list) + (ksv_count * DRM_HDCP_KSV_LEN),
+ buf, vrl_ksv_sz);
ksv_count += vrl_ksv_cnt;
- revoked_ksv_list += vrl_ksv_sz;
buf += vrl_ksv_sz;
parsed_bytes += (vrl_ksv_sz + 1);
@@ -91,7 +83,8 @@ static inline u32 get_vrl_length(const u8 *buf)
return drm_hdcp_be24_to_cpu(buf);
}
-static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count)
+static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count,
+ u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
struct hdcp_srm_header *header;
u32 vrl_length, ksv_count;
@@ -131,29 +124,28 @@ static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count)
ksv_count = drm_hdcp_get_revoked_ksv_count(buf, vrl_length);
if (!ksv_count) {
DRM_DEBUG("Revoked KSV count is 0\n");
- return count;
+ return 0;
}
- kfree(srm_data->revoked_ksv_list);
- srm_data->revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN,
- GFP_KERNEL);
- if (!srm_data->revoked_ksv_list) {
+ *revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN, GFP_KERNEL);
+ if (!*revoked_ksv_list) {
DRM_ERROR("Out of Memory\n");
return -ENOMEM;
}
- if (drm_hdcp_get_revoked_ksvs(buf, srm_data->revoked_ksv_list,
+ if (drm_hdcp_get_revoked_ksvs(buf, revoked_ksv_list,
vrl_length) != ksv_count) {
- srm_data->revoked_ksv_cnt = 0;
- kfree(srm_data->revoked_ksv_list);
+ *revoked_ksv_cnt = 0;
+ kfree(*revoked_ksv_list);
return -EINVAL;
}
- srm_data->revoked_ksv_cnt = ksv_count;
- return count;
+ *revoked_ksv_cnt = ksv_count;
+ return 0;
}
-static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count)
+static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count,
+ u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
struct hdcp_srm_header *header;
u32 vrl_length, ksv_count, ksv_sz;
@@ -195,13 +187,11 @@ static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count)
ksv_count = (*buf << 2) | DRM_HDCP_2_KSV_COUNT_2_LSBITS(*(buf + 1));
if (!ksv_count) {
DRM_DEBUG("Revoked KSV count is 0\n");
- return count;
+ return 0;
}
- kfree(srm_data->revoked_ksv_list);
- srm_data->revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN,
- GFP_KERNEL);
- if (!srm_data->revoked_ksv_list) {
+ *revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN, GFP_KERNEL);
+ if (!*revoked_ksv_list) {
DRM_ERROR("Out of Memory\n");
return -ENOMEM;
}
@@ -210,10 +200,10 @@ static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count)
buf += DRM_HDCP_2_NO_OF_DEV_PLUS_RESERVED_SZ;
DRM_DEBUG("Revoked KSVs: %d\n", ksv_count);
- memcpy(srm_data->revoked_ksv_list, buf, ksv_sz);
+ memcpy(*revoked_ksv_list, buf, ksv_sz);
- srm_data->revoked_ksv_cnt = ksv_count;
- return count;
+ *revoked_ksv_cnt = ksv_count;
+ return 0;
}
static inline bool is_srm_version_hdcp1(const u8 *buf)
@@ -226,22 +216,27 @@ static inline bool is_srm_version_hdcp2(const u8 *buf)
return *buf == (u8)(DRM_HDCP_2_SRM_ID << 4 | DRM_HDCP_2_INDICATOR);
}
-static void drm_hdcp_srm_update(const u8 *buf, size_t count)
+static int drm_hdcp_srm_update(const u8 *buf, size_t count,
+ u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
if (count < sizeof(struct hdcp_srm_header))
- return;
+ return -EINVAL;
if (is_srm_version_hdcp1(buf))
- drm_hdcp_parse_hdcp1_srm(buf, count);
+ return drm_hdcp_parse_hdcp1_srm(buf, count, revoked_ksv_list,
+ revoked_ksv_cnt);
else if (is_srm_version_hdcp2(buf))
- drm_hdcp_parse_hdcp2_srm(buf, count);
+ return drm_hdcp_parse_hdcp2_srm(buf, count, revoked_ksv_list,
+ revoked_ksv_cnt);
+ else
+ return -EINVAL;
}
-static void drm_hdcp_request_srm(struct drm_device *drm_dev)
+static int drm_hdcp_request_srm(struct drm_device *drm_dev,
+ u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
char fw_name[36] = "display_hdcp_srm.bin";
const struct firmware *fw;
-
int ret;
ret = request_firmware_direct(&fw, (const char *)fw_name,
@@ -250,10 +245,12 @@ static void drm_hdcp_request_srm(struct drm_device *drm_dev)
goto exit;
if (fw->size && fw->data)
- drm_hdcp_srm_update(fw->data, fw->size);
+ ret = drm_hdcp_srm_update(fw->data, fw->size, revoked_ksv_list,
+ revoked_ksv_cnt);
exit:
release_firmware(fw);
+ return ret;
}
/**
@@ -279,71 +276,34 @@ exit:
* https://www.digital-cp.com/sites/default/files/specifications/HDCP%20on%20HDMI%20Specification%20Rev2_2_Final1.pdf
*
* Returns:
- * TRUE on any of the KSV is revoked, else FALSE.
+ * Count of the revoked KSVs, or a negative error code in case of failure.
*/
-bool drm_hdcp_check_ksvs_revoked(struct drm_device *drm_dev, u8 *ksvs,
- u32 ksv_count)
+int drm_hdcp_check_ksvs_revoked(struct drm_device *drm_dev, u8 *ksvs,
+ u32 ksv_count)
{
- u32 rev_ksv_cnt, cnt, i, j;
- u8 *rev_ksv_list;
-
- if (!srm_data)
- return false;
-
- mutex_lock(&srm_data->mutex);
- drm_hdcp_request_srm(drm_dev);
-
- rev_ksv_cnt = srm_data->revoked_ksv_cnt;
- rev_ksv_list = srm_data->revoked_ksv_list;
-
- /* If the Revoked ksv list is empty */
- if (!rev_ksv_cnt || !rev_ksv_list) {
- mutex_unlock(&srm_data->mutex);
- return false;
- }
-
- for (cnt = 0; cnt < ksv_count; cnt++) {
- rev_ksv_list = srm_data->revoked_ksv_list;
- for (i = 0; i < rev_ksv_cnt; i++) {
- for (j = 0; j < DRM_HDCP_KSV_LEN; j++)
- if (ksvs[j] != rev_ksv_list[j]) {
- break;
- } else if (j == (DRM_HDCP_KSV_LEN - 1)) {
- DRM_DEBUG("Revoked KSV is ");
- drm_hdcp_print_ksv(ksvs);
- mutex_unlock(&srm_data->mutex);
- return true;
- }
- /* Move the offset to next KSV in the revoked list */
- rev_ksv_list += DRM_HDCP_KSV_LEN;
- }
-
- /* Iterate to next ksv_offset */
- ksvs += DRM_HDCP_KSV_LEN;
- }
- mutex_unlock(&srm_data->mutex);
- return false;
+ u32 revoked_ksv_cnt = 0, i, j;
+ u8 *revoked_ksv_list = NULL;
+ int ret = 0;
+
+ ret = drm_hdcp_request_srm(drm_dev, &revoked_ksv_list,
+ &revoked_ksv_cnt);
+
+ /* revoked_ksv_cnt will be zero if the above call failed */
+ for (i = 0; i < revoked_ksv_cnt; i++)
+ for (j = 0; j < ksv_count; j++)
+ if (!memcmp(&ksvs[j * DRM_HDCP_KSV_LEN],
+ &revoked_ksv_list[i * DRM_HDCP_KSV_LEN],
+ DRM_HDCP_KSV_LEN)) {
+ DRM_DEBUG("Revoked KSV is ");
+ drm_hdcp_print_ksv(&ksvs[j * DRM_HDCP_KSV_LEN]);
+ ret++;
+ }
+
+ kfree(revoked_ksv_list);
+ return ret;
}
EXPORT_SYMBOL_GPL(drm_hdcp_check_ksvs_revoked);
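A hypothetical caller sketch (function name assumed, not part of the patch):
with the new return convention, a positive count means at least one KSV is
revoked and a negative value reports an SRM fetch or parse failure.

	static int example_validate_ksvs(struct drm_device *dev, u8 *ksvs, u32 n)
	{
		int revoked = drm_hdcp_check_ksvs_revoked(dev, ksvs, n);

		if (revoked < 0)
			return revoked;	/* SRM request/parse failed */
		if (revoked > 0)
			return -EPERM;	/* at least one KSV is revoked */
		return 0;		/* none revoked */
	}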
-int drm_setup_hdcp_srm(struct class *drm_class)
-{
- srm_data = kzalloc(sizeof(*srm_data), GFP_KERNEL);
- if (!srm_data)
- return -ENOMEM;
- mutex_init(&srm_data->mutex);
-
- return 0;
-}
-
-void drm_teardown_hdcp_srm(struct class *drm_class)
-{
- if (srm_data) {
- kfree(srm_data->revoked_ksv_list);
- kfree(srm_data);
- }
-}
-
static struct drm_prop_enum_list drm_cp_enum_list[] = {
{ DRM_MODE_CONTENT_PROTECTION_UNDESIRED, "Undesired" },
{ DRM_MODE_CONTENT_PROTECTION_DESIRED, "Desired" },
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 6937bf923f05..5714a78365ac 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -41,6 +41,7 @@ struct drm_printer;
/* drm_file.c */
extern struct mutex drm_global_mutex;
+bool drm_dev_needs_global_mutex(struct drm_device *dev);
struct drm_file *drm_file_alloc(struct drm_minor *minor);
void drm_file_free(struct drm_file *file);
void drm_lastclose(struct drm_device *dev);
@@ -235,7 +236,3 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_framebuffer *fb);
int drm_framebuffer_debugfs_init(struct drm_minor *minor);
-
-/* drm_hdcp.c */
-int drm_setup_hdcp_srm(struct class *drm_class);
-void drm_teardown_hdcp_srm(struct class *drm_class);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 5afb39688b55..9e41972c4bbc 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -671,6 +671,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_connector_property_set_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB2, drm_mode_getfb2_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, 0),
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 03bce566a8c3..588be45abd7a 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -111,10 +111,6 @@ int drm_irq_install(struct drm_device *dev, int irq)
if (irq == 0)
return -EINVAL;
- /* Driver must have been initialized */
- if (!dev->dev_private)
- return -EINVAL;
-
if (dev->irq_enabled)
return -EBUSY;
dev->irq_enabled = true;
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 2c79e8199e3c..f16eefbf2829 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -46,7 +46,7 @@
static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
-/**
+/*
* Take the heavyweight lock.
*
* \param lock lock pointer.
@@ -93,7 +93,7 @@ int drm_lock_take(struct drm_lock_data *lock_data,
return 0;
}
-/**
+/*
* This takes a lock forcibly and hands it to context. Should ONLY be used
* inside *_unlock to give lock to kernel before calling *_dma_schedule.
*
@@ -150,7 +150,7 @@ static int drm_legacy_lock_free(struct drm_lock_data *lock_data,
return 0;
}
-/**
+/*
* Lock ioctl.
*
* \param inode device inode.
@@ -243,7 +243,7 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Unlock ioctl.
*
* \param inode device inode.
@@ -275,7 +275,7 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
return 0;
}
-/**
+/*
* This function returns immediately and takes the hw lock
* with the kernel context if it is free, otherwise it gets the highest priority when and if
* it is eventually released.
@@ -287,7 +287,6 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
* This should be sufficient to wait for GPU idle without
* having to worry about starvation.
*/
-
void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
{
int ret;
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 16bff1be4b8a..558baf989f5a 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -24,7 +24,6 @@
#include <drm/drm_modes.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
#include <video/mipi_display.h>
#define MIPI_DBI_MAX_SPI_READ_SPEED 2000000 /* 2MHz */
@@ -238,6 +237,23 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
}
EXPORT_SYMBOL(mipi_dbi_buf_copy);
+static void mipi_dbi_set_window_address(struct mipi_dbi_dev *dbidev,
+ unsigned int xs, unsigned int xe,
+ unsigned int ys, unsigned int ye)
+{
+ struct mipi_dbi *dbi = &dbidev->dbi;
+
+ xs += dbidev->left_offset;
+ xe += dbidev->left_offset;
+ ys += dbidev->top_offset;
+ ye += dbidev->top_offset;
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS, (xs >> 8) & 0xff,
+ xs & 0xff, (xe >> 8) & 0xff, xe & 0xff);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS, (ys >> 8) & 0xff,
+ ys & 0xff, (ye >> 8) & 0xff, ye & 0xff);
+}
+
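Illustrative effect of the new helper (offsets assumed):

	/* With left_offset = 2 and top_offset = 1, a damage rect
	 * (0,0)-(127,127) is programmed as columns 2..129 and pages 1..128;
	 * callers keep passing framebuffer coordinates and the helper
	 * applies the panel offsets in one place.
	 */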
static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
@@ -271,12 +287,8 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
tr = cma_obj->vaddr;
}
- mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS,
- (rect->x1 >> 8) & 0xff, rect->x1 & 0xff,
- ((rect->x2 - 1) >> 8) & 0xff, (rect->x2 - 1) & 0xff);
- mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS,
- (rect->y1 >> 8) & 0xff, rect->y1 & 0xff,
- ((rect->y2 - 1) >> 8) & 0xff, (rect->y2 - 1) & 0xff);
+ mipi_dbi_set_window_address(dbidev, rect->x1, rect->x2 - 1, rect->y1,
+ rect->y2 - 1);
ret = mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START, tr,
width * height * 2);
@@ -299,18 +311,10 @@ void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
mipi_dbi_fb_dirty(state->fb, &rect);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- spin_unlock_irq(&crtc->dev->event_lock);
- crtc->state->event = NULL;
- }
}
EXPORT_SYMBOL(mipi_dbi_pipe_update);
@@ -366,10 +370,7 @@ static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev)
memset(dbidev->tx_buf, 0, len);
- mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS, 0, 0,
- ((width - 1) >> 8) & 0xFF, (width - 1) & 0xFF);
- mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS, 0, 0,
- ((height - 1) >> 8) & 0xFF, (height - 1) & 0xFF);
+ mipi_dbi_set_window_address(dbidev, 0, width - 1, 0, height - 1);
mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START,
(u8 *)dbidev->tx_buf, len);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 2a6e34663146..bc6e208949e8 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -45,6 +45,7 @@
#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
+#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
@@ -366,6 +367,11 @@ next_hole(struct drm_mm *mm,
struct drm_mm_node *node,
enum drm_mm_insert_mode mode)
{
+ /* Searching is slow; check if we ran out of time/patience */
+ cond_resched();
+ if (fatal_signal_pending(current))
+ return NULL;
+
switch (mode) {
default:
case DRM_MM_INSERT_BEST:
@@ -399,10 +405,10 @@ next_hole(struct drm_mm *mm,
*/
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
- u64 end = node->start + node->size;
struct drm_mm_node *hole;
u64 hole_start, hole_end;
u64 adj_start, adj_end;
+ u64 end;
end = node->start + node->size;
if (unlikely(end <= node->start))
@@ -557,7 +563,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
return 0;
}
- return -ENOSPC;
+ return signal_pending(current) ? -ERESTARTSYS : -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
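A hypothetical caller sketch: after this change, a long search may be
interrupted by a fatal signal, so callers of drm_mm_insert_node_in_range()
should expect -ERESTARTSYS in addition to -ENOSPC (the retry helper below is
assumed).

	err = drm_mm_insert_node_in_range(&mm, &node, size, 0, 0,
					  0, U64_MAX, DRM_MM_INSERT_BEST);
	if (err == -ERESTARTSYS)
		return err;		/* signal pending; restart the ioctl */
	if (err == -ENOSPC)
		err = example_evict_and_retry(&mm, &node);	/* hypothetical */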
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 10336b144c72..d4d64518e11b 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1698,6 +1698,13 @@ static int drm_mode_parse_cmdline_options(const char *str,
if (rotation && freestanding)
return -EINVAL;
+ if (!(rotation & DRM_MODE_ROTATE_MASK))
+ rotation |= DRM_MODE_ROTATE_0;
+
+ /* Make sure there is exactly one rotation defined */
+ if (!is_power_of_2(rotation & DRM_MODE_ROTATE_MASK))
+ return -EINVAL;
+
mode->rotation_reflection = rotation;
return 0;
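Illustrative command-line outcomes under the new normalization (assumed from
the checks above):

	video=HDMI-A-1:720x480             -> DRM_MODE_ROTATE_0 (default)
	video=HDMI-A-1:720x480,rotate=90   -> DRM_MODE_ROTATE_90
	video=HDMI-A-1:720x480,reflect_x   -> DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_X
	more than one rotation bit set     -> -EINVAL (is_power_of_2() fails)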
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index f2e43d341980..81aa21561982 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -51,8 +51,6 @@
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
drm_dma_handle_t *dmah;
- unsigned long addr;
- size_t sz;
/* pci_alloc_consistent only guarantees alignment to the smallest
* PAGE_SIZE order which is greater than or equal to the requested size.
@@ -68,47 +66,17 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
dmah->size = size;
dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
&dmah->busaddr,
- GFP_KERNEL | __GFP_COMP);
+ GFP_KERNEL);
if (dmah->vaddr == NULL) {
kfree(dmah);
return NULL;
}
- /* XXX - Is virt_to_page() legal for consistent mem? */
- /* Reserve */
- for (addr = (unsigned long)dmah->vaddr, sz = size;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- SetPageReserved(virt_to_page((void *)addr));
- }
-
return dmah;
}
-
EXPORT_SYMBOL(drm_pci_alloc);
-/*
- * Free a PCI consistent memory block without freeing its descriptor.
- *
- * This function is for internal use in the Linux-specific DRM core code.
- */
-void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
-{
- unsigned long addr;
- size_t sz;
-
- if (dmah->vaddr) {
- /* XXX - Is virt_to_page() legal for consistent mem? */
- /* Unreserve */
- for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- ClearPageReserved(virt_to_page((void *)addr));
- }
- dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
- dmah->busaddr);
- }
-}
-
/**
* drm_pci_free - Free a PCI consistent memory block
* @dev: DRM device
@@ -119,7 +87,8 @@ void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
*/
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
- __drm_legacy_pci_free(dev, dmah);
+ dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
+ dmah->busaddr);
kfree(dmah);
}
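A minimal usage sketch of the surviving pair (driver context assumed):

	drm_dma_handle_t *dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);

	if (!dmah)
		return -ENOMEM;
	/* dmah->vaddr is the CPU mapping, dmah->busaddr the DMA address */
	drm_pci_free(dev, dmah);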
@@ -197,6 +166,18 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
return drm_pci_irq_by_busid(dev, p);
}
+void drm_pci_agp_destroy(struct drm_device *dev)
+{
+ if (dev->agp) {
+ arch_phys_wc_del(dev->agp->agp_mtrr);
+ drm_legacy_agp_clear(dev);
+ kfree(dev->agp);
+ dev->agp = NULL;
+ }
+}
+
+#ifdef CONFIG_DRM_LEGACY
+
static void drm_pci_agp_init(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
@@ -211,33 +192,9 @@ static void drm_pci_agp_init(struct drm_device *dev)
}
}
-void drm_pci_agp_destroy(struct drm_device *dev)
-{
- if (dev->agp) {
- arch_phys_wc_del(dev->agp->agp_mtrr);
- drm_legacy_agp_clear(dev);
- kfree(dev->agp);
- dev->agp = NULL;
- }
-}
-
-/**
- * drm_get_pci_dev - Register a PCI device with the DRM subsystem
- * @pdev: PCI device
- * @ent: entry from the PCI ID table that matches @pdev
- * @driver: DRM device driver
- *
- * Attempt to gets inter module "drm" information. If we are first
- * then register the character device and inter module information.
- * Try and register, if we fail to register, backout previous work.
- *
- * NOTE: This function is deprecated, please use drm_dev_alloc() and
- * drm_dev_register() instead and remove your &drm_driver.load callback.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
- struct drm_driver *driver)
+static int drm_get_pci_dev(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ struct drm_driver *driver)
{
struct drm_device *dev;
int ret;
@@ -280,9 +237,6 @@ err_free:
drm_dev_put(dev);
return ret;
}
-EXPORT_SYMBOL(drm_get_pci_dev);
-
-#ifdef CONFIG_DRM_LEGACY
/**
* drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index d5c386154246..ca520028b2cb 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -99,6 +99,9 @@ int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EOPNOTSUPP;
+ if (request->size > SIZE_MAX - PAGE_SIZE)
+ return -EINVAL;
+
if (dev->sg)
return -EINVAL;
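A sketch of the wraparound this guards against (rounding expression
illustrative):

	/* later the request is rounded up to whole pages, roughly:
	 *   pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
	 * without the new check, a size within PAGE_SIZE of SIZE_MAX wraps
	 * the addition and yields a tiny allocation for a huge request.
	 */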
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 15fb516ae2d8..74946690aba4 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -26,12 +26,51 @@
* entity. Some flexibility for code reuse is provided through a separately
* allocated &drm_connector object and supporting optional &drm_bridge
* encoder drivers.
+ *
+ * Many drivers require only a very simple encoder that fulfills the minimum
+ * requirements of the display pipeline and does not add additional
+ * functionality. The function drm_simple_encoder_init() provides an
+ * implementation of such an encoder.
*/
-static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = {
+static const struct drm_encoder_funcs drm_simple_encoder_funcs_cleanup = {
.destroy = drm_encoder_cleanup,
};
+/**
+ * drm_simple_encoder_init - Initialize a preallocated encoder with
+ * basic functionality.
+ * @dev: drm device
+ * @encoder: the encoder to initialize
+ * @encoder_type: user visible type of the encoder
+ *
+ * Initializes a preallocated encoder that has no further functionality.
+ * Settings for possible CRTCs and clones are left to their initial values.
+ * The encoder will be cleaned up automatically as part of the mode-setting
+ * cleanup.
+ *
+ * The caller of drm_simple_encoder_init() is responsible for freeing
+ * the encoder's memory after the encoder has been cleaned up. At the
+ * moment this only works reliably if the encoder data structure is
+ * stored in the device structure. Free the encoder's memory as part of
+ * the device release function.
+ *
+ * FIXME: Later improvements to DRM's resource management may allow for
+ * an automated kfree() of the encoder's memory.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_simple_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ int encoder_type)
+{
+ return drm_encoder_init(dev, encoder,
+ &drm_simple_encoder_funcs_cleanup,
+ encoder_type, NULL);
+}
+EXPORT_SYMBOL(drm_simple_encoder_init);
+
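A minimal usage sketch (structure and function names assumed), with the
encoder embedded in the driver's device structure as the kerneldoc above
recommends:

	struct my_device {
		struct drm_device drm;
		struct drm_crtc crtc;
		struct drm_encoder encoder;
	};

	static int my_create_encoder(struct my_device *mdev)
	{
		int ret;

		ret = drm_simple_encoder_init(&mdev->drm, &mdev->encoder,
					      DRM_MODE_ENCODER_NONE);
		if (ret)
			return ret;

		mdev->encoder.possible_crtcs = drm_crtc_mask(&mdev->crtc);
		return 0;
	}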
static enum drm_mode_status
drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
@@ -229,7 +268,7 @@ static const struct drm_plane_funcs drm_simple_kms_plane_funcs = {
int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe,
struct drm_bridge *bridge)
{
- return drm_bridge_attach(&pipe->encoder, bridge, NULL);
+ return drm_bridge_attach(&pipe->encoder, bridge, NULL, 0);
}
EXPORT_SYMBOL(drm_simple_display_pipe_attach_bridge);
@@ -288,8 +327,7 @@ int drm_simple_display_pipe_init(struct drm_device *dev,
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
- ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_NONE);
if (ret || !connector)
return ret;
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 669c93fe2500..42d46414f767 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -43,27 +43,66 @@
* - Signal a syncobj (set a trivially signaled fence)
* - Wait for a syncobj's fence to appear and be signaled
*
+ * The syncobj userspace API also provides operations to manipulate a syncobj
+ * in terms of a timeline of struct &dma_fence_chain rather than a single
+ * struct &dma_fence, through the following operations:
+ *
+ * - Signal a given point on the timeline
+ * - Wait for a given point to appear and/or be signaled
+ * - Import and export from/to a given point of a timeline
+ *
* At its core, a syncobj is simply a wrapper around a pointer to a struct
* &dma_fence which may be NULL.
* When a syncobj is first created, its pointer is either NULL or a pointer
* to an already signaled fence depending on whether the
* &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
* &DRM_IOCTL_SYNCOBJ_CREATE.
- * When GPU work which signals a syncobj is enqueued in a DRM driver,
- * the syncobj fence is replaced with a fence which will be signaled by the
- * completion of that work.
- * When GPU work which waits on a syncobj is enqueued in a DRM driver, the
- * driver retrieves syncobj's current fence at the time the work is enqueued
- * waits on that fence before submitting the work to hardware.
- * If the syncobj's fence is NULL, the enqueue operation is expected to fail.
- * All manipulation of the syncobjs's fence happens in terms of the current
- * fence at the time the ioctl is called by userspace regardless of whether
- * that operation is an immediate host-side operation (signal or reset) or
- * or an operation which is enqueued in some driver queue.
- * &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used to
- * manipulate a syncobj from the host by resetting its pointer to NULL or
+ *
+ * If the syncobj is considered as a binary (its state is either signaled or
+ * unsignaled) primitive, when GPU work is enqueued in a DRM driver to signal
+ * the syncobj, the syncobj's fence is replaced with a fence which will be
+ * signaled by the completion of that work.
+ * If the syncobj is considered as a timeline primitive, when GPU work is
+ * enqueued in a DRM driver to signal a given point of the syncobj, a new
+ * struct &dma_fence_chain is created, pointing to the DRM driver's fence and
+ * also to the previous fence that was in the syncobj. The new struct
+ * &dma_fence_chain fence replaces the syncobj's fence and will be signaled by
+ * the completion of the DRM driver's work and of any work associated with the
+ * fence previously in the syncobj.
+ *
+ * When GPU work which waits on a syncobj is enqueued in a DRM driver, the
+ * driver retrieves the syncobj's fence at enqueue time and waits on it
+ * before submitting the work to hardware. That fence is either:
+ *
+ * - The syncobj's current fence if the syncobj is considered as a binary
+ * primitive.
+ * - The struct &dma_fence associated with a given point if the syncobj is
+ * considered as a timeline primitive.
+ *
+ * If the syncobj's fence is NULL or not present in the syncobj's timeline,
+ * the enqueue operation is expected to fail.
+ *
+ * With a binary syncobj, all manipulation of the syncobj's fence happens in
+ * terms of the current fence at the time the ioctl is called by userspace,
+ * regardless of whether that operation is an immediate host-side operation
+ * (signal or reset) or an operation which is enqueued in some driver
+ * queue. &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used
+ * to manipulate a syncobj from the host by resetting its pointer to NULL or
* setting its pointer to a fence which is already signaled.
*
+ * With a timeline syncobj, all manipulation of the syncobj's fence happens in
+ * terms of a u64 value referring to a point in the timeline. See
+ * dma_fence_chain_find_seqno() to see how a given point is found in the
+ * timeline.
+ *
+ * Note that applications should be careful to always use the timeline set of
+ * ioctls when dealing with a syncobj considered as a timeline. Using the
+ * binary set of ioctls with a timeline syncobj could result in incorrect
+ * synchronization. The use of binary syncobjs is supported through the
+ * timeline set of ioctls by using a point value of 0; this reproduces the
+ * behavior of the binary set of ioctls (for example replacing the syncobj's
+ * fence when signaling).
+ *
*
* Host-side wait on syncobjs
* --------------------------
@@ -87,6 +126,16 @@
* synchronize between the two.
* This requirement is inherited from the Vulkan fence API.
*
+ * Similarly, &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT takes an array of syncobj
+ * handles as well as an array of u64 points and does a host-side wait on all
+ * of the syncobj fences at the given points simultaneously.
+ *
+ * &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT also adds the ability to wait for a given
+ * fence to materialize on the timeline without waiting for the fence to be
+ * signaled by using the &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE flag. This
+ * requirement is inherited from the wait-before-signal behavior required by
+ * the Vulkan timeline semaphore API.
+ *
*
* Import/export of syncobjs
* -------------------------
@@ -120,6 +169,18 @@
* Because sync files are immutable, resetting or signaling the syncobj
* will not affect any sync files whose fences have been imported into the
* syncobj.
+ *
+ *
+ * Import/export of timeline points in timeline syncobjs
+ * -----------------------------------------------------
+ *
+ * &DRM_IOCTL_SYNCOBJ_TRANSFER provides a mechanism to transfer a struct
+ * &dma_fence_chain of a syncobj at a given u64 point to another u64 point
+ * into another syncobj.
+ *
+ * Note that when transferring a struct &dma_fence_chain at a given point
+ * between a timeline syncobj and a binary syncobj, you can use point 0 to
+ * mean take/replace the fence in the binary syncobj.
*/
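A userspace sketch (using the libdrm wrapper; handle names assumed): copying
the fence at point 3 of a timeline syncobj into a binary syncobj uses point 0
on the destination, per the note above.

	drmSyncobjTransfer(fd, binary_handle, 0, timeline_handle, 3, 0);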
#include <linux/anon_inodes.h>
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index dd2bc85f43cc..939f0032aab1 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -85,7 +85,6 @@ int drm_sysfs_init(void)
}
drm_class->devnode = drm_devnode;
- drm_setup_hdcp_srm(drm_class);
return 0;
}
@@ -98,7 +97,6 @@ void drm_sysfs_destroy(void)
{
if (IS_ERR_OR_NULL(drm_class))
return;
- drm_teardown_hdcp_srm(drm_class);
class_remove_file(drm_class, &class_attr_version.attr);
class_destroy(drm_class);
drm_class = NULL;
@@ -230,7 +228,7 @@ static ssize_t modes_show(struct device *device,
mutex_lock(&connector->dev->mode_config.mutex);
list_for_each_entry(mode, &connector->modes, head) {
- written += snprintf(buf + written, PAGE_SIZE - written, "%s\n",
+ written += scnprintf(buf + written, PAGE_SIZE - written, "%s\n",
mode->name);
}
mutex_unlock(&connector->dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 1659b13b178c..da7b0b0c1090 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -30,6 +30,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
@@ -69,6 +70,12 @@
* &drm_driver.max_vblank_count. In that case the vblank core only disables the
* vblanks after a timer has expired, which can be configured through the
* ``vblankoffdelay`` module parameter.
+ *
+ * Drivers for hardware without support for vertical-blanking interrupts
+ * must not call drm_vblank_init(). For such drivers, atomic helpers will
+ * automatically generate fake vblank events as part of the display update.
+ * This functionality can also be controlled by the driver by enabling and
+ * disabling struct drm_crtc_state.no_vblank.
*/
/* Retry timestamp calculation up to 3 times to satisfy
@@ -137,10 +144,9 @@ static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe)
if (crtc->funcs->get_vblank_counter)
return crtc->funcs->get_vblank_counter(crtc);
- }
-
- if (dev->driver->get_vblank_counter)
+ } else if (dev->driver->get_vblank_counter) {
return dev->driver->get_vblank_counter(dev, pipe);
+ }
return drm_vblank_no_hw_counter(dev, pipe);
}
@@ -332,7 +338,8 @@ u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
u64 vblank;
unsigned long flags;
- WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) && !dev->driver->get_vblank_timestamp,
+ WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) &&
+ !crtc->funcs->get_vblank_timestamp,
"This function requires support for accurate vblank timestamps.");
spin_lock_irqsave(&dev->vblank_time_lock, flags);
@@ -354,13 +361,11 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
if (WARN_ON(!crtc))
return;
- if (crtc->funcs->disable_vblank) {
+ if (crtc->funcs->disable_vblank)
crtc->funcs->disable_vblank(crtc);
- return;
- }
+ } else {
+ dev->driver->disable_vblank(dev, pipe);
}
-
- dev->driver->disable_vblank(dev, pipe);
}
/*
@@ -480,19 +485,6 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
- /* Driver specific high-precision vblank timestamping supported? */
- if (dev->driver->get_vblank_timestamp)
- DRM_INFO("Driver supports precise vblank timestamp query.\n");
- else
- DRM_INFO("No driver support for vblank timestamp query.\n");
-
- /* Must have precise timestamping for reliable vblank instant disable */
- if (dev->vblank_disable_immediate && !dev->driver->get_vblank_timestamp) {
- dev->vblank_disable_immediate = false;
- DRM_INFO("Setting vblank_disable_immediate to false because "
- "get_vblank_timestamp == NULL\n");
- }
-
return 0;
err:
@@ -502,6 +494,28 @@ err:
EXPORT_SYMBOL(drm_vblank_init);
/**
+ * drm_dev_has_vblank - test if vblanking has been initialized for
+ * a device
+ * @dev: the device
+ *
+ * Drivers may call this function to test if vblank support is
+ * initialized for a device. For most hardware this means that vblanking
+ * can also be enabled.
+ *
+ * Atomic helpers use this function to initialize
+ * &drm_crtc_state.no_vblank. See also drm_atomic_helper_check_modeset().
+ *
+ * Returns:
+ * True if vblanking has been initialized for the given device, false
+ * otherwise.
+ */
+bool drm_dev_has_vblank(const struct drm_device *dev)
+{
+ return dev->num_crtcs != 0;
+}
+EXPORT_SYMBOL(drm_dev_has_vblank);
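A usage sketch: drivers and helpers can key fake-vblank generation off this
test, e.g. when filling in &drm_crtc_state.no_vblank (the state variable is
assumed):

	crtc_state->no_vblank = !drm_dev_has_vblank(crtc->dev);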
+
+/**
* drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
* @crtc: which CRTC's vblank waitqueue to retrieve
*
@@ -523,9 +537,9 @@ EXPORT_SYMBOL(drm_crtc_vblank_waitqueue);
*
* Calculate and store various constants which are later needed by vblank and
* swap-completion timestamping, e.g, by
- * drm_calc_vbltimestamp_from_scanoutpos(). They are derived from CRTC's true
- * scanout timing, so they take things like panel scaling or other adjustments
- * into account.
+ * drm_crtc_vblank_helper_get_vblank_timestamp(). They are derived from
+ * CRTC's true scanout timing, so they take things like panel scaling or
+ * other adjustments into account.
*/
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
@@ -576,9 +590,9 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_calc_timestamping_constants);
/**
- * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
- * @dev: DRM device
- * @pipe: index of CRTC whose vblank timestamp to retrieve
+ * drm_crtc_vblank_helper_get_vblank_timestamp_internal - precise vblank
+ * timestamp helper
+ * @crtc: CRTC whose vblank timestamp to retrieve
* @max_error: Desired maximum allowable error in timestamps (nanosecs)
* On return contains true maximum error of timestamp
* @vblank_time: Pointer to time which should receive the timestamp
@@ -586,11 +600,12 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* True when called from drm_crtc_handle_vblank(). Some drivers
* need to apply some workarounds for gpu-specific vblank irq quirks
* if flag is set.
+ * @get_scanout_position:
+ * Callback function to retrieve the scanout position. See
+ * &struct drm_crtc_helper_funcs.get_scanout_position.
*
* Implements calculation of exact vblank timestamps from given drm_display_mode
- * timings and current video scanout position of a CRTC. This can be directly
- * used as the &drm_driver.get_vblank_timestamp implementation of a kms driver
- * if &drm_driver.get_scanout_position is implemented.
+ * timings and current video scanout position of a CRTC.
*
* The current implementation only handles standard video modes. For double scan
* and interlaced modes the driver is supposed to adjust the hardware mode
@@ -606,34 +621,30 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* Returns true on success, and false on failure, i.e. when no accurate
* timestamp could be acquired.
*/
-bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
- unsigned int pipe,
- int *max_error,
- ktime_t *vblank_time,
- bool in_vblank_irq)
+bool
+drm_crtc_vblank_helper_get_vblank_timestamp_internal(
+ struct drm_crtc *crtc, int *max_error, ktime_t *vblank_time,
+ bool in_vblank_irq,
+ drm_vblank_get_scanout_position_func get_scanout_position)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct timespec64 ts_etime, ts_vblank_time;
ktime_t stime, etime;
bool vbl_status;
- struct drm_crtc *crtc;
const struct drm_display_mode *mode;
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
int vpos, hpos, i;
int delta_ns, duration_ns;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return false;
-
- crtc = drm_crtc_from_index(dev, pipe);
-
- if (pipe >= dev->num_crtcs || !crtc) {
+ if (pipe >= dev->num_crtcs) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return false;
}
/* Scanout position query not supported? Should not happen. */
- if (!dev->driver->get_scanout_position) {
- DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
+ if (!get_scanout_position) {
+ DRM_ERROR("Called from CRTC w/o get_scanout_position()!?\n");
return false;
}
@@ -648,7 +659,6 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
if (mode->crtc_clock == 0) {
DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe);
WARN_ON_ONCE(drm_drv_uses_atomic_modeset(dev));
-
return false;
}
@@ -664,11 +674,10 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
* Get vertical and horizontal scanout position vpos, hpos,
* and bounding timestamps stime, etime, pre/post query.
*/
- vbl_status = dev->driver->get_scanout_position(dev, pipe,
- in_vblank_irq,
- &vpos, &hpos,
- &stime, &etime,
- mode);
+ vbl_status = get_scanout_position(crtc, in_vblank_irq,
+ &vpos, &hpos,
+ &stime, &etime,
+ mode);
/* Return as no-op if scanout query unsupported or failed. */
if (!vbl_status) {
@@ -720,7 +729,49 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
return true;
}
-EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
+EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_internal);
+
+/**
+ * drm_crtc_vblank_helper_get_vblank_timestamp - precise vblank timestamp
+ * helper
+ * @crtc: CRTC whose vblank timestamp to retrieve
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs)
+ * On return contains true maximum error of timestamp
+ * @vblank_time: Pointer to time which should receive the timestamp
+ * @in_vblank_irq:
+ * True when called from drm_crtc_handle_vblank(). Some drivers
+ * need to apply some workarounds for gpu-specific vblank irq quirks
+ * if flag is set.
+ *
+ * Implements calculation of exact vblank timestamps from given drm_display_mode
+ * timings and current video scanout position of a CRTC. This can be directly
+ * used as the &drm_crtc_funcs.get_vblank_timestamp implementation of a kms
+ * driver if &drm_crtc_helper_funcs.get_scanout_position is implemented.
+ *
+ * The current implementation only handles standard video modes. For double scan
+ * and interlaced modes the driver is supposed to adjust the hardware mode
+ * (taken from &drm_crtc_state.adjusted_mode for atomic modeset drivers) to
+ * match the scanout position reported.
+ *
+ * Note that atomic drivers must call drm_calc_timestamping_constants() before
+ * enabling a CRTC. The atomic helpers already take care of that in
+ * drm_atomic_helper_update_legacy_modeset_state().
+ *
+ * Returns:
+ * True on success, or false on failure, i.e. when no accurate timestamp
+ * could be acquired.
+ */
+bool drm_crtc_vblank_helper_get_vblank_timestamp(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq)
+{
+ return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
+ crtc, max_error, vblank_time, in_vblank_irq,
+ crtc->helper_private->get_scanout_position);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp);
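A hookup sketch (driver names assumed): a KMS driver implements the
scanout-position helper callback, then uses this function directly as its
&drm_crtc_funcs.get_vblank_timestamp implementation.

	static const struct drm_crtc_helper_funcs my_crtc_helper_funcs = {
		.get_scanout_position = my_get_scanout_position,
	};

	static const struct drm_crtc_funcs my_crtc_funcs = {
		.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
	};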
/**
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
@@ -747,15 +798,19 @@ static bool
drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
ktime_t *tvblank, bool in_vblank_irq)
{
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
bool ret = false;
/* Define requested maximum error on timestamps (nanoseconds). */
int max_error = (int) drm_timestamp_precision * 1000;
/* Query driver if possible and precision timestamping enabled. */
- if (dev->driver->get_vblank_timestamp && (max_error > 0))
- ret = dev->driver->get_vblank_timestamp(dev, pipe, &max_error,
+ if (crtc && crtc->funcs->get_vblank_timestamp && max_error > 0) {
+ ret = crtc->funcs->get_vblank_timestamp(crtc, &max_error,
tvblank, in_vblank_irq);
+ }
/* GPU high precision timestamp query unsupported or failed.
* Return current monotonic/gettimeofday timestamp as best estimate.
@@ -977,9 +1032,11 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
if (crtc->funcs->enable_vblank)
return crtc->funcs->enable_vblank(crtc);
+ } else if (dev->driver->enable_vblank) {
+ return dev->driver->enable_vblank(dev, pipe);
}
- return dev->driver->enable_vblank(dev, pipe);
+ return -EINVAL;
}
static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
@@ -1738,6 +1795,8 @@ done:
static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
{
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
+ bool high_prec = false;
struct drm_pending_vblank_event *e, *t;
ktime_t now;
u64 seq;
@@ -1760,8 +1819,10 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
send_vblank_event(dev, e, seq, now);
}
- trace_drm_vblank_event(pipe, seq, now,
- dev->driver->get_vblank_timestamp != NULL);
+ if (crtc && crtc->funcs->get_vblank_timestamp)
+ high_prec = true;
+
+ trace_drm_vblank_event(pipe, seq, now, high_prec);
}
/**
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 52e87e4869a5..aa88911bbc06 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -102,7 +102,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
return tmp;
}
-/**
+/*
* \c fault method for AGP virtual memory.
*
* \param vma virtual memory area.
@@ -192,7 +192,7 @@ static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
}
#endif
-/**
+/*
* \c nopage method for shared virtual memory.
*
* \param vma virtual memory area.
@@ -225,7 +225,7 @@ static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
return 0;
}
-/**
+/*
* \c close method for shared virtual memory.
*
* \param vma virtual memory area.
@@ -269,8 +269,6 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
}
if (!found_maps) {
- drm_dma_handle_t dmah;
-
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
@@ -284,10 +282,10 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
case _DRM_SCATTER_GATHER:
break;
case _DRM_CONSISTENT:
- dmah.vaddr = map->handle;
- dmah.busaddr = map->offset;
- dmah.size = map->size;
- __drm_legacy_pci_free(dev, &dmah);
+ dma_free_coherent(&dev->pdev->dev,
+ map->size,
+ map->handle,
+ map->offset);
break;
}
kfree(map);
@@ -296,7 +294,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
mutex_unlock(&dev->struct_mutex);
}
-/**
+/*
* \c fault method for DMA virtual memory.
*
* \param address access address.
@@ -331,7 +329,7 @@ static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
return 0;
}
-/**
+/*
* \c fault method for scatter-gather virtual memory.
*
* \param address access address.
@@ -437,7 +435,7 @@ static void drm_vm_close_locked(struct drm_device *dev,
}
}
-/**
+/*
* \c close method for all virtual memory types.
*
* \param vma virtual memory area.
@@ -455,7 +453,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
mutex_unlock(&dev->struct_mutex);
}
-/**
+/*
* mmap DMA memory.
*
* \param file_priv DRM file private.
@@ -515,7 +513,7 @@ static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
#endif
}
-/**
+/*
* mmap DMA memory.
*
* \param file_priv DRM file private.
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 76ecdf8bd31c..6b43c1c94e8f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -283,11 +283,6 @@ static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
args->flags, &args->handle);
}
-#define TS(t) ((struct timespec){ \
- .tv_sec = (t).tv_sec, \
- .tv_nsec = (t).tv_nsec \
-})
-
static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -302,7 +297,7 @@ static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
- ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));
+ ret = etnaviv_gem_cpu_prep(obj, args->op, &args->timeout);
drm_gem_object_put_unlocked(obj);
@@ -355,7 +350,7 @@ static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
{
struct drm_etnaviv_wait_fence *args = data;
struct etnaviv_drm_private *priv = dev->dev_private;
- struct timespec *timeout = &TS(args->timeout);
+ struct drm_etnaviv_timespec *timeout = &args->timeout;
struct etnaviv_gpu *gpu;
if (args->flags & ~(ETNA_WAIT_NONBLOCK))
@@ -404,7 +399,7 @@ static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
{
struct etnaviv_drm_private *priv = dev->dev_private;
struct drm_etnaviv_gem_wait *args = data;
- struct timespec *timeout = &TS(args->timeout);
+ struct drm_etnaviv_timespec *timeout = &args->timeout;
struct drm_gem_object *obj;
struct etnaviv_gpu *gpu;
int ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 32cfa5a48d42..efc656efeb0f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -61,7 +61,7 @@ int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
void etnaviv_gem_prime_unpin(struct drm_gem_object *obj);
void *etnaviv_gem_vmap(struct drm_gem_object *obj);
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
- struct timespec *timeout);
+ struct drm_etnaviv_timespec *timeout);
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
void etnaviv_gem_free_object(struct drm_gem_object *obj);
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
@@ -107,11 +107,12 @@ static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
* between the specified timeout and the current CLOCK_MONOTONIC time.
*/
static inline unsigned long etnaviv_timeout_to_jiffies(
- const struct timespec *timeout)
+ const struct drm_etnaviv_timespec *timeout)
{
- struct timespec64 ts, to;
-
- to = timespec_to_timespec64(*timeout);
+ struct timespec64 ts, to = {
+ .tv_sec = timeout->tv_sec,
+ .tv_nsec = timeout->tv_nsec,
+ };
ktime_get_ts64(&ts);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index cb1faaac380a..6adea180d629 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -373,7 +373,7 @@ static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
}
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
- struct timespec *timeout)
+ struct drm_etnaviv_timespec *timeout)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct drm_device *dev = obj->dev;
@@ -431,7 +431,7 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
}
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
- struct timespec *timeout)
+ struct drm_etnaviv_timespec *timeout)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index d6270acce619..6b68fe16041b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -112,7 +112,7 @@ struct etnaviv_gem_submit {
void etnaviv_submit_put(struct etnaviv_gem_submit * submit);
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
- struct timespec *timeout);
+ struct drm_etnaviv_timespec *timeout);
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res);
void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index d47d1a8e0219..799ec20b267d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1132,7 +1132,7 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
* Cmdstream submission/retirement:
*/
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
- u32 id, struct timespec *timeout)
+ u32 id, struct drm_etnaviv_timespec *timeout)
{
struct dma_fence *fence;
int ret;
@@ -1179,7 +1179,8 @@ int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
* that lock in this function while waiting.
*/
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
- struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
+ struct etnaviv_gem_object *etnaviv_obj,
+ struct drm_etnaviv_timespec *timeout)
{
unsigned long remaining;
long ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 8f9bd4edc96a..97bb48042b4d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -169,9 +169,10 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu);
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
- u32 fence, struct timespec *timeout);
+ u32 fence, struct drm_etnaviv_timespec *timeout);
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
- struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
+ struct etnaviv_gem_object *etnaviv_obj,
+ struct drm_etnaviv_timespec *timeout);
struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index ff59c641fa80..e7b58097ccdc 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -139,7 +139,7 @@ static void decon_ctx_remove(struct decon_context *ctx)
static u32 decon_calc_clkdiv(struct decon_context *ctx,
const struct drm_display_mode *mode)
{
- unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh;
+ unsigned long ideal_clk = mode->clock * 1000; /* mode->clock is in kHz */
u32 clkdiv;
/* Find the clock divider value that gets us closest to ideal_clk */
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 4785885c0f4f..d23d3502ca91 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -106,7 +106,8 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
/* Pre-empt DP connector creation if there's a bridge */
if (dp->ptn_bridge) {
- ret = drm_bridge_attach(&dp->encoder, dp->ptn_bridge, bridge);
+ ret = drm_bridge_attach(&dp->encoder, dp->ptn_bridge, bridge,
+ 0);
if (ret) {
DRM_DEV_ERROR(dp->dev,
"Failed to attach bridge to drm\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ba0f868b2477..57defeb44522 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -270,7 +270,7 @@ static int exynos_drm_bind(struct device *dev)
struct drm_encoder *encoder;
struct drm_device *drm;
unsigned int clone_mask;
- int cnt, ret;
+ int ret;
drm = drm_dev_alloc(&exynos_drm_driver, dev);
if (IS_ERR(drm))
@@ -293,10 +293,9 @@ static int exynos_drm_bind(struct device *dev)
exynos_drm_mode_config_init(drm);
/* setup possible_clones. */
- cnt = 0;
clone_mask = 0;
list_for_each_entry(encoder, &drm->mode_config.encoder_list, head)
- clone_mask |= (1 << (cnt++));
+ clone_mask |= drm_encoder_mask(encoder);
list_for_each_entry(encoder, &drm->mode_config.encoder_list, head)
encoder->possible_clones = clone_mask;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 33628d85edad..e080aa92338c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1514,7 +1514,6 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
return 0;
connector->funcs->reset(connector);
- drm_fb_helper_add_one_connector(drm->fb_helper, connector);
drm_connector_register(connector);
return 0;
}
@@ -1540,7 +1539,7 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
out_bridge = of_drm_find_bridge(device->dev.of_node);
if (out_bridge) {
- drm_bridge_attach(encoder, out_bridge, NULL);
+ drm_bridge_attach(encoder, out_bridge, NULL, 0);
dsi->out_bridge = out_bridge;
list_splice_init(&encoder->bridge_chain, &dsi->bridge_chain);
} else {
@@ -1717,7 +1716,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
if (dsi->in_bridge_node) {
in_bridge = of_drm_find_bridge(dsi->in_bridge_node);
if (in_bridge)
- drm_bridge_attach(encoder, in_bridge, NULL);
+ drm_bridge_attach(encoder, in_bridge, NULL, 0);
}
return mipi_dsi_host_register(&dsi->dsi_host);
@@ -1773,8 +1772,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
dsi->supplies);
if (ret) {
- dev_info(dev, "failed to get regulators: %d\n", ret);
- return -EPROBE_DEFER;
+ if (ret != -EPROBE_DEFER)
+ dev_info(dev, "failed to get regulators: %d\n", ret);
+ return ret;
}
dsi->clks = devm_kcalloc(dev,
@@ -1787,9 +1787,10 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
if (IS_ERR(dsi->clks[i])) {
if (strcmp(clk_names[i], "sclk_mipi") == 0) {
- strcpy(clk_names[i], OLD_SCLK_MIPI_CLK_NAME);
- i--;
- continue;
+ dsi->clks[i] = devm_clk_get(dev,
+ OLD_SCLK_MIPI_CLK_NAME);
+ if (!IS_ERR(dsi->clks[i]))
+ continue;
}
dev_info(dev, "failed to get the clock: %s\n",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 647a1fd1d815..e6ceaf36fb04 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -200,21 +200,13 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper, MAX_CONNECTOR);
+ ret = drm_fb_helper_init(dev, helper);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev,
"failed to initialize drm fb helper.\n");
goto err_init;
}
- ret = drm_fb_helper_single_add_all_connectors(helper);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev,
- "failed to register drm_fb_helper_connector.\n");
- goto err_setup;
-
- }
-
ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev,
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 9ff921f43a93..1a7c828fc41d 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -960,7 +960,7 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
drm_connector_attach_encoder(connector, encoder);
if (hdata->bridge) {
- ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
+ ret = drm_bridge_attach(encoder, hdata->bridge, NULL, 0);
if (ret)
DRM_DEV_ERROR(hdata->dev, "Failed to attach bridge\n");
}
@@ -1805,18 +1805,10 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en");
- if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV) {
+ if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV)
if (IS_ERR(hdata->reg_hdmi_en))
return PTR_ERR(hdata->reg_hdmi_en);
- ret = regulator_enable(hdata->reg_hdmi_en);
- if (ret) {
- DRM_DEV_ERROR(dev,
- "failed to enable hdmi-en regulator\n");
- return ret;
- }
- }
-
return hdmi_bridge_init(hdata);
}
@@ -2023,6 +2015,15 @@ static int hdmi_probe(struct platform_device *pdev)
}
}
+ if (!IS_ERR(hdata->reg_hdmi_en)) {
+ ret = regulator_enable(hdata->reg_hdmi_en);
+ if (ret) {
+ DRM_DEV_ERROR(dev,
+ "failed to enable hdmi-en regulator\n");
+ goto err_hdmiphy;
+ }
+ }
+
pm_runtime_enable(dev);
audio_infoframe = &hdata->audio.infoframe;
@@ -2047,7 +2048,8 @@ err_unregister_audio:
err_rpm_disable:
pm_runtime_disable(dev);
-
+ if (!IS_ERR(hdata->reg_hdmi_en))
+ regulator_disable(hdata->reg_hdmi_en);
err_hdmiphy:
if (hdata->hdmiphy_port)
put_device(&hdata->hdmiphy_port->dev);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 9598ee3cc4d2..cff344367f81 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -151,5 +151,5 @@ int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev)
return fsl_dcu_attach_panel(fsl_dev, panel);
}
- return drm_bridge_attach(&fsl_dev->encoder, bridge, NULL);
+ return drm_bridge_attach(&fsl_dev->encoder, bridge, NULL, 0);
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 1ed854f498b7..686385a66167 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -977,6 +977,9 @@ const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
.set_config = gma_crtc_set_config,
.destroy = gma_crtc_destroy,
.page_flip = gma_crtc_page_flip,
+ .enable_vblank = psb_enable_vblank,
+ .disable_vblank = psb_disable_vblank,
+ .get_vblank_counter = psb_get_vblank_counter,
};
const struct gma_clock_funcs cdv_clock_funcs = {
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 1459076d1980..1d8f67e4795a 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -513,14 +513,10 @@ int psb_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, fb_helper, &psb_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, fb_helper, INTELFB_CONN_LIMIT);
+ ret = drm_fb_helper_init(dev, fb_helper);
if (ret)
goto free;
- ret = drm_fb_helper_single_add_all_connectors(fb_helper);
- if (ret)
- goto fini;
-
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index afaf4bea21cf..9278bcfad1bf 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -503,7 +503,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
* Map the GTT and the stolen memory area
*/
if (!resume)
- dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
+ dev_priv->gtt_map = ioremap(pg->gtt_phys_start,
gtt_pages << PAGE_SHIFT);
if (!dev_priv->gtt_map) {
dev_err(dev->dev, "Failure to map gtt.\n");
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index a1f9ce9465a5..0e6facf21e33 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -227,7 +227,7 @@ struct bdb_general_definitions {
* number = (block_size - sizeof(bdb_general_definitions))/
* sizeof(child_device_config);
*/
- struct child_device_config devices[0];
+ struct child_device_config devices[];
};
struct bdb_lvds_options {
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 52591416f8fe..2411eb9827b8 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -256,7 +256,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
PSB_AUX_RESOURCE);
resource_len = pci_resource_len(dev_priv->aux_pdev,
PSB_AUX_RESOURCE);
- dev_priv->aux_reg = ioremap_nocache(resource_start,
+ dev_priv->aux_reg = ioremap(resource_start,
resource_len);
if (!dev_priv->aux_reg)
goto out_err;
@@ -363,7 +363,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
drm_irq_install(dev, dev->pdev->irq);
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- dev->driver->get_vblank_counter = psb_get_vblank_counter;
psb_modeset_init(dev);
psb_fbdev_init(dev);
@@ -507,9 +506,6 @@ static struct drm_driver driver = {
.irq_postinstall = psb_irq_postinstall,
.irq_uninstall = psb_irq_uninstall,
.irq_handler = psb_irq_handler,
- .enable_vblank = psb_enable_vblank,
- .disable_vblank = psb_disable_vblank,
- .get_vblank_counter = psb_get_vblank_counter,
.gem_free_object = psb_gem_free_object,
.gem_vm_ops = &psb_gem_vm_ops,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 3d4ef3071d45..956926341316 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -681,15 +681,15 @@ extern void psb_irq_turn_off_dpst(struct drm_device *dev);
extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
-extern int psb_enable_vblank(struct drm_device *dev, unsigned int pipe);
-extern void psb_disable_vblank(struct drm_device *dev, unsigned int pipe);
+extern int psb_enable_vblank(struct drm_crtc *crtc);
+extern void psb_disable_vblank(struct drm_crtc *crtc);
void
psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
void
psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
-extern u32 psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
+extern u32 psb_get_vblank_counter(struct drm_crtc *crtc);
/* framebuffer.c */
extern int psbfb_probed(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index fed3b563e62e..531c5485be17 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -433,6 +433,9 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = {
.set_config = gma_crtc_set_config,
.destroy = gma_crtc_destroy,
.page_flip = gma_crtc_page_flip,
+ .enable_vblank = psb_enable_vblank,
+ .disable_vblank = psb_disable_vblank,
+ .get_vblank_counter = psb_get_vblank_counter,
};
const struct gma_clock_funcs psb_clock_funcs = {
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 91f90016dba9..15eb3770d817 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -506,8 +506,10 @@ int psb_irq_disable_dpst(struct drm_device *dev)
/*
* It is used to enable VBLANK interrupt
*/
-int psb_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int psb_enable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
uint32_t reg_val = 0;
@@ -545,8 +547,10 @@ int psb_enable_vblank(struct drm_device *dev, unsigned int pipe)
/*
* It is used to disable VBLANK interrupt
*/
-void psb_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void psb_disable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
@@ -618,8 +622,10 @@ void mdfld_disable_te(struct drm_device *dev, int pipe)
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
-u32 psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+u32 psb_get_vblank_counter(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
uint32_t high_frame = PIPEAFRAMEHIGH;
uint32_t low_frame = PIPEAFRAMEPIXEL;
uint32_t pipeconf_reg = PIPEACONF;
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index 58fd502e3b9d..4f73998848d1 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -12,6 +12,7 @@
#ifndef _PSB_IRQ_H_
#define _PSB_IRQ_H_
+struct drm_crtc;
struct drm_device;
bool sysirq_init(struct drm_device *dev);
@@ -26,9 +27,9 @@ int psb_irq_enable_dpst(struct drm_device *dev);
int psb_irq_disable_dpst(struct drm_device *dev);
void psb_irq_turn_on_dpst(struct drm_device *dev);
void psb_irq_turn_off_dpst(struct drm_device *dev);
-int psb_enable_vblank(struct drm_device *dev, unsigned int pipe);
-void psb_disable_vblank(struct drm_device *dev, unsigned int pipe);
-u32 psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
+int psb_enable_vblank(struct drm_crtc *crtc);
+void psb_disable_vblank(struct drm_crtc *crtc);
+u32 psb_get_vblank_counter(struct drm_crtc *crtc);
int mdfld_enable_te(struct drm_device *dev, int pipe);
void mdfld_disable_te(struct drm_device *dev, int pipe);
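[editor's note: all three vblank hooks move from struct drm_driver to struct drm_crtc_funcs, matching the core's per-CRTC vblank handling; the old (dev, pipe) pair is recovered from the CRTC itself, since pipe numbers and CRTC indices coincide here. The conversion pattern, roughly (foo_* names are illustrative):

/* old: int (*enable_vblank)(struct drm_device *dev, unsigned int pipe);
 * new: int (*enable_vblank)(struct drm_crtc *crtc); */
static int foo_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = drm_crtc_index(crtc);	/* == crtc->index */

	return foo_pipe_vblank_on(dev, pipe);	/* hypothetical helper */
}

static const struct drm_crtc_funcs foo_crtc_funcs = {
	/* ... */
	.enable_vblank = foo_enable_vblank,
	.disable_vblank = foo_disable_vblank,
	.get_vblank_counter = foo_get_vblank_counter,
};
]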
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index 7fa7d4933f60..55b46a7150a5 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -40,6 +40,7 @@ struct hibmc_dislay_pll_config {
};
static const struct hibmc_dislay_pll_config hibmc_pll_table[] = {
+ {640, 480, CRT_PLL1_HS_25MHZ, CRT_PLL2_HS_25MHZ},
{800, 600, CRT_PLL1_HS_40MHZ, CRT_PLL2_HS_40MHZ},
{1024, 768, CRT_PLL1_HS_65MHZ, CRT_PLL2_HS_65MHZ},
{1152, 864, CRT_PLL1_HS_80MHZ_1152, CRT_PLL2_HS_80MHZ},
@@ -47,6 +48,8 @@ static const struct hibmc_dislay_pll_config hibmc_pll_table[] = {
{1280, 720, CRT_PLL1_HS_74MHZ, CRT_PLL2_HS_74MHZ},
{1280, 960, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
{1280, 1024, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
+ {1440, 900, CRT_PLL1_HS_106MHZ, CRT_PLL2_HS_106MHZ},
+ {1600, 900, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
{1600, 1200, CRT_PLL1_HS_162MHZ, CRT_PLL2_HS_162MHZ},
{1920, 1080, CRT_PLL1_HS_148MHZ, CRT_PLL2_HS_148MHZ},
{1920, 1200, CRT_PLL1_HS_193MHZ, CRT_PLL2_HS_193MHZ},
@@ -80,6 +83,9 @@ static int hibmc_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
+ if (!crtc_state->enable)
+ return 0;
+
if (state->crtc_x + state->crtc_w >
crtc_state->adjusted_mode.hdisplay ||
state->crtc_y + state->crtc_h >
@@ -184,6 +190,20 @@ static struct drm_plane *hibmc_plane_init(struct hibmc_drm_private *priv)
return plane;
}
+static void hibmc_crtc_dpms(struct drm_crtc *crtc, int dpms)
+{
+ struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ unsigned int reg;
+
+ reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
+ reg &= ~HIBMC_CRT_DISP_CTL_DPMS_MASK;
+ reg |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_DPMS, dpms);
+ reg &= ~HIBMC_CRT_DISP_CTL_TIMING_MASK;
+ if (dpms == HIBMC_CRT_DPMS_ON)
+ reg |= HIBMC_CRT_DISP_CTL_TIMING(1);
+ writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL);
+}
+
static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -200,6 +220,7 @@ static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc,
reg |= HIBMC_CURR_GATE_DISPLAY(1);
hibmc_set_current_gate(priv, reg);
drm_crtc_vblank_on(crtc);
+ hibmc_crtc_dpms(crtc, HIBMC_CRT_DPMS_ON);
}
static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -208,6 +229,7 @@ static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
unsigned int reg;
struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ hibmc_crtc_dpms(crtc, HIBMC_CRT_DPMS_OFF);
drm_crtc_vblank_off(crtc);
hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_SLEEP);
@@ -221,6 +243,25 @@ static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
hibmc_set_current_gate(priv, reg);
}
+static enum drm_mode_status
+hibmc_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ int i = 0;
+ int vrefresh = drm_mode_vrefresh(mode);
+
+ if (vrefresh < 59 || vrefresh > 61)
+ return MODE_NOCLOCK;
+
+ for (i = 0; i < ARRAY_SIZE(hibmc_pll_table); i++) {
+ if (hibmc_pll_table[i].hdisplay == mode->hdisplay &&
+ hibmc_pll_table[i].vdisplay == mode->vdisplay)
+ return MODE_OK;
+ }
+
+ return MODE_BAD;
+}
+
static unsigned int format_pll_reg(void)
{
unsigned int pllreg = 0;
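[editor's note: the new hibmc_crtc_mode_valid() only passes modes whose geometry has a PLL table entry and whose refresh rate is roughly 60 Hz. drm_mode_vrefresh() derives the rate from the timings; e.g. the VESA 1024x768@60 mode (pixel clock 65000 kHz, htotal 1344, vtotal 806) gives 65000 * 1000 / (1344 * 806) ~= 60 and is accepted, while a 75 Hz variant falls outside the 59..61 window. A standalone sketch of the arithmetic:

/* Approximation of drm_mode_vrefresh() for simple (non-interlaced,
 * non-doublescan) modes; clock is in kHz. */
static int approx_vrefresh(unsigned int clock, unsigned int htotal,
			   unsigned int vtotal)
{
	return (clock * 1000 + (htotal * vtotal) / 2) / (htotal * vtotal);
}

/* approx_vrefresh(65000, 1344, 806) == 60 */
]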
@@ -435,6 +476,42 @@ static void hibmc_crtc_disable_vblank(struct drm_crtc *crtc)
priv->mmio + HIBMC_RAW_INTERRUPT_EN);
}
+static void hibmc_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ void __iomem *mmio = priv->mmio;
+ u16 *r, *g, *b;
+ unsigned int reg;
+ int i;
+
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
+
+ for (i = 0; i < crtc->gamma_size; i++) {
+ unsigned int offset = i << 2;
+ u8 red = *r++ >> 8;
+ u8 green = *g++ >> 8;
+ u8 blue = *b++ >> 8;
+ u32 rgb = (red << 16) | (green << 8) | blue;
+
+ writel(rgb, mmio + HIBMC_CRT_PALETTE + offset);
+ }
+
+ reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
+ reg |= HIBMC_FIELD(HIBMC_CTL_DISP_CTL_GAMMA, 1);
+ writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL);
+}
+
+static int hibmc_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ hibmc_crtc_load_lut(crtc);
+
+ return 0;
+}
+
static const struct drm_crtc_funcs hibmc_crtc_funcs = {
.page_flip = drm_atomic_helper_page_flip,
.set_config = drm_atomic_helper_set_config,
@@ -444,6 +521,7 @@ static const struct drm_crtc_funcs hibmc_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = hibmc_crtc_enable_vblank,
.disable_vblank = hibmc_crtc_disable_vblank,
+ .gamma_set = hibmc_crtc_gamma_set,
};
static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
@@ -452,6 +530,7 @@ static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
.atomic_flush = hibmc_crtc_atomic_flush,
.atomic_enable = hibmc_crtc_atomic_enable,
.atomic_disable = hibmc_crtc_atomic_disable,
+ .mode_valid = hibmc_crtc_mode_valid,
};
int hibmc_de_init(struct hibmc_drm_private *priv)
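[editor's note: the gamma path reads the 256-entry, 16-bit-per-channel table the DRM core keeps in crtc->gamma_store (the core copies the user's red/green/blue arrays there before invoking .gamma_set, which is why hibmc_crtc_gamma_set() can ignore its arguments) and truncates each channel to its top 8 bits for the hardware palette. The packing, as a standalone sketch:

/* One palette word: 0x00RRGGBB, keeping the high 8 bits per channel. */
static u32 pack_palette_word(u16 red, u16 green, u16 blue)
{
	return ((u32)(red >> 8) << 16) |
	       ((u32)(green >> 8) << 8) |
	       (blue >> 8);	/* 0xffff -> 0xff per channel */
}
]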
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 11d1b0761c9a..222356a4f9a8 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -91,11 +91,11 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
priv->dev->mode_config.min_width = 0;
priv->dev->mode_config.min_height = 0;
priv->dev->mode_config.max_width = 1920;
- priv->dev->mode_config.max_height = 1440;
+ priv->dev->mode_config.max_height = 1200;
priv->dev->mode_config.fb_base = priv->fb_base;
priv->dev->mode_config.preferred_depth = 24;
- priv->dev->mode_config.prefer_shadow = 0;
+ priv->dev->mode_config.prefer_shadow = 1;
priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
@@ -213,7 +213,7 @@ static int hibmc_hw_map(struct hibmc_drm_private *priv)
ioaddr = pci_resource_start(pdev, 1);
iosize = pci_resource_len(pdev, 1);
- priv->mmio = devm_ioremap_nocache(dev->dev, ioaddr, iosize);
+ priv->mmio = devm_ioremap(dev->dev, ioaddr, iosize);
if (!priv->mmio) {
DRM_ERROR("Cannot map mmio region\n");
return -ENOMEM;
@@ -327,6 +327,11 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
struct drm_device *dev;
int ret;
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
+ "hibmcdrmfb");
+ if (ret)
+ return ret;
+
dev = drm_dev_alloc(&hibmc_driver, &pdev->dev);
if (IS_ERR(dev)) {
DRM_ERROR("failed to allocate drm_device\n");
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
index b63a1ee15ceb..17b30c393b10 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
@@ -68,6 +68,12 @@
#define HIBMC_CRT_DISP_CTL 0x80200
+#define HIBMC_CRT_DISP_CTL_DPMS(x) ((x) << 30)
+#define HIBMC_CRT_DISP_CTL_DPMS_MASK 0xc0000000
+
+#define HIBMC_CRT_DPMS_ON 0
+#define HIBMC_CRT_DPMS_OFF 3
+
#define HIBMC_CRT_DISP_CTL_CRTSELECT(x) ((x) << 25)
#define HIBMC_CRT_DISP_CTL_CRTSELECT_MASK 0x2000000
@@ -85,6 +91,9 @@
#define HIBMC_CRT_DISP_CTL_TIMING(x) ((x) << 8)
#define HIBMC_CRT_DISP_CTL_TIMING_MASK 0x100
+#define HIBMC_CTL_DISP_CTL_GAMMA(x) ((x) << 3)
+#define HIBMC_CTL_DISP_CTL_GAMMA_MASK 0x08
+
#define HIBMC_CRT_DISP_CTL_PLANE(x) ((x) << 2)
#define HIBMC_CRT_DISP_CTL_PLANE_MASK 4
@@ -170,6 +179,7 @@
#define CRT_PLL1_HS_74MHZ 0x23941dc2
#define CRT_PLL1_HS_80MHZ 0x23941001
#define CRT_PLL1_HS_80MHZ_1152 0x23540fc2
+#define CRT_PLL1_HS_106MHZ 0x237C1641
#define CRT_PLL1_HS_108MHZ 0x23b41b01
#define CRT_PLL1_HS_162MHZ 0x23480681
#define CRT_PLL1_HS_148MHZ 0x23541dc2
@@ -182,10 +192,13 @@
#define CRT_PLL2_HS_78MHZ 0x50E147AE
#define CRT_PLL2_HS_74MHZ 0x602B6AE7
#define CRT_PLL2_HS_80MHZ 0x70000000
+#define CRT_PLL2_HS_106MHZ 0x0075c28f
#define CRT_PLL2_HS_108MHZ 0x80000000
#define CRT_PLL2_HS_162MHZ 0xA0000000
#define CRT_PLL2_HS_148MHZ 0xB0CCCCCD
#define CRT_PLL2_HS_193MHZ 0xC0872B02
+#define HIBMC_CRT_PALETTE 0x80C00
+
#define HIBMC_FIELD(field, value) (field(value) & field##_MASK)
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 6d98fdc06f6c..678ac2ef2a93 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -11,8 +11,10 @@
* Jianhua Li <[email protected]>
*/
+#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_print.h>
#include "hibmc_drm_drv.h"
@@ -20,7 +22,14 @@
static int hibmc_connector_get_modes(struct drm_connector *connector)
{
- return drm_add_modes_noedid(connector, 800, 600);
+ int count;
+
+ count = drm_add_modes_noedid(connector,
+ connector->dev->mode_config.max_width,
+ connector->dev->mode_config.max_height);
+ drm_set_preferred_mode(connector, 1024, 768);
+
+ return count;
}
static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *connector,
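[editor's note: with no EDID to probe, the connector now advertises the full set of standard DMT modes up to the device's mode_config limits instead of a single 800x600, and tags 1024x768 as preferred; drm_set_preferred_mode() only flags a mode that drm_add_modes_noedid() already put on the list. The general pattern for an EDID-less output, sketched:

static int foo_connector_get_modes(struct drm_connector *connector)
{
	int count;

	/* Synthesize standard modes up to the KMS size limits... */
	count = drm_add_modes_noedid(connector,
				     connector->dev->mode_config.max_width,
				     connector->dev->mode_config.max_height);
	/* ...and flag the one userspace should default to. */
	drm_set_preferred_mode(connector, 1024, 768);

	return count;
}
]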
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 50b988fdd5cc..99397ac3b363 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -54,6 +54,7 @@ int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
}
const struct drm_mode_config_funcs hibmc_mode_funcs = {
+ .mode_valid = drm_vram_helper_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
.fb_create = drm_gem_fb_create,
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index bdcf9c6ae9e9..f31068d74b18 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -777,7 +777,7 @@ static int dsi_bridge_init(struct drm_device *dev, struct dw_dsi *dsi)
int ret;
/* associate the bridge to dsi encoder */
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret) {
DRM_ERROR("failed to attach external bridge\n");
return ret;
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
index 0da860200410..e2ac09894a6d 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
@@ -83,7 +83,6 @@
#define VSIZE_OFST 20
#define LDI_INT_EN 0x741C
#define FRAME_END_INT_EN_OFST 1
-#define UNDERFLOW_INT_EN_OFST 2
#define LDI_CTRL 0x7420
#define BPP_OFST 3
#define DATA_GATE_EN BIT(2)
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 73cd28a6ea07..86000127d4ee 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -46,7 +46,6 @@ struct ade_hw_ctx {
struct clk *media_noc_clk;
struct clk *ade_pix_clk;
struct reset_control *reset;
- struct work_struct display_reset_wq;
bool power_on;
int irq;
@@ -136,7 +135,6 @@ static void ade_init(struct ade_hw_ctx *ctx)
*/
ade_update_bits(base + ADE_CTRL, FRM_END_START_OFST,
FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND);
- ade_update_bits(base + LDI_INT_EN, UNDERFLOW_INT_EN_OFST, MASK(1), 1);
}
static bool ade_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -304,17 +302,6 @@ static void ade_crtc_disable_vblank(struct drm_crtc *crtc)
MASK(1), 0);
}
-static void drm_underflow_wq(struct work_struct *work)
-{
- struct ade_hw_ctx *ctx = container_of(work, struct ade_hw_ctx,
- display_reset_wq);
- struct drm_device *drm_dev = ctx->crtc->dev;
- struct drm_atomic_state *state;
-
- state = drm_atomic_helper_suspend(drm_dev);
- drm_atomic_helper_resume(drm_dev, state);
-}
-
static irqreturn_t ade_irq_handler(int irq, void *data)
{
struct ade_hw_ctx *ctx = data;
@@ -331,12 +318,6 @@ static irqreturn_t ade_irq_handler(int irq, void *data)
MASK(1), 1);
drm_crtc_handle_vblank(crtc);
}
- if (status & BIT(UNDERFLOW_INT_EN_OFST)) {
- ade_update_bits(base + LDI_INT_CLR, UNDERFLOW_INT_EN_OFST,
- MASK(1), 1);
- DRM_ERROR("LDI underflow!");
- schedule_work(&ctx->display_reset_wq);
- }
return IRQ_HANDLED;
}
@@ -919,7 +900,6 @@ static void *ade_hw_ctx_alloc(struct platform_device *pdev,
if (ret)
return ERR_PTR(-EIO);
- INIT_WORK(&ctx->display_reset_wq, drm_underflow_wq);
ctx->crtc = crtc;
return ctx;
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index a63790d32d75..c3332209f27a 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1356,10 +1356,16 @@ static int tda998x_connector_init(struct tda998x_priv *priv,
/* DRM bridge functions */
-static int tda998x_bridge_attach(struct drm_bridge *bridge)
+static int tda998x_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
return tda998x_connector_init(priv, bridge->dev);
}
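[editor's note: drm_bridge_attach() grew an enum drm_bridge_attach_flags argument (every caller in this series still passes 0). A bridge that creates its own connector must refuse DRM_BRIDGE_ATTACH_NO_CONNECTOR, as tda998x does above; a converted bridge instead skips connector creation and leaves it to the display driver. Sketch of a converted attach hook (foo_* names assumed):

static int foo_bridge_attach(struct drm_bridge *bridge,
			     enum drm_bridge_attach_flags flags)
{
	struct foo *priv = bridge_to_foo(bridge);

	/* The display driver builds the connector itself (e.g. via
	 * drm_bridge_connector_init()); nothing for the bridge to do. */
	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
		return 0;

	return foo_connector_init(priv, bridge->dev);
}
]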
@@ -2022,7 +2028,7 @@ static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
if (ret)
goto err_encoder;
- ret = drm_bridge_attach(&priv->encoder, &priv->bridge, NULL);
+ ret = drm_bridge_attach(&priv->encoder, &priv->bridge, NULL, 0);
if (ret)
goto err_bridge;
diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
index c280b6ae38eb..0bfd276c19fe 100644
--- a/drivers/gpu/drm/i915/Kconfig.profile
+++ b/drivers/gpu/drm/i915/Kconfig.profile
@@ -20,6 +20,9 @@ config DRM_I915_HEARTBEAT_INTERVAL
check the health of the GPU and undertake regular house-keeping of
internal driver state.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/heartbeat_interval_ms
+
May be 0 to disable heartbeats and therefore disable automatic GPU
hang detection.
@@ -33,11 +36,18 @@ config DRM_I915_PREEMPT_TIMEOUT
expires, the HW will be reset to allow the more important context
to execute.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/preempt_timeout_ms
+
May be 0 to disable the timeout.
-config DRM_I915_SPIN_REQUEST
- int "Busywait for request completion (us)"
- default 5 # microseconds
+ The compiled in default may get overridden at driver probe time on
+ certain platforms and certain engines which will be reflected in the
+ sysfs control.
+
+config DRM_I915_MAX_REQUEST_BUSYWAIT
+ int "Busywait for request completion limit (ns)"
+ default 8000 # nanoseconds
help
Before sleeping waiting for a request (GPU operation) to complete,
we may spend some time polling for its completion. As the IRQ may
@@ -45,6 +55,9 @@ config DRM_I915_SPIN_REQUEST
check if the request will complete in the time it would have taken
us to enable the interrupt.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/max_busywait_duration_ns
+
May be 0 to disable the initial spin. In practice, we estimate
the cost of enabling the interrupt (if currently disabled) to be
a few microseconds.
@@ -60,6 +73,9 @@ config DRM_I915_STOP_TIMEOUT
that the reset itself may take longer and so be more disruptive to
interactive or low latency workloads.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/stop_timeout_ms
+
config DRM_I915_TIMESLICE_DURATION
int "Scheduling quantum for userspace batches (ms, jiffy granularity)"
default 1 # milliseconds
@@ -73,4 +89,7 @@ config DRM_I915_TIMESLICE_DURATION
is scheduled for execution for the timeslice duration, before
switching to the next context.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/timeslice_duration_ms
+
May be 0 to disable timeslicing.
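[editor's note: each of these Kconfig defaults now has a per-engine runtime override under sysfs, wired up by gt/sysfs_engines.o in the Makefile change below. A hypothetical userspace sketch, assuming a card0/rcs0 layout as in the help text:

#include <stdio.h>

int main(void)
{
	/* Raise the render engine's preemption timeout to 640 ms. */
	FILE *f = fopen("/sys/class/drm/card0/engine/rcs0/preempt_timeout_ms",
			"w");

	if (!f)
		return 1;
	fprintf(f, "%d\n", 640);
	return fclose(f) ? 1 : 0;
}
]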
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b314d44ded5e..9f887a86e555 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -47,6 +47,7 @@ i915-y += i915_drv.o \
i915_sysfs.o \
i915_utils.o \
intel_device_info.o \
+ intel_dram.o \
intel_memory_region.o \
intel_pch.o \
intel_pm.o \
@@ -79,9 +80,11 @@ gt-y += \
gt/debugfs_gt.o \
gt/debugfs_gt_pm.o \
gt/gen6_ppgtt.o \
+ gt/gen7_renderclear.o \
gt/gen8_ppgtt.o \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
+ gt/intel_context_param.o \
gt/intel_context_sseu.o \
gt/intel_engine_cs.o \
gt/intel_engine_heartbeat.o \
@@ -107,7 +110,8 @@ gt-y += \
gt/intel_rps.o \
gt/intel_sseu.o \
gt/intel_timeline.o \
- gt/intel_workarounds.o
+ gt/intel_workarounds.o \
+ gt/sysfs_engines.o
# autogenerated null render state
gt-y += \
gt/gen6_renderstate.o \
@@ -301,7 +305,7 @@ extra-$(CONFIG_DRM_I915_WERROR) += \
$(shell cd $(srctree)/$(src) && find * -name '*.h')))
quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
- cmd_hdrtest = $(CC) $(c_flags) -S -o /dev/null -x c /dev/null -include $<; touch $@
+ cmd_hdrtest = $(CC) $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; touch $@
$(obj)/%.hdrtest: $(src)/%.h FORCE
$(call if_changed_dep,hdrtest)
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index d0f7419fb02b..17cee6f80d8b 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -599,13 +599,13 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
u32 tmp;
enum phy phy;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys)
tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
@@ -615,13 +615,13 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
u32 tmp;
enum phy phy;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys)
tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void gen11_dsi_map_pll(struct intel_encoder *encoder,
@@ -633,7 +633,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
enum phy phy;
u32 val;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys) {
@@ -652,7 +652,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0);
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void
@@ -1350,15 +1350,15 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
static void gen11_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_dsc_get_config(encoder, pipe_config);
/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
- pipe_config->port_clock =
- cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state);
+ pipe_config->port_clock = intel_dpll_get_freq(i915,
+ pipe_config->shared_dpll);
pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk;
if (intel_dsi->dual_link)
@@ -1718,9 +1718,8 @@ static void icl_dsi_add_properties(struct intel_connector *connector)
connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
- connector->base.display_info.panel_orientation =
- intel_dsi_get_panel_orientation(connector);
- drm_connector_init_panel_orientation_property(&connector->base,
+ drm_connector_set_panel_orientation_with_quirk(&connector->base,
+ intel_dsi_get_panel_orientation(connector),
connector->panel.fixed_mode->hdisplay,
connector->panel.fixed_mode->vdisplay);
}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index c86d7a35c816..457b258683d3 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -133,15 +133,37 @@ intel_plane_destroy_state(struct drm_plane *plane,
kfree(plane_state);
}
+unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int src_w, src_h, dst_w, dst_h;
+ unsigned int pixel_rate = crtc_state->pixel_rate;
+
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->uapi.dst);
+ dst_h = drm_rect_height(&plane_state->uapi.dst);
+
+ /* Downscaling limits the maximum pixel rate */
+ dst_w = min(src_w, dst_w);
+ dst_h = min(src_h, dst_h);
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, src_w * src_h),
+ dst_w * dst_h);
+}
+
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int cpp;
+ unsigned int pixel_rate;
if (!plane_state->uapi.visible)
return 0;
+ pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
+
cpp = fb->format->cpp[0];
/*
@@ -153,7 +175,7 @@ unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
if (fb->format->is_yuv && fb->format->num_planes > 1)
cpp *= 4;
- return cpp * crtc_state->pixel_rate;
+ return pixel_rate * cpp;
}
int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
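[editor's note: intel_plane_data_rate() previously charged every visible plane the full CRTC pixel rate; it now scales by the source/destination area ratio, so downscaled planes are charged for the extra pixels they must fetch (upscaling is clamped and never lowers the rate). Standalone worked example: with a 148500 kHz CRTC clock, a 3840x2160 source shown in a 1920x1080 rect fetches at 4x, i.e. 594000 kHz:

static unsigned long long plane_pixel_rate_example(void)
{
	unsigned long long pixel_rate = 148500;		/* kHz */
	unsigned long long src = 3840ULL * 2160;
	unsigned long long dst = 1920ULL * 1080;	/* downscale: dst < src */

	return (pixel_rate * src + dst - 1) / dst;	/* == 594000 */
}
]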
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 2bcf15e34728..a6bbf42bae1f 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -18,6 +18,9 @@ struct intel_plane_state;
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 30fb7c887ff0..62f234f641de 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -149,6 +149,10 @@ static const struct {
{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
{ 148352, AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
+ { 296703, AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 },
+ { 297000, AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 },
+ { 593407, AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 },
+ { 594000, AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 },
};
/* HDMI N/CTS table */
@@ -234,6 +238,7 @@ static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int i;
@@ -243,6 +248,9 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta
break;
}
+ if (INTEL_GEN(dev_priv) < 12 && adjusted_mode->crtc_clock > 148500)
+ i = ARRAY_SIZE(hdmi_audio_clock);
+
if (i == ARRAY_SIZE(hdmi_audio_clock)) {
DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
adjusted_mode->crtc_clock);
@@ -844,7 +852,7 @@ static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
struct intel_crtc *crtc;
int ret;
- crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+ crtc = intel_get_first_crtc(dev_priv);
if (!crtc)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 2049cf5b54f3..839124647202 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -26,7 +26,6 @@
*/
#include <drm/drm_dp_helper.h>
-#include <drm/i915_drm.h>
#include "display/intel_display.h"
#include "display/intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index c17199caeff8..e29e79faa01b 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -32,8 +32,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
struct drm_i915_private;
struct intel_crtc_state;
struct intel_encoder;
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 146c2b9bb7fb..979a0241fdcb 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -525,7 +525,8 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
* FIXME is this guaranteed to clear
* immediately or should we poll for it?
*/
- WARN_ON(intel_de_read(dev_priv, GCI_CONTROL) & PFI_CREDIT_RESEND);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, GCI_CONTROL) & PFI_CREDIT_RESEND);
}
static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
@@ -727,12 +728,13 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
u32 val;
int ret;
- if (WARN((intel_de_read(dev_priv, LCPLL_CTL) &
- (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
- LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
- LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
- LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
- "trying to change cdclk frequency with cdclk not enabled\n"))
+ if (drm_WARN(&dev_priv->drm,
+ (intel_de_read(dev_priv, LCPLL_CTL) &
+ (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
+ LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
+ LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
+ LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
+ "trying to change cdclk frequency with cdclk not enabled\n"))
return;
ret = sandybridge_pcode_write(dev_priv,
@@ -842,15 +844,16 @@ static void skl_dpll0_update(struct drm_i915_private *dev_priv,
if ((val & LCPLL_PLL_ENABLE) == 0)
return;
- if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
+ if (drm_WARN_ON(&dev_priv->drm, (val & LCPLL_PLL_LOCK) == 0))
return;
val = intel_de_read(dev_priv, DPLL_CTRL1);
- if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
- DPLL_CTRL1_SSC(SKL_DPLL0) |
- DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
- DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ (val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
+ DPLL_CTRL1_SSC(SKL_DPLL0) |
+ DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
+ DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
return;
switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
@@ -952,7 +955,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
{
u32 val;
- WARN_ON(vco != 8100000 && vco != 8640000);
+ drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);
/*
* We always enable DPLL0 with the lowest link rate possible, but still
@@ -1017,7 +1020,8 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
* use the corresponding VCO freq as that always leads to using the
* minimum 308MHz CDCLK.
*/
- WARN_ON_ONCE(IS_SKYLAKE(dev_priv) && vco == 8640000);
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ IS_SKYLAKE(dev_priv) && vco == 8640000);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
@@ -1032,8 +1036,9 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
/* Choose frequency for this cdclk */
switch (cdclk) {
default:
- WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
- WARN_ON(vco != 0);
+ drm_WARN_ON(&dev_priv->drm,
+ cdclk != dev_priv->cdclk.hw.bypass);
+ drm_WARN_ON(&dev_priv->drm, vco != 0);
/* fall through */
case 308571:
case 337500:
@@ -1235,8 +1240,9 @@ static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
table[i].cdclk >= min_cdclk)
return table[i].cdclk;
- WARN(1, "Cannot satisfy minimum cdclk %d with refclk %u\n",
- min_cdclk, dev_priv->cdclk.hw.ref);
+ drm_WARN(&dev_priv->drm, 1,
+ "Cannot satisfy minimum cdclk %d with refclk %u\n",
+ min_cdclk, dev_priv->cdclk.hw.ref);
return 0;
}
@@ -1253,8 +1259,8 @@ static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
table[i].cdclk == cdclk)
return dev_priv->cdclk.hw.ref * table[i].ratio;
- WARN(1, "cdclk %d not valid for refclk %u\n",
- cdclk, dev_priv->cdclk.hw.ref);
+ drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n",
+ cdclk, dev_priv->cdclk.hw.ref);
return 0;
}
@@ -1399,15 +1405,17 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
div = 2;
break;
case BXT_CDCLK_CD2X_DIV_SEL_1_5:
- WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
- "Unsupported divider\n");
+ drm_WARN(&dev_priv->drm,
+ IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
div = 3;
break;
case BXT_CDCLK_CD2X_DIV_SEL_2:
div = 4;
break;
case BXT_CDCLK_CD2X_DIV_SEL_4:
- WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n");
+ drm_WARN(&dev_priv->drm, INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
div = 8;
break;
default:
@@ -1547,22 +1555,25 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
/* cdclk = vco / 2 / div{1,1.5,2,4} */
switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
default:
- WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
- WARN_ON(vco != 0);
+ drm_WARN_ON(&dev_priv->drm,
+ cdclk != dev_priv->cdclk.hw.bypass);
+ drm_WARN_ON(&dev_priv->drm, vco != 0);
/* fall through */
case 2:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
break;
case 3:
- WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
- "Unsupported divider\n");
+ drm_WARN(&dev_priv->drm,
+ IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
break;
case 4:
divider = BXT_CDCLK_CD2X_DIV_SEL_2;
break;
case 8:
- WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n");
+ drm_WARN(&dev_priv->drm, INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
divider = BXT_CDCLK_CD2X_DIV_SEL_4;
break;
}
@@ -1857,18 +1868,41 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
+ struct intel_encoder *encoder;
+
if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config))
return;
- if (WARN_ON_ONCE(!dev_priv->display.set_cdclk))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.set_cdclk))
return;
intel_dump_cdclk_config(cdclk_config, "Changing CDCLK to");
+ /*
+ * Lock aux/gmbus while we change cdclk in case those
+ * functions use cdclk. Not all platforms/ports do,
+ * but we'll lock them all for simplicity.
+ */
+ mutex_lock(&dev_priv->gmbus_mutex);
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_lock_nest_lock(&intel_dp->aux.hw_mutex,
+ &dev_priv->gmbus_mutex);
+ }
+
dev_priv->display.set_cdclk(dev_priv, cdclk_config, pipe);
- if (WARN(intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config),
- "cdclk state doesn't match!\n")) {
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_unlock(&intel_dp->aux.hw_mutex);
+ }
+ mutex_unlock(&dev_priv->gmbus_mutex);
+
+ if (drm_WARN(&dev_priv->drm,
+ intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config),
+ "cdclk state doesn't match!\n")) {
intel_dump_cdclk_config(&dev_priv->cdclk.hw, "[hw state]");
intel_dump_cdclk_config(cdclk_config, "[sw state]");
}
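[editor's note: intel_set_cdclk() now excludes concurrent AUX and GMBUS transfers while the clock changes, since some of them are timed off cdclk. mutex_lock_nest_lock() tells lockdep that the whole set of per-DP aux locks nests under gmbus_mutex, so taking many of them in a row counts as one nesting level rather than a false-positive lock chain. The idiom, schematically:

	mutex_lock(&outer);			/* e.g. gmbus_mutex */
	list_for_each_entry(obj, &objects, link)
		/* every obj->lock acquisition nests under &outer */
		mutex_lock_nest_lock(&obj->lock, &outer);

	/* ... section that must exclude every object's user ... */

	list_for_each_entry(obj, &objects, link)
		mutex_unlock(&obj->lock);
	mutex_unlock(&outer);
]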
@@ -1897,7 +1931,7 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
if (pipe == INVALID_PIPE ||
old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
- WARN_ON(!new_cdclk_state->base.changed);
+ drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed);
intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe);
}
@@ -1926,7 +1960,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
if (pipe != INVALID_PIPE &&
old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
- WARN_ON(!new_cdclk_state->base.changed);
+ drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed);
intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe);
}
@@ -2550,7 +2584,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
int max_cdclk, vco;
vco = dev_priv->skl_preferred_vco_freq;
- WARN_ON(vco != 8100000 && vco != 8640000);
+ drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);
/*
* Use the lower (vco 8640) cdclk values as a
@@ -2809,8 +2843,8 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
else if (IS_I845G(dev_priv))
dev_priv->display.get_cdclk = fixed_200mhz_get_cdclk;
else { /* 830 */
- WARN(!IS_I830(dev_priv),
- "Unknown platform. Assuming 133 MHz CDCLK\n");
+ drm_WARN(&dev_priv->drm, !IS_I830(dev_priv),
+ "Unknown platform. Assuming 133 MHz CDCLK\n");
dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk;
}
}
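[editor's note: the WARN -> drm_WARN conversions throughout this file are mechanical but not cosmetic: the drm_* variants from <drm/drm_print.h> prefix the splat with the specific DRM device, which disambiguates warnings on multi-GPU systems. Usage mirrors the plain macros (conditions here are illustrative):

	drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);
	drm_WARN(&dev_priv->drm, mismatch,
		 "cdclk state doesn't match!\n");
]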
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index d44bd8287801..c1cce93a1c25 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -348,48 +348,70 @@ static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
crtc_state->csc_mode);
}
-/*
- * Set up the pipe CSC unit on CherryView.
- */
-static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+static void chv_load_cgm_csc(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_ctm *ctm = blob->data;
enum pipe pipe = crtc->pipe;
+ u16 coeffs[9];
+ int i;
- if (crtc_state->hw.ctm) {
- const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
- u16 coeffs[9] = {};
- int i;
-
- for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
- u64 abs_coeff =
- ((1ULL << 63) - 1) & ctm->matrix[i];
-
- /* Round coefficient. */
- abs_coeff += 1 << (32 - 13);
- /* Clamp to hardware limits. */
- abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1);
-
- /* Write coefficients in S3.12 format. */
- if (ctm->matrix[i] & (1ULL << 63))
- coeffs[i] = 1 << 15;
- coeffs[i] |= ((abs_coeff >> 32) & 7) << 12;
- coeffs[i] |= (abs_coeff >> 20) & 0xfff;
- }
+ for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
+ u64 abs_coeff = ((1ULL << 63) - 1) & ctm->matrix[i];
+
+ /* Round coefficient. */
+ abs_coeff += 1 << (32 - 13);
+ /* Clamp to hardware limits. */
+ abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1);
- intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF01(pipe),
- coeffs[1] << 16 | coeffs[0]);
- intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF23(pipe),
- coeffs[3] << 16 | coeffs[2]);
- intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF45(pipe),
- coeffs[5] << 16 | coeffs[4]);
- intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF67(pipe),
- coeffs[7] << 16 | coeffs[6]);
- intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
+ coeffs[i] = 0;
+
+ /* Write coefficients in S3.12 format. */
+ if (ctm->matrix[i] & (1ULL << 63))
+ coeffs[i] |= 1 << 15;
+
+ coeffs[i] |= ((abs_coeff >> 32) & 7) << 12;
+ coeffs[i] |= (abs_coeff >> 20) & 0xfff;
}
- intel_de_write(dev_priv, CGM_PIPE_MODE(pipe), crtc_state->cgm_mode);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF01(pipe),
+ coeffs[1] << 16 | coeffs[0]);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF23(pipe),
+ coeffs[3] << 16 | coeffs[2]);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF45(pipe),
+ coeffs[5] << 16 | coeffs[4]);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF67(pipe),
+ coeffs[7] << 16 | coeffs[6]);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF8(pipe),
+ coeffs[8]);
+}
+
+/* convert hw value with given bit_precision to lut property val */
+static u32 intel_color_lut_pack(u32 val, int bit_precision)
+{
+ u32 max = 0xffff >> (16 - bit_precision);
+
+ val = clamp_val(val, 0, max);
+
+ if (bit_precision < 16)
+ val <<= 16 - bit_precision;
+
+ return val;
+}
+
+static u32 i9xx_lut_8(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->red, 8) << 16 |
+ drm_color_lut_extract(color->green, 8) << 8 |
+ drm_color_lut_extract(color->blue, 8);
+}
+
+static void i9xx_lut_8_pack(struct drm_color_lut *entry, u32 val)
+{
+ entry->red = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_RED_MASK, val), 8);
+ entry->green = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_GREEN_MASK, val), 8);
+ entry->blue = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_BLUE_MASK, val), 8);
}
/* i965+ "10.6" bit interpolated format "even DW" (low 8 bits) */
@@ -408,51 +430,34 @@ static u32 i965_lut_10p6_udw(const struct drm_color_lut *color)
(color->blue >> 8);
}
-static u32 ilk_lut_10(const struct drm_color_lut *color)
+static void i965_lut_10p6_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
{
- return drm_color_lut_extract(color->red, 10) << 20 |
- drm_color_lut_extract(color->green, 10) << 10 |
- drm_color_lut_extract(color->blue, 10);
+ entry->red = REG_FIELD_GET(PALETTE_RED_MASK, udw) << 8 |
+ REG_FIELD_GET(PALETTE_RED_MASK, ldw);
+ entry->green = REG_FIELD_GET(PALETTE_GREEN_MASK, udw) << 8 |
+ REG_FIELD_GET(PALETTE_GREEN_MASK, ldw);
+ entry->blue = REG_FIELD_GET(PALETTE_BLUE_MASK, udw) << 8 |
+ REG_FIELD_GET(PALETTE_BLUE_MASK, ldw);
}
-/* Loads the legacy palette/gamma unit for the CRTC. */
-static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
- const struct drm_property_blob *blob)
+static u16 i965_lut_11p6_max_pack(u32 val)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- int i;
-
- if (HAS_GMCH(dev_priv)) {
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- assert_dsi_pll_enabled(dev_priv);
- else
- assert_pll_enabled(dev_priv, pipe);
- }
+ /* PIPEGCMAX is 11.6, clamp to 10.6 */
+ return clamp_val(val, 0, 0xffff);
+}
- if (blob) {
- const struct drm_color_lut *lut = blob->data;
-
- for (i = 0; i < 256; i++) {
- u32 word =
- (drm_color_lut_extract(lut[i].red, 8) << 16) |
- (drm_color_lut_extract(lut[i].green, 8) << 8) |
- drm_color_lut_extract(lut[i].blue, 8);
-
- if (HAS_GMCH(dev_priv))
- intel_de_write(dev_priv, PALETTE(pipe, i),
- word);
- else
- intel_de_write(dev_priv, LGC_PALETTE(pipe, i),
- word);
- }
- }
+static u32 ilk_lut_10(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->red, 10) << 20 |
+ drm_color_lut_extract(color->green, 10) << 10 |
+ drm_color_lut_extract(color->blue, 10);
}
-static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
+static void ilk_lut_10_pack(struct drm_color_lut *entry, u32 val)
{
- i9xx_load_luts_internal(crtc_state, crtc_state->hw.gamma_lut);
+ entry->red = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_RED_MASK, val), 10);
+ entry->green = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_GREEN_MASK, val), 10);
+ entry->blue = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_BLUE_MASK, val), 10);
}
static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
@@ -521,6 +526,35 @@ static void skl_color_commit(const struct intel_crtc_state *crtc_state)
ilk_load_csc_matrix(crtc_state);
}
+static void i9xx_load_lut_8(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut;
+ enum pipe pipe = crtc->pipe;
+ int i;
+
+ if (!blob)
+ return;
+
+ lut = blob->data;
+
+ for (i = 0; i < 256; i++)
+ intel_de_write(dev_priv, PALETTE(pipe, i),
+ i9xx_lut_8(&lut[i]));
+}
+
+static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+
+ assert_pll_enabled(dev_priv, crtc->pipe);
+
+ i9xx_load_lut_8(crtc, gamma_lut);
+}
+
static void i965_load_lut_10p6(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
@@ -544,14 +578,38 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc,
static void i965_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ assert_dsi_pll_enabled(dev_priv);
+ else
+ assert_pll_enabled(dev_priv, crtc->pipe);
+
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- i9xx_load_luts(crtc_state);
+ i9xx_load_lut_8(crtc, gamma_lut);
else
i965_load_lut_10p6(crtc, gamma_lut);
}
+static void ilk_load_lut_8(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut;
+ enum pipe pipe = crtc->pipe;
+ int i;
+
+ if (!blob)
+ return;
+
+ lut = blob->data;
+
+ for (i = 0; i < 256; i++)
+ intel_de_write(dev_priv, LGC_PALETTE(pipe, i),
+ i9xx_lut_8(&lut[i]));
+}
+
static void ilk_load_lut_10(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
@@ -562,7 +620,7 @@ static void ilk_load_lut_10(struct intel_crtc *crtc,
for (i = 0; i < lut_size; i++)
intel_de_write(dev_priv, PREC_PALETTE(pipe, i),
- ilk_lut_10(&lut[i]));
+ ilk_lut_10(&lut[i]));
}
static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
@@ -571,7 +629,7 @@ static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
else
ilk_load_lut_10(crtc, gamma_lut);
}
@@ -681,7 +739,7 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
@@ -704,7 +762,7 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
@@ -725,9 +783,8 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
const struct drm_color_lut *lut = crtc_state->hw.degamma_lut->data;
- u32 i;
/*
* When setting the auto-increment bit, the hardware seems to
@@ -766,8 +823,7 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_stat
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
- u32 i;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
/*
* When setting the auto-increment bit, the hardware seems to
@@ -808,7 +864,7 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state)
glk_load_degamma_lut_linear(crtc_state);
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
} else {
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc);
@@ -852,7 +908,7 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
const struct drm_color_lut *lut = blob->data;
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
- u32 i;
+ int i;
/*
* Program Super Fine segment (let's call it seg1)...
@@ -885,7 +941,7 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
const struct drm_color_lut *entry;
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
- u32 i;
+ int i;
/*
* Program Fine segment (let's call it seg2)...
@@ -944,7 +1000,7 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
case GAMMA_MODE_MODE_8BIT:
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
break;
case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
icl_program_gamma_superfine_segment(crtc_state);
@@ -970,6 +1026,13 @@ static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color)
return drm_color_lut_extract(color->red, 14);
}
+static void chv_cgm_gamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
+{
+ entry->green = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_GREEN_MASK, ldw), 10);
+ entry->blue = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_BLUE_MASK, ldw), 10);
+ entry->red = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_RED_MASK, udw), 10);
+}
+
static void chv_load_cgm_degamma(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
@@ -1016,21 +1079,24 @@ static void chv_load_cgm_gamma(struct intel_crtc *crtc,
static void chv_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *ctm = crtc_state->hw.ctm;
- cherryview_load_csc_matrix(crtc_state);
-
- if (crtc_state_is_legacy_gamma(crtc_state)) {
- i9xx_load_luts(crtc_state);
- return;
- }
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC)
+ chv_load_cgm_csc(crtc, ctm);
- if (degamma_lut)
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA)
chv_load_cgm_degamma(crtc, degamma_lut);
- if (gamma_lut)
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
chv_load_cgm_gamma(crtc, gamma_lut);
+ else
+ i965_load_luts(crtc_state);
+
+ intel_de_write(dev_priv, CGM_PIPE_MODE(crtc->pipe),
+ crtc_state->cgm_mode);
}
void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
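[editor's note: chv_load_luts() is restructured around the cgm_mode bits computed at atomic-check time: each CGM stage (CSC, degamma, gamma) is programmed only when its enable bit is set, legacy gamma falls back to the non-CGM i965 path, and CGM_PIPE_MODE is written last so no stage is armed before its table is loaded. Schematically (helper names illustrative):

	if (cgm_mode & CGM_PIPE_MODE_CSC)
		load_cgm_csc(ctm);
	if (cgm_mode & CGM_PIPE_MODE_DEGAMMA)
		load_cgm_degamma(degamma_lut);
	if (cgm_mode & CGM_PIPE_MODE_GAMMA)
		load_cgm_gamma(gamma_lut);
	else
		load_legacy_gamma();	/* gamma handled outside the CGM unit */

	write_cgm_pipe_mode(cgm_mode);	/* arm enabled stages last */
]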
@@ -1656,28 +1722,13 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1,
return true;
}
-/* convert hw value with given bit_precision to lut property val */
-static u32 intel_color_lut_pack(u32 val, u32 bit_precision)
+static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc)
{
- u32 max = 0xffff >> (16 - bit_precision);
-
- val = clamp_val(val, 0, max);
-
- if (bit_precision < 16)
- val <<= 16 - bit_precision;
-
- return val;
-}
-
-static struct drm_property_blob *
-i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val;
+ struct drm_color_lut *lut;
+ int i;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
@@ -1685,20 +1736,12 @@ i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
- if (HAS_GMCH(dev_priv))
- val = intel_de_read(dev_priv, PALETTE(pipe, i));
- else
- val = intel_de_read(dev_priv, LGC_PALETTE(pipe, i));
-
- blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
- LGC_PALETTE_RED_MASK, val), 8);
- blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
- LGC_PALETTE_GREEN_MASK, val), 8);
- blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
- LGC_PALETTE_BLUE_MASK, val), 8);
+ u32 val = intel_de_read(dev_priv, PALETTE(pipe, i));
+
+ i9xx_lut_8_pack(&lut[i], val);
}
return blob;
@@ -1706,22 +1749,21 @@ i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (!crtc_state->gamma_enable)
return;
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc);
}
-static struct drm_property_blob *
-i965_read_lut_10p6(const struct intel_crtc_state *crtc_state)
+static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val1, val2;
+ struct drm_color_lut *lut;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * lut_size,
@@ -1729,51 +1771,42 @@ i965_read_lut_10p6(const struct intel_crtc_state *crtc_state)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
for (i = 0; i < lut_size - 1; i++) {
- val1 = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 0));
- val2 = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 1));
-
- blob_data[i].red = REG_FIELD_GET(PALETTE_RED_MASK, val2) << 8 |
- REG_FIELD_GET(PALETTE_RED_MASK, val1);
- blob_data[i].green = REG_FIELD_GET(PALETTE_GREEN_MASK, val2) << 8 |
- REG_FIELD_GET(PALETTE_GREEN_MASK, val1);
- blob_data[i].blue = REG_FIELD_GET(PALETTE_BLUE_MASK, val2) << 8 |
- REG_FIELD_GET(PALETTE_BLUE_MASK, val1);
+ u32 ldw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 0));
+ u32 udw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 1));
+
+ i965_lut_10p6_pack(&lut[i], ldw, udw);
}
- blob_data[i].red = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
- intel_de_read(dev_priv, PIPEGCMAX(pipe, 0)));
- blob_data[i].green = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
- intel_de_read(dev_priv, PIPEGCMAX(pipe, 1)));
- blob_data[i].blue = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
- intel_de_read(dev_priv, PIPEGCMAX(pipe, 2)));
+ lut[i].red = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 0)));
+ lut[i].green = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 1)));
+ lut[i].blue = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 2)));
return blob;
}
static void i965_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (!crtc_state->gamma_enable)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc);
else
- crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc_state);
+ crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc);
}
-static struct drm_property_blob *
-chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
+static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val;
+ struct drm_color_lut *lut;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * lut_size,
@@ -1781,18 +1814,13 @@ chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
for (i = 0; i < lut_size; i++) {
- val = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0));
- blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
- CGM_PIPE_GAMMA_GREEN_MASK, val), 10);
- blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
- CGM_PIPE_GAMMA_BLUE_MASK, val), 10);
-
- val = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1));
- blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
- CGM_PIPE_GAMMA_RED_MASK, val), 10);
+ u32 ldw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0));
+ u32 udw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1));
+
+ chv_cgm_gamma_pack(&lut[i], ldw, udw);
}
return blob;
@@ -1800,22 +1828,46 @@ chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
static void chv_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
- crtc_state->hw.gamma_lut = chv_read_cgm_lut(crtc_state);
+ crtc_state->hw.gamma_lut = chv_read_cgm_gamma(crtc);
else
i965_read_luts(crtc_state);
}
-static struct drm_property_blob *
-ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
+static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+ int i;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
+ u32 val = intel_de_read(dev_priv, LGC_PALETTE(pipe, i));
+
+ i9xx_lut_8_pack(&lut[i], val);
+ }
+
+ return blob;
+}
+
+static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val;
+ struct drm_color_lut *lut;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * lut_size,
@@ -1823,17 +1875,12 @@ ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
for (i = 0; i < lut_size; i++) {
- val = intel_de_read(dev_priv, PREC_PALETTE(pipe, i));
-
- blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PALETTE_RED_MASK, val), 10);
- blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PALETTE_GREEN_MASK, val), 10);
- blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PALETTE_BLUE_MASK, val), 10);
+ u32 val = intel_de_read(dev_priv, PREC_PALETTE(pipe, i));
+
+ ilk_lut_10_pack(&lut[i], val);
}
return blob;
@@ -1841,6 +1888,8 @@ ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
static void ilk_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (!crtc_state->gamma_enable)
return;
@@ -1848,21 +1897,19 @@ static void ilk_read_luts(struct intel_crtc_state *crtc_state)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
else
- crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc_state);
+ crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc);
}
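ilk_read_lut_8() is a new ILK-specific 8-bit readout (LGC_PALETTE rather than the i9xx PALETTE range), and ilk_lut_10_pack() absorbs the 10-bit unpacking shared by the PREC_PALETTE and PREC_PAL_DATA paths. Again a sketch mirroring the removed code:

static void ilk_lut_10_pack(struct drm_color_lut *entry, u32 val)
{
	entry->red = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_RED_MASK, val), 10);
	entry->green = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_GREEN_MASK, val), 10);
	entry->blue = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_BLUE_MASK, val), 10);
}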
-static struct drm_property_blob *
-glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
+static struct drm_property_blob *glk_read_lut_10(struct intel_crtc *crtc,
+ u32 prec_index)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int hw_lut_size = ivb_lut_10_size(prec_index);
+ int i, hw_lut_size = ivb_lut_10_size(prec_index);
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val;
+ struct drm_color_lut *lut;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * hw_lut_size,
@@ -1870,20 +1917,15 @@ glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
intel_de_write(dev_priv, PREC_PAL_INDEX(pipe),
prec_index | PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < hw_lut_size; i++) {
- val = intel_de_read(dev_priv, PREC_PAL_DATA(pipe));
-
- blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PAL_DATA_RED_MASK, val), 10);
- blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PAL_DATA_GREEN_MASK, val), 10);
- blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PAL_DATA_BLUE_MASK, val), 10);
+ u32 val = intel_de_read(dev_priv, PREC_PAL_DATA(pipe));
+
+ ilk_lut_10_pack(&lut[i], val);
}
intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
@@ -1893,13 +1935,15 @@ glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
static void glk_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (!crtc_state->gamma_enable)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
else
- crtc_state->hw.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0));
+ crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
}
void intel_color_init(struct intel_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 45ecc7d9c829..78f9b6cde810 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -32,7 +32,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_connector.h"
@@ -1046,6 +1045,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
!dmi_check_system(intel_spurious_crt_detect)) {
crt->base.hpd_pin = HPD_CRT;
crt->base.hotplug = intel_encoder_hotplug;
+ intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
} else {
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
}
diff --git a/drivers/gpu/drm/i915/display/intel_csr.c b/drivers/gpu/drm/i915/display/intel_csr.c
index 57320c12839f..3112572cfb7d 100644
--- a/drivers/gpu/drm/i915/display/intel_csr.c
+++ b/drivers/gpu/drm/i915/display/intel_csr.c
@@ -40,8 +40,8 @@
#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
-#define TGL_CSR_PATH "i915/tgl_dmc_ver2_04.bin"
-#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 4)
+#define TGL_CSR_PATH "i915/tgl_dmc_ver2_06.bin"
+#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 6)
#define TGL_CSR_MAX_FW_SIZE 0x6000
MODULE_FIRMWARE(TGL_CSR_PATH);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index ff292dfe2dd3..73d0f4648c06 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -1006,18 +1006,18 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder)
intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
default_entry = 6;
} else {
- WARN(1, "ddi translation table missing\n");
+ drm_WARN(&dev_priv->drm, 1, "ddi translation table missing\n");
return 0;
}
- if (WARN_ON_ONCE(n_entries == 0))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, n_entries == 0))
return 0;
level = intel_bios_hdmi_level_shift(encoder);
if (level < 0)
level = default_entry;
- if (WARN_ON_ONCE(level >= n_entries))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
return level;
@@ -1075,9 +1075,9 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
- if (WARN_ON_ONCE(!ddi_translations))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
- if (WARN_ON_ONCE(level >= n_entries))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
/* If we're boosting the current, set bit 31 of trans1 */
@@ -1208,7 +1208,7 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
/* Configure Port Clock Select */
ddi_pll_sel = hsw_pll_to_ddi_pll_sel(crtc_state->shared_dpll);
intel_de_write(dev_priv, PORT_CLK_SEL(PORT_E), ddi_pll_sel);
- WARN_ON(ddi_pll_sel != PORT_CLK_SEL_SPLL);
+ drm_WARN_ON(&dev_priv->drm, ddi_pll_sel != PORT_CLK_SEL_SPLL);
/* Start the training iterating through available voltages and emphasis,
* testing each value twice. */
@@ -1317,171 +1317,14 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
}
if (num_encoders != 1)
- WARN(1, "%d encoders on crtc for pipe %c\n", num_encoders,
- pipe_name(crtc->pipe));
+ drm_WARN(dev, 1, "%d encoders on crtc for pipe %c\n",
+ num_encoders,
+ pipe_name(crtc->pipe));
BUG_ON(ret == NULL);
return ret;
}
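The WARN_ON() -> drm_WARN_ON() conversions here and in intel_display.c below switch to the device-aware variants from <drm/drm_print.h>, which prefix the backtrace with the driver and device name so the offending GPU can be identified on multi-device systems. Roughly (paraphrased from drm_print.h):

#define drm_WARN(drm, condition, format, args...)			\
	WARN(condition, "%s %s: " format,				\
	     dev_driver_string((drm)->dev), dev_name((drm)->dev), ## args)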
-static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
-{
- int refclk;
- int n, p, r;
- u32 wrpll;
-
- wrpll = intel_de_read(dev_priv, reg);
- switch (wrpll & WRPLL_REF_MASK) {
- case WRPLL_REF_SPECIAL_HSW:
- /*
- * muxed-SSC for BDW.
- * non-SSC for non-ULT HSW. Check FUSE_STRAP3
- * for the non-SSC reference frequency.
- */
- if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
- if (intel_de_read(dev_priv, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
- refclk = 24;
- else
- refclk = 135;
- break;
- }
- /* fall through */
- case WRPLL_REF_PCH_SSC:
- /*
- * We could calculate spread here, but our checking
- * code only cares about 5% accuracy, and spread is a max of
- * 0.5% downspread.
- */
- refclk = 135;
- break;
- case WRPLL_REF_LCPLL:
- refclk = 2700;
- break;
- default:
- MISSING_CASE(wrpll);
- return 0;
- }
-
- r = wrpll & WRPLL_DIVIDER_REF_MASK;
- p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
- n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
-
- /* Convert to KHz, p & r have a fixed point portion */
- return (refclk * n * 100) / (p * r);
-}
-
-static int skl_calc_wrpll_link(const struct intel_dpll_hw_state *pll_state)
-{
- u32 p0, p1, p2, dco_freq;
-
- p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
- p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
-
- if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
- p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
- else
- p1 = 1;
-
-
- switch (p0) {
- case DPLL_CFGCR2_PDIV_1:
- p0 = 1;
- break;
- case DPLL_CFGCR2_PDIV_2:
- p0 = 2;
- break;
- case DPLL_CFGCR2_PDIV_3:
- p0 = 3;
- break;
- case DPLL_CFGCR2_PDIV_7:
- p0 = 7;
- break;
- }
-
- switch (p2) {
- case DPLL_CFGCR2_KDIV_5:
- p2 = 5;
- break;
- case DPLL_CFGCR2_KDIV_2:
- p2 = 2;
- break;
- case DPLL_CFGCR2_KDIV_3:
- p2 = 3;
- break;
- case DPLL_CFGCR2_KDIV_1:
- p2 = 1;
- break;
- }
-
- dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK)
- * 24 * 1000;
-
- dco_freq += (((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9)
- * 24 * 1000) / 0x8000;
-
- if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
- return 0;
-
- return dco_freq / (p0 * p1 * p2 * 5);
-}
-
-int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *pll_state)
-{
- u32 p0, p1, p2, dco_freq, ref_clock;
-
- p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
- p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
-
- if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
- p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
- DPLL_CFGCR1_QDIV_RATIO_SHIFT;
- else
- p1 = 1;
-
-
- switch (p0) {
- case DPLL_CFGCR1_PDIV_2:
- p0 = 2;
- break;
- case DPLL_CFGCR1_PDIV_3:
- p0 = 3;
- break;
- case DPLL_CFGCR1_PDIV_5:
- p0 = 5;
- break;
- case DPLL_CFGCR1_PDIV_7:
- p0 = 7;
- break;
- }
-
- switch (p2) {
- case DPLL_CFGCR1_KDIV_1:
- p2 = 1;
- break;
- case DPLL_CFGCR1_KDIV_2:
- p2 = 2;
- break;
- case DPLL_CFGCR1_KDIV_3:
- p2 = 3;
- break;
- }
-
- ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
-
- dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK)
- * ref_clock;
-
- dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
- DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
-
- if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
- return 0;
-
- return dco_freq / (p0 * p1 * p2 * 5);
-}
-
static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
enum port port)
{
@@ -1504,77 +1347,6 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
}
}
-static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
- const struct intel_dpll_hw_state *pll_state)
-{
- u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
- u64 tmp;
-
- ref_clock = dev_priv->cdclk.hw.ref;
-
- if (INTEL_GEN(dev_priv) >= 12) {
- m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
- m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
- m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
-
- if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
- m2_frac = pll_state->mg_pll_bias &
- DKL_PLL_BIAS_FBDIV_FRAC_MASK;
- m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
- } else {
- m2_frac = 0;
- }
- } else {
- m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
- m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
-
- if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
- m2_frac = pll_state->mg_pll_div0 &
- MG_PLL_DIV0_FBDIV_FRAC_MASK;
- m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
- } else {
- m2_frac = 0;
- }
- }
-
- switch (pll_state->mg_clktop2_hsclkctl &
- MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
- case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
- div1 = 2;
- break;
- case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
- div1 = 3;
- break;
- case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
- div1 = 5;
- break;
- case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
- div1 = 7;
- break;
- default:
- MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
- return 0;
- }
-
- div2 = (pll_state->mg_clktop2_hsclkctl &
- MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
- MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
-
- /* div2 value of 0 is same as 1 means no div */
- if (div2 == 0)
- div2 = 1;
-
- /*
- * Adjust the original formula to delay the division by 2^22 in order to
- * minimize possible rounding errors.
- */
- tmp = (u64)m1 * m2_int * ref_clock +
- (((u64)m1 * m2_frac * ref_clock) >> 22);
- tmp = div_u64(tmp, 5 * div1 * div2);
-
- return tmp;
-}
-
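For reference, the removed MG/DKL calculation treats m2 as a fixed-point value with a 22-bit fractional part:

	link = ref * m1 * (m2_int + m2_frac / 2^22) / (5 * div1 * div2)

Accumulating the integer and shifted fractional contributions in a u64 before the single div_u64() is what the "delay the division by 2^22" comment refers to: dividing earlier would truncate the fractional term.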
static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
{
int dotclock;
@@ -1600,215 +1372,22 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
}
-static void icl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
- enum port port = encoder->port;
- enum phy phy = intel_port_to_phy(dev_priv, port);
- int link_clock;
-
- if (intel_phy_is_combo(dev_priv, phy)) {
- link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
- } else {
- enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv,
- pipe_config->shared_dpll);
-
- if (pll_id == DPLL_ID_ICL_TBTPLL)
- link_clock = icl_calc_tbt_pll_link(dev_priv, port);
- else
- link_clock = icl_calc_mg_pll_link(dev_priv, pll_state);
- }
-
- pipe_config->port_clock = link_clock;
-
- ddi_dotclock_get(pipe_config);
-}
-
-static void cnl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
- int link_clock;
-
- if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
- link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
- } else {
- link_clock = pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
-
- switch (link_clock) {
- case DPLL_CFGCR0_LINK_RATE_810:
- link_clock = 81000;
- break;
- case DPLL_CFGCR0_LINK_RATE_1080:
- link_clock = 108000;
- break;
- case DPLL_CFGCR0_LINK_RATE_1350:
- link_clock = 135000;
- break;
- case DPLL_CFGCR0_LINK_RATE_1620:
- link_clock = 162000;
- break;
- case DPLL_CFGCR0_LINK_RATE_2160:
- link_clock = 216000;
- break;
- case DPLL_CFGCR0_LINK_RATE_2700:
- link_clock = 270000;
- break;
- case DPLL_CFGCR0_LINK_RATE_3240:
- link_clock = 324000;
- break;
- case DPLL_CFGCR0_LINK_RATE_4050:
- link_clock = 405000;
- break;
- default:
- WARN(1, "Unsupported link rate\n");
- break;
- }
- link_clock *= 2;
- }
-
- pipe_config->port_clock = link_clock;
-
- ddi_dotclock_get(pipe_config);
-}
-
-static void skl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
- int link_clock;
-
- /*
- * ctrl1 register is already shifted for each pll, just use 0 to get
- * the internal shift for each field
- */
- if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) {
- link_clock = skl_calc_wrpll_link(pll_state);
- } else {
- link_clock = pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0);
- link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(0);
-
- switch (link_clock) {
- case DPLL_CTRL1_LINK_RATE_810:
- link_clock = 81000;
- break;
- case DPLL_CTRL1_LINK_RATE_1080:
- link_clock = 108000;
- break;
- case DPLL_CTRL1_LINK_RATE_1350:
- link_clock = 135000;
- break;
- case DPLL_CTRL1_LINK_RATE_1620:
- link_clock = 162000;
- break;
- case DPLL_CTRL1_LINK_RATE_2160:
- link_clock = 216000;
- break;
- case DPLL_CTRL1_LINK_RATE_2700:
- link_clock = 270000;
- break;
- default:
- drm_WARN(encoder->base.dev, 1,
- "Unsupported link rate\n");
- break;
- }
- link_clock *= 2;
- }
-
- pipe_config->port_clock = link_clock;
-
- ddi_dotclock_get(pipe_config);
-}
-
-static void hsw_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int link_clock = 0;
- u32 val, pll;
-
- val = hsw_pll_to_ddi_pll_sel(pipe_config->shared_dpll);
- switch (val & PORT_CLK_SEL_MASK) {
- case PORT_CLK_SEL_LCPLL_810:
- link_clock = 81000;
- break;
- case PORT_CLK_SEL_LCPLL_1350:
- link_clock = 135000;
- break;
- case PORT_CLK_SEL_LCPLL_2700:
- link_clock = 270000;
- break;
- case PORT_CLK_SEL_WRPLL1:
- link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(0));
- break;
- case PORT_CLK_SEL_WRPLL2:
- link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(1));
- break;
- case PORT_CLK_SEL_SPLL:
- pll = intel_de_read(dev_priv, SPLL_CTL) & SPLL_FREQ_MASK;
- if (pll == SPLL_FREQ_810MHz)
- link_clock = 81000;
- else if (pll == SPLL_FREQ_1350MHz)
- link_clock = 135000;
- else if (pll == SPLL_FREQ_2700MHz)
- link_clock = 270000;
- else {
- WARN(1, "bad spll freq\n");
- return;
- }
- break;
- default:
- WARN(1, "bad port clock sel\n");
- return;
- }
-
- pipe_config->port_clock = link_clock * 2;
-
- ddi_dotclock_get(pipe_config);
-}
-
-static int bxt_calc_pll_link(const struct intel_dpll_hw_state *pll_state)
-{
- struct dpll clock;
-
- clock.m1 = 2;
- clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
- if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
- clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
- clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
- clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
- clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
-
- return chv_calc_dpll_params(100000, &clock);
-}
-
-static void bxt_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- pipe_config->port_clock =
- bxt_calc_pll_link(&pipe_config->dpll_hw_state);
-
- ddi_dotclock_get(pipe_config);
-}
-
static void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_ddi_clock_get(encoder, pipe_config);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_ddi_clock_get(encoder, pipe_config);
- else if (IS_GEN9_LP(dev_priv))
- bxt_ddi_clock_get(encoder, pipe_config);
- else if (IS_GEN9_BC(dev_priv))
- skl_ddi_clock_get(encoder, pipe_config);
- else if (INTEL_GEN(dev_priv) <= 8)
- hsw_ddi_clock_get(encoder, pipe_config);
+ if (intel_phy_is_tc(dev_priv, phy) &&
+ intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll) ==
+ DPLL_ID_ICL_TBTPLL)
+ pipe_config->port_clock = icl_calc_tbt_pll_link(dev_priv,
+ encoder->port);
+ else
+ pipe_config->port_clock =
+ intel_dpll_get_freq(dev_priv, pipe_config->shared_dpll);
+
+ ddi_dotclock_get(pipe_config);
}
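The per-platform *_ddi_clock_get() variants are deleted because port clock readout now goes through the shared-DPLL layer (intel_dpll_get_freq(), presumably introduced alongside this change in intel_dpll_mgr.c); only the ICL+ Type-C TBT PLL keeps a port-based special case, since its link rate is read from DDI_CLK_SEL rather than derived from the PLL state. The removed SKL/CNL WRPLL variants computed the same DCO math, with a 15-bit fraction:

	dco  = ref * (DCO_INTEGER + DCO_FRACTION / 2^15)
	link = dco / (5 * p0 * p1 * p2)

Worked example (hypothetical register values, for illustration only): ref = 24 MHz, DCO_INTEGER = 337, DCO_FRACTION = 0x4000 gives dco = 24 * 337.5 = 8100 MHz; with p0 = 2, p1 = 1, p2 = 5 that is 8100 / 50 = 162 MHz, i.e. a 162000 kHz port_clock.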
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
@@ -1822,7 +1401,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
if (!intel_crtc_has_dp_encoder(crtc_state))
return;
- WARN_ON(transcoder_is_dsi(cpu_transcoder));
+ drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder));
temp = DP_MSA_MISC_SYNC_CLOCK;
@@ -1845,8 +1424,8 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
}
/* nonsense combination */
- WARN_ON(crtc_state->limited_color_range &&
- crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+ drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->limited_color_range)
temp |= DP_MSA_MISC_COLOR_CEA_RGB;
@@ -1962,7 +1541,8 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
enum transcoder master;
master = crtc_state->mst_master_transcoder;
- WARN_ON(master == INVALID_TRANSCODER);
+ drm_WARN_ON(&dev_priv->drm,
+ master == INVALID_TRANSCODER);
temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master);
}
} else {
@@ -2043,10 +1623,11 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
wakeref = intel_display_power_get_if_enabled(dev_priv,
intel_encoder->power_domain);
- if (WARN_ON(!wakeref))
+ if (drm_WARN_ON(dev, !wakeref))
return -ENXIO;
- if (WARN_ON(!intel_encoder->get_hw_state(intel_encoder, &pipe))) {
+ if (drm_WARN_ON(dev,
+ !intel_encoder->get_hw_state(intel_encoder, &pipe))) {
ret = -EIO;
goto out;
}
@@ -2283,7 +1864,8 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
* happen since fake-MST encoders don't set their get_power_domains()
* hook.
*/
- if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
return;
dig_port = enc_to_dig_port(encoder);
@@ -2381,9 +1963,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
else
ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
- if (WARN_ON_ONCE(!ddi_translations))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
- if (WARN_ON_ONCE(level >= n_entries))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
iboost = ddi_translations[level].i_boost;
@@ -2416,9 +1998,9 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
else
ddi_translations = bxt_get_buf_trans_dp(dev_priv, &n_entries);
- if (WARN_ON_ONCE(!ddi_translations))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
- if (WARN_ON_ONCE(level >= n_entries))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
bxt_ddi_phy_set_signal_level(dev_priv, port,
@@ -2468,9 +2050,10 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
}
- if (WARN_ON(n_entries < 1))
+ if (drm_WARN_ON(&dev_priv->drm, n_entries < 1))
n_entries = 1;
- if (WARN_ON(n_entries > ARRAY_SIZE(index_to_dp_signal_levels)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ n_entries > ARRAY_SIZE(index_to_dp_signal_levels)))
n_entries = ARRAY_SIZE(index_to_dp_signal_levels);
return index_to_dp_signal_levels[n_entries - 1] &
@@ -2513,9 +2096,9 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
else
ddi_translations = cnl_get_buf_trans_dp(dev_priv, &n_entries);
- if (WARN_ON_ONCE(!ddi_translations))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
- if (WARN_ON_ONCE(level >= n_entries))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
/* Set PORT_TX_DW5 Scaling Mode Sel to 010b. */
@@ -3044,10 +2627,11 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
- WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0);
+ drm_WARN_ON(&dev_priv->drm,
+ (val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0);
if (intel_phy_is_combo(dev_priv, phy)) {
/*
@@ -3069,7 +2653,7 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
@@ -3078,13 +2662,13 @@ static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
@@ -3106,7 +2690,7 @@ static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
* Punt on the case now where clock is gated, but it would
* be needed by the port. Something else is really broken then.
*/
- if (WARN_ON(ddi_clk_needed))
+ if (drm_WARN_ON(&dev_priv->drm, ddi_clk_needed))
continue;
DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
@@ -3138,7 +2722,7 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
* In the unlikely case that BIOS enables DP in MST mode, just
* warn since our MST HW readout is incomplete.
*/
- if (WARN_ON(is_mst))
+ if (drm_WARN_ON(&dev_priv->drm, is_mst))
return;
}
@@ -3157,7 +2741,8 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
if (other_encoder == encoder)
continue;
- if (WARN_ON(port_mask & BIT(other_encoder->port)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ port_mask & BIT(other_encoder->port)))
return;
}
/*
@@ -3179,10 +2764,10 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
u32 val;
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- if (WARN_ON(!pll))
+ if (drm_WARN_ON(&dev_priv->drm, !pll))
return;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
if (INTEL_GEN(dev_priv) >= 11) {
if (!intel_phy_is_combo(dev_priv, phy))
@@ -3226,7 +2811,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
hsw_pll_to_ddi_pll_sel(pll));
}
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void intel_ddi_clk_disable(struct intel_encoder *encoder)
@@ -3285,7 +2870,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
switch (pin_assignment) {
case 0x0:
- WARN_ON(intel_dig_port->tc_mode != TC_PORT_LEGACY);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dig_port->tc_mode != TC_PORT_LEGACY);
if (width == 1) {
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
} else {
@@ -3542,9 +3128,10 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
int level = intel_ddi_dp_level(intel_dp);
if (INTEL_GEN(dev_priv) < 11)
- WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
+ drm_WARN_ON(&dev_priv->drm,
+ is_mst && (port == PORT_A || port == PORT_E));
else
- WARN_ON(is_mst && port == PORT_A);
+ drm_WARN_ON(&dev_priv->drm, is_mst && port == PORT_A);
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
@@ -3683,7 +3270,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
 * the DP link parameters
*/
- WARN_ON(crtc_state->has_pch_encoder);
+ drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder);
if (INTEL_GEN(dev_priv) >= 11)
icl_map_plls_to_ports(encoder, crtc_state);
@@ -3958,9 +3545,9 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
[PORT_E] = TRANSCODER_A,
};
- WARN_ON(INTEL_GEN(dev_priv) < 9);
+ drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) < 9);
- if (WARN_ON(port < PORT_A || port > PORT_E))
+ if (drm_WARN_ON(&dev_priv->drm, port < PORT_A || port > PORT_E))
port = PORT_A;
return CHICKEN_TRANS(trans[port]);
@@ -3978,8 +3565,9 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
crtc_state->hdmi_high_tmds_clock_ratio,
crtc_state->hdmi_scrambling))
- DRM_ERROR("[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
- connector->base.id, connector->name);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
+ connector->base.id, connector->name);
/* Display WA #1143: skl,kbl,cfl */
if (IS_GEN9_BC(dev_priv)) {
@@ -4258,7 +3846,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
u32 temp, flags = 0;
/* XXX: DSI transcoder paranoia */
- if (WARN_ON(transcoder_is_dsi(cpu_transcoder)))
+ if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)))
return;
intel_dsc_get_config(encoder, pipe_config);
@@ -4661,7 +4249,8 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
crtc_state = to_intel_crtc_state(crtc->base.state);
- WARN_ON(!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
+ drm_WARN_ON(&dev_priv->drm,
+ !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
if (!crtc_state->hw.active)
return 0;
@@ -4913,7 +4502,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->update_complete = intel_ddi_update_complete;
}
- WARN_ON(port > PORT_I);
+ drm_WARN_ON(&dev_priv->drm, port > PORT_I);
intel_dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO +
port - PORT_A;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 167c6579d972..55fd72b901fe 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -6,8 +6,6 @@
#ifndef __INTEL_DDI_H__
#define __INTEL_DDI_H__
-#include <drm/i915_drm.h>
-
#include "intel_display.h"
struct drm_connector_state;
@@ -47,7 +45,5 @@ u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable);
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
-int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *state);
#endif /* __INTEL_DDI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 86ca35d160b8..346846609f45 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -41,7 +41,6 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
-#include <drm/i915_drm.h>
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
@@ -203,9 +202,9 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
val = vlv_cck_read(dev_priv, reg);
divider = val & CCK_FREQUENCY_VALUES;
- WARN((val & CCK_FREQUENCY_STATUS) !=
- (divider << CCK_FREQUENCY_STATUS_SHIFT),
- "%s change in progress\n", name);
+ drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
+ (divider << CCK_FREQUENCY_STATUS_SHIFT),
+ "%s change in progress\n", name);
return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
@@ -882,7 +881,7 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
return calculated_clock->p > best_clock->p;
}
- if (WARN_ON_ONCE(!target_freq))
+ if (drm_WARN_ON_ONCE(dev, !target_freq))
return false;
*error_ppm = div_u64(1000000ULL *
@@ -1090,7 +1089,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
/* Wait for the Pipe State to go off */
if (intel_de_wait_for_clear(dev_priv, reg,
I965_PIPECONF_ACTIVE, 100))
- WARN(1, "pipe_off wait timed out\n");
+ drm_WARN(&dev_priv->drm, 1,
+ "pipe_off wait timed out\n");
} else {
intel_wait_for_pipe_scanline_stopped(crtc);
}
@@ -1205,7 +1205,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
enum pipe panel_pipe = INVALID_PIPE;
bool locked = true;
- if (WARN_ON(HAS_DDI(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
return;
if (HAS_PCH_SPLIT(dev_priv)) {
@@ -1241,7 +1241,8 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
pp_reg = PP_CONTROL(0);
port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
- WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
+ drm_WARN_ON(&dev_priv->drm,
+ port_sel != PANEL_PORT_SELECT_LVDS);
intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
}
@@ -1482,7 +1483,9 @@ static void chv_enable_pll(struct intel_crtc *crtc,
* DPLLB VGA mode also seems to cause problems.
* We should always have it disabled.
*/
- WARN_ON((intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
+ drm_WARN_ON(&dev_priv->drm,
+ (intel_de_read(dev_priv, DPLL(PIPE_B)) &
+ DPLL_VGA_MODE_DIS) == 0);
} else {
intel_de_write(dev_priv, DPLL_MD(pipe),
pipe_config->dpll_hw_state.dpll_md);
@@ -1630,10 +1633,11 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
if (intel_de_wait_for_register(dev_priv, dpll_reg,
port_mask, expected_mask, 1000))
- WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
- dport->base.base.base.id, dport->base.base.name,
- intel_de_read(dev_priv, dpll_reg) & port_mask,
- expected_mask);
+ drm_WARN(&dev_priv->drm, 1,
+ "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
+ dport->base.base.base.id, dport->base.base.name,
+ intel_de_read(dev_priv, dpll_reg) & port_mask,
+ expected_mask);
}
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
@@ -1872,7 +1876,7 @@ void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
val = intel_de_read(dev_priv, reg);
if (val & PIPECONF_ENABLE) {
/* we keep both pipes enabled on 830 */
- WARN_ON(!IS_I830(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
return;
}
@@ -2218,11 +2222,11 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
unsigned int pinctl;
u32 alignment;
- if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
+ if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
return ERR_PTR(-EINVAL);
alignment = intel_surf_alignment(fb, 0);
- if (WARN_ON(alignment && !is_power_of_2(alignment)))
+ if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
return ERR_PTR(-EINVAL);
/* Note that the w/a also requires 64 PTE of padding following the
@@ -2393,7 +2397,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
struct drm_i915_private *dev_priv = to_i915(fb->dev);
unsigned int cpp = fb->format->cpp[color_plane];
- WARN_ON(new_offset > old_offset);
+ drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);
if (!is_surface_linear(fb, color_plane)) {
unsigned int tile_size, tile_width, tile_height;
@@ -2715,9 +2719,10 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
/*
* We assume the primary plane for pipe A has
- * the highest stride limits of them all.
+ * the highest stride limits of them all,
+ * but if pipe A happens to be disabled, use the first pipe from pipe_mask.
*/
- crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+ crtc = intel_get_first_crtc(dev_priv);
if (!crtc)
return 0;
@@ -3155,7 +3160,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- WARN_ON(is_ccs_modifier(fb->modifier));
+ drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));
/* Make src coordinates relative to the viewport */
drm_rect_translate(&plane_state->uapi.src,
@@ -3196,7 +3201,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
DRM_MODE_ROTATE_0, tile_size);
offset /= tile_size;
- WARN_ON(i >= ARRAY_SIZE(info->plane));
+ drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
info->plane[i].offset = offset;
info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
tile_width * cpp);
@@ -3847,7 +3852,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
intel_add_fb_offsets(&x, &y, plane_state, 0);
offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
alignment = intel_surf_alignment(fb, 0);
- if (WARN_ON(alignment && !is_power_of_2(alignment)))
+ if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
return -EINVAL;
/*
@@ -4494,10 +4499,15 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- intel_de_write(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
- intel_de_write(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
- intel_de_write(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
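skl_detach_scaler() now performs all three writes under a single uncore.lock critical section using the intel_de_write_fw() variants, which skip the per-access locking and forcewake bookkeeping (skl_pfit_enable() below gets the same treatment). The pattern, with REG_A/REG_B as hypothetical placeholders:

	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	/* _fw accessors: no implicit locking/forcewake per access */
	intel_de_write_fw(dev_priv, REG_A, 0);
	intel_de_write_fw(dev_priv, REG_B, 0);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);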
/*
@@ -4841,7 +4851,7 @@ __intel_display_resume(struct drm_device *dev,
ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
- WARN_ON(ret == -EDEADLK);
+ drm_WARN_ON(dev, ret == -EDEADLK);
return ret;
}
@@ -5621,10 +5631,10 @@ static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
}
/* This should not happen with any sane values */
- WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
- ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
- WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
- ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
+ drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
+ ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
+ drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
+ ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
drm_dbg_kms(&dev_priv->drm,
"iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
@@ -5730,8 +5740,12 @@ static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool e
if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
return;
- WARN_ON(intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
- WARN_ON(intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
+ FDI_RX_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
+ FDI_RX_ENABLE);
temp &= ~FDI_BC_BIFURCATION_SELECT;
if (enable)
@@ -5880,7 +5894,7 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
port = intel_get_crtc_new_encoder(state, crtc_state)->port;
- WARN_ON(port < PORT_B || port > PORT_D);
+ drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
temp |= TRANS_DP_PORT_SEL(port);
intel_de_write(dev_priv, reg, temp);
@@ -6234,9 +6248,11 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
if (crtc_state->pch_pfit.enabled) {
u16 uv_rgb_hphase, uv_rgb_vphase;
int pfit_w, pfit_h, hscale, vscale;
+ unsigned long irqflags;
int id;
- if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
+ if (drm_WARN_ON(&dev_priv->drm,
+ crtc_state->scaler_state.scaler_id < 0))
return;
pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
@@ -6249,16 +6265,21 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
id = scaler_state->scaler_id;
- intel_de_write(dev_priv, SKL_PS_CTRL(pipe, id),
- PS_SCALER_EN | PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
+ PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- intel_de_write(dev_priv, SKL_PS_WIN_POS(pipe, id),
- crtc_state->pch_pfit.pos);
- intel_de_write(dev_priv, SKL_PS_WIN_SZ(pipe, id),
- crtc_state->pch_pfit.size);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
+ crtc_state->pch_pfit.pos);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
+ crtc_state->pch_pfit.size);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
}
@@ -6300,11 +6321,11 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
* This function is called from post_plane_update, which is run after
* a vblank wait.
*/
- WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
+ drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
if (IS_BROADWELL(dev_priv)) {
- WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
- IPS_ENABLE | IPS_PCODE_CONTROL));
+ drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
+ IPS_ENABLE | IPS_PCODE_CONTROL));
/* Quoting Art Runyan: "its not safe to expect any particular
* value in IPS_CTL bit 31 after enabling IPS through the
* mailbox." Moreover, the mailbox may return a bogus state,
@@ -6333,7 +6354,8 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
return;
if (IS_BROADWELL(dev_priv)) {
- WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
+ drm_WARN_ON(dev,
+ sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
/*
* Wait for PCODE to finish disabling IPS. The BSpec specified
* 42ms timeout value leads to occasional timeouts so use 100ms
@@ -6846,7 +6868,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (WARN_ON(crtc->active))
+ if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
/*
@@ -7002,7 +7024,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
bool psl_clkgate_wa;
- if (WARN_ON(crtc->active))
+ if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
intel_encoders_pre_pll_enable(state, crtc);
@@ -7185,7 +7207,8 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
* The panel fitter should only be adjusted whilst the pipe is disabled,
* according to register description and PRM.
*/
- WARN_ON(intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
intel_de_write(dev_priv, PFIT_PGM_RATIOS,
@@ -7381,7 +7404,7 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (WARN_ON(crtc->active))
+ if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
if (intel_crtc_has_dp_encoder(new_crtc_state))
@@ -7447,7 +7470,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (WARN_ON(crtc->active))
+ if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
i9xx_set_pll_dividers(new_crtc_state);
@@ -7593,7 +7616,7 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
ret = drm_atomic_add_affected_connectors(state, &crtc->base);
- WARN_ON(IS_ERR(temp_crtc_state) || ret);
+ drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
@@ -7606,7 +7629,8 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
crtc->active = false;
crtc->base.enabled = false;
- WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
+ drm_WARN_ON(&dev_priv->drm,
+ drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
crtc_state->uapi.active = false;
crtc_state->uapi.connector_mask = 0;
crtc_state->uapi.encoder_mask = 0;
@@ -9216,7 +9240,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
if (!plane->get_hw_state(plane, &pipe))
return;
- WARN_ON(pipe != crtc->pipe);
+ drm_WARN_ON(dev, pipe != crtc->pipe);
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
@@ -9328,7 +9352,8 @@ bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
if (tmp & PIPEMISC_YUV420_ENABLE) {
/* We support 4:2:0 in full blend mode only */
- WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
+ drm_WARN_ON(&dev_priv->drm,
+ (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
return INTEL_OUTPUT_FORMAT_YCBCR420;
} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
@@ -9517,7 +9542,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
/* Check if any DPLLs are using the SSC source */
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
if (!(temp & DPLL_VCO_ENABLE))
@@ -9755,10 +9780,11 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
{
u32 reg, tmp;
- if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
+ if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
+ "FDI requires downspread\n"))
with_spread = true;
- if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
- with_fdi, "LP PCH doesn't have FDI\n"))
+ if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
+ with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
mutex_lock(&dev_priv->sb_lock);
@@ -9852,10 +9878,10 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
u32 tmp;
int idx = BEND_IDX(steps);
- if (WARN_ON(steps % 5 != 0))
+ if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
return;
- if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
+ if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
return;
mutex_lock(&dev_priv->sb_lock);
@@ -10023,8 +10049,8 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
* This would end up with an odd purple hue over
* the entire display. Make sure we don't do it.
*/
- WARN_ON(crtc_state->limited_color_range &&
- crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+ drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
@@ -10103,6 +10129,9 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
BIT(PLANE_CURSOR))) == 0)
val |= PIPEMISC_HDR_MODE_PRECISION;
+ if (INTEL_GEN(dev_priv) >= 12)
+ val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
+
intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
@@ -10435,7 +10464,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
if (!plane->get_hw_state(plane, &pipe))
return;
- WARN_ON(pipe != crtc->pipe);
+ drm_WARN_ON(dev, pipe != crtc->pipe);
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
@@ -10570,8 +10599,8 @@ static void ilk_get_pfit_config(struct intel_crtc *crtc,
* ivb/hsw (since we don't use the higher upscaling modes which
* differentiates them) so just WARN about this case for now. */
if (IS_GEN(dev_priv, 7)) {
- WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
- PF_PIPE_SEL_IVB(crtc->pipe));
+ drm_WARN_ON(dev, (tmp & PF_PIPE_SEL_MASK_IVB) !=
+ PF_PIPE_SEL_IVB(crtc->pipe));
}
}
}
@@ -10669,8 +10698,8 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
intel_get_shared_dpll_by_id(dev_priv, pll_id);
pll = pipe_config->shared_dpll;
- WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
- &pipe_config->dpll_hw_state));
+ drm_WARN_ON(dev, !pll->info->funcs->get_hw_state(dev_priv, pll,
+ &pipe_config->dpll_hw_state));
tmp = pipe_config->dpll_hw_state.dpll;
pipe_config->pixel_multiplier =
@@ -10727,7 +10756,7 @@ static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
- if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
+ if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
return;
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
@@ -10754,12 +10783,13 @@ static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
port));
port_dpll_id = ICL_PORT_DPLL_MG_PHY;
} else {
- WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
+ drm_WARN_ON(&dev_priv->drm,
+ clk_sel < DDI_CLK_SEL_TBT_162);
id = DPLL_ID_ICL_TBTPLL;
port_dpll_id = ICL_PORT_DPLL_DEFAULT;
}
} else {
- WARN(1, "Invalid port %x\n", port);
+ drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
return;
}
@@ -10802,7 +10832,7 @@ static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
id = temp >> (port * 3 + 1);
- if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
+ if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3))
return;
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
@@ -10896,8 +10926,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
- WARN(1, "unknown pipe linked to transcoder %s\n",
- transcoder_name(panel_transcoder));
+ drm_WARN(dev, 1,
+ "unknown pipe linked to transcoder %s\n",
+ transcoder_name(panel_transcoder));
/* fall through */
case TRANS_DDI_EDP_INPUT_A_ONOFF:
force_thru = true;
@@ -10925,11 +10956,11 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
/*
* Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
*/
- WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
- enabled_panel_transcoders != BIT(TRANSCODER_EDP));
+ drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
+ enabled_panel_transcoders != BIT(TRANSCODER_EDP));
power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
- WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+ drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wf)
@@ -10963,7 +10994,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
cpu_transcoder = TRANSCODER_DSI_C;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+ drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wf)
@@ -11032,7 +11063,8 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
pll = pipe_config->shared_dpll;
if (pll) {
- WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
+ drm_WARN_ON(&dev_priv->drm,
+ !pll->info->funcs->get_hw_state(dev_priv, pll,
&pipe_config->dpll_hw_state));
}
@@ -11103,8 +11135,9 @@ static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
intel_display_power_put(dev_priv, power_domain, trans_wakeref);
}
- WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
- crtc_state->sync_mode_slaves_mask);
+ drm_WARN_ON(&dev_priv->drm,
+ crtc_state->master_transcoder != INVALID_TRANSCODER &&
+ crtc_state->sync_mode_slaves_mask);
}
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
@@ -11135,7 +11168,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (IS_GEN9_LP(dev_priv) &&
bxt_get_dsi_transcoder_state(crtc, pipe_config,
&power_domain_mask, wakerefs)) {
- WARN_ON(active);
+ drm_WARN_ON(&dev_priv->drm, active);
active = true;
}
@@ -11202,7 +11235,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
- WARN_ON(power_domain_mask & BIT_ULL(power_domain));
+ drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain));
wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (wf) {
@@ -11449,8 +11482,8 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
- WARN_ON(plane_state->uapi.visible &&
- plane_state->color_plane[0].stride != fb->pitches[0]);
+ drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
+ plane_state->color_plane[0].stride != fb->pitches[0]);
switch (fb->pitches[0]) {
case 256:
@@ -11666,8 +11699,8 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
- WARN_ON(plane_state->uapi.visible &&
- plane_state->color_plane[0].stride != fb->pitches[0]);
+ drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
+ plane_state->color_plane[0].stride != fb->pitches[0]);
if (fb->pitches[0] !=
drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
@@ -11886,7 +11919,7 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
old->restore_state = NULL;
- WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+ drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
/*
* Algorithm gets a little messy:
@@ -12330,7 +12363,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
was_visible = old_plane_state->uapi.visible;
visible = plane_state->uapi.visible;
- if (!was_crtc_enabled && WARN_ON(was_visible))
+ if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
was_visible = false;
/*
@@ -12676,7 +12709,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
if (mode_changed && crtc_state->hw.enable &&
dev_priv->display.crtc_compute_clock &&
- !WARN_ON(crtc_state->shared_dpll)) {
+ !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
if (ret)
return ret;
@@ -12706,7 +12739,8 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
}
if (dev_priv->display.compute_intermediate_wm) {
- if (WARN_ON(!dev_priv->display.compute_pipe_wm))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !dev_priv->display.compute_pipe_wm))
return 0;
/*
@@ -13141,24 +13175,21 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
encoder = to_intel_encoder(connector_state->best_encoder);
- WARN_ON(!connector_state->crtc);
+ drm_WARN_ON(dev, !connector_state->crtc);
switch (encoder->type) {
- unsigned int port_mask;
case INTEL_OUTPUT_DDI:
- if (WARN_ON(!HAS_DDI(to_i915(dev))))
+ if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
break;
/* else, fall through */
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
- port_mask = 1 << encoder->port;
-
/* the same port mustn't appear more than once */
- if (used_ports & port_mask)
+ if (used_ports & BIT(encoder->port))
ret = false;
- used_ports |= port_mask;
+ used_ports |= BIT(encoder->port);
break;
case INTEL_OUTPUT_DP_MST:
used_mst_ports |=
@@ -13365,7 +13396,8 @@ encoder_retry:
}
if (ret == RETRY) {
- if (WARN(!retry, "loop in pipe configuration computation\n"))
+ if (drm_WARN(&i915->drm, !retry,
+ "loop in pipe configuration computation\n"))
return -EINVAL;
drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
@@ -13925,9 +13957,10 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
* FDI already provided one idea for the dotclock.
* Yell if the encoder disagrees.
*/
- WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
- "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
- fdi_dotclock, dotclock);
+ drm_WARN(&dev_priv->drm,
+ !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
+ "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
+ fdi_dotclock, dotclock);
}
}
@@ -14269,11 +14302,11 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
if (new_crtc_state->hw.active)
I915_STATE_WARN(!(pll->active_mask & crtc_mask),
"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
- pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
+ pipe_name(crtc->pipe), pll->active_mask);
else
I915_STATE_WARN(pll->active_mask & crtc_mask,
"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
- pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
+ pipe_name(crtc->pipe), pll->active_mask);
I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
@@ -14302,10 +14335,10 @@ verify_shared_dpll_state(struct intel_crtc *crtc,
I915_STATE_WARN(pll->active_mask & crtc_mask,
"pll active mismatch (didn't expect pipe %c in active mask)\n",
- pipe_name(drm_crtc_index(&crtc->base)));
+ pipe_name(crtc->pipe));
I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
"pll enabled crtcs mismatch (found %x in enabled mask)\n",
- pipe_name(drm_crtc_index(&crtc->base)));
+ pipe_name(crtc->pipe));
}
}
@@ -14329,8 +14362,10 @@ verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
{
int i;
- for (i = 0; i < dev_priv->num_shared_dpll; i++)
- verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
+ verify_single_dpll_state(dev_priv,
+ &dev_priv->dpll.shared_dplls[i],
+ NULL, NULL);
}
static void
@@ -14713,8 +14748,8 @@ static int intel_atomic_check(struct drm_device *dev,
/* Catch I915_MODE_FLAG_INHERITED */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (new_crtc_state->hw.mode.private_flags !=
- old_crtc_state->hw.mode.private_flags)
+ if (new_crtc_state->uapi.mode.private_flags !=
+ old_crtc_state->uapi.mode.private_flags)
new_crtc_state->uapi.mode_changed = true;
}
@@ -15054,7 +15089,8 @@ static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *ne
struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
enum transcoder slave_transcoder;
- WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
+ drm_WARN_ON(&dev_priv->drm,
+ !is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
return intel_get_crtc_for_pipe(dev_priv,
@@ -15222,8 +15258,8 @@ static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
struct intel_crtc_state *old_slave_crtc_state =
intel_atomic_get_old_crtc_state(state, slave_crtc);
- WARN_ON(!slave_crtc || !new_slave_crtc_state ||
- !old_slave_crtc_state);
+ drm_WARN_ON(&i915->drm, !slave_crtc || !new_slave_crtc_state ||
+ !old_slave_crtc_state);
drm_dbg_kms(&i915->drm,
"Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
@@ -15287,7 +15323,6 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
struct intel_crtc *crtc;
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
- const u8 num_pipes = INTEL_NUM_PIPES(dev_priv);
u8 update_pipes = 0, modeset_pipes = 0;
int i;
@@ -15324,7 +15359,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
continue;
if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, num_pipes, pipe))
+ entries, I915_MAX_PIPES, pipe))
continue;
entries[pipe] = new_crtc_state->wm.skl.ddb;
@@ -15361,8 +15396,8 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
is_trans_port_sync_slave(new_crtc_state))
continue;
- WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, num_pipes, pipe));
+ drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+ entries, I915_MAX_PIPES, pipe));
entries[pipe] = new_crtc_state->wm.skl.ddb;
modeset_pipes &= ~BIT(pipe);
@@ -15396,8 +15431,8 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
if ((modeset_pipes & BIT(pipe)) == 0)
continue;
- WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, num_pipes, pipe));
+ drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+ entries, I915_MAX_PIPES, pipe));
entries[pipe] = new_crtc_state->wm.skl.ddb;
modeset_pipes &= ~BIT(pipe);
@@ -15405,7 +15440,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state);
}
- WARN_ON(modeset_pipes);
+ drm_WARN_ON(&dev_priv->drm, modeset_pipes);
}
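The loops above stage pipe enables so that no pipe's new DDB allocation overlaps an allocation still programmed on another pipe. A sketch of the half-open-interval test this relies on, assuming a skl_ddb_entry holds a [start, end) block range; this is not the driver's exact helper:

struct ddb_entry {
        u16 start, end; /* assumed [start, end) DDB block range */
};

static bool ddb_overlaps(const struct ddb_entry *ddb,
                         const struct ddb_entry *entries,
                         int num_entries, int ignore_idx)
{
        int i;

        for (i = 0; i < num_entries; i++) {
                if (i == ignore_idx)
                        continue;
                /* two half-open intervals intersect iff each one
                 * starts before the other ends */
                if (ddb->start < entries[i].end &&
                    entries[i].start < ddb->end)
                        return true;
        }
        return false;
}

Passing the compile-time I915_MAX_PIPES instead of the runtime pipe count is safe here because entries[] is zero-initialized and an empty [0, 0) range can never overlap anything.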
@@ -16289,7 +16324,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
struct intel_plane *plane;
const struct drm_plane_funcs *plane_funcs;
unsigned int supported_rotations;
- unsigned int possible_crtcs;
const u32 *formats;
int num_formats;
int ret, zpos;
@@ -16370,18 +16404,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->get_hw_state = i9xx_plane_get_hw_state;
plane->check_plane = i9xx_plane_check;
- possible_crtcs = BIT(pipe);
-
if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, plane_funcs,
+ 0, plane_funcs,
formats, num_formats,
i9xx_format_modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, plane_funcs,
+ 0, plane_funcs,
formats, num_formats,
i9xx_format_modifiers,
DRM_PLANE_TYPE_PRIMARY,
@@ -16423,7 +16455,6 @@ static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- unsigned int possible_crtcs;
struct intel_plane *cursor;
int ret, zpos;
@@ -16456,10 +16487,8 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
cursor->cursor.size = ~0;
- possible_crtcs = BIT(pipe);
-
ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
- possible_crtcs, &intel_cursor_plane_funcs,
+ 0, &intel_cursor_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
cursor_format_modifiers,
@@ -16504,6 +16533,7 @@ static const struct drm_crtc_funcs bdw_crtc_funcs = {
.get_vblank_counter = g4x_get_vblank_counter,
.enable_vblank = bdw_enable_vblank,
.disable_vblank = bdw_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs ilk_crtc_funcs = {
@@ -16512,6 +16542,7 @@ static const struct drm_crtc_funcs ilk_crtc_funcs = {
.get_vblank_counter = g4x_get_vblank_counter,
.enable_vblank = ilk_enable_vblank,
.disable_vblank = ilk_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs g4x_crtc_funcs = {
@@ -16520,6 +16551,7 @@ static const struct drm_crtc_funcs g4x_crtc_funcs = {
.get_vblank_counter = g4x_get_vblank_counter,
.enable_vblank = i965_enable_vblank,
.disable_vblank = i965_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs i965_crtc_funcs = {
@@ -16528,6 +16560,7 @@ static const struct drm_crtc_funcs i965_crtc_funcs = {
.get_vblank_counter = i915_get_vblank_counter,
.enable_vblank = i965_enable_vblank,
.disable_vblank = i965_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs i915gm_crtc_funcs = {
@@ -16536,6 +16569,7 @@ static const struct drm_crtc_funcs i915gm_crtc_funcs = {
.get_vblank_counter = i915_get_vblank_counter,
.enable_vblank = i915gm_enable_vblank,
.disable_vblank = i915gm_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs i915_crtc_funcs = {
@@ -16544,6 +16578,7 @@ static const struct drm_crtc_funcs i915_crtc_funcs = {
.get_vblank_counter = i915_get_vblank_counter,
.enable_vblank = i8xx_enable_vblank,
.disable_vblank = i8xx_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs i8xx_crtc_funcs = {
@@ -16552,6 +16587,7 @@ static const struct drm_crtc_funcs i8xx_crtc_funcs = {
/* no hw vblank counter */
.enable_vblank = i8xx_enable_vblank,
.disable_vblank = i8xx_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static struct intel_crtc *intel_crtc_alloc(void)
@@ -16581,6 +16617,18 @@ static void intel_crtc_free(struct intel_crtc *crtc)
kfree(crtc);
}
+static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_plane *plane;
+
+ for_each_intel_plane(&dev_priv->drm, plane) {
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+ plane->pipe);
+
+ plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
+ }
+}
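Planes are now created with possible_crtcs = 0 and bound here in a single pass once all CRTCs exist. drm_crtc_mask() from <drm/drm_crtc.h> returns BIT(drm_crtc_index(crtc)), so each plane ends up tied to exactly one CRTC; a small usage sketch under that assumption:

#include <drm/drm_crtc.h>

/* True iff the plane may be assigned to this CRTC. */
static bool plane_can_use_crtc(const struct drm_plane *plane,
                               const struct drm_crtc *crtc)
{
        return plane->possible_crtcs & drm_crtc_mask(crtc);
}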
+
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_plane *primary, *cursor;
@@ -16659,7 +16707,9 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_color_init(crtc);
- WARN_ON(drm_crtc_index(&crtc->base) != crtc->pipe);
+ intel_crtc_crc_init(crtc);
+
+ drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
return 0;
@@ -17526,7 +17576,7 @@ static void sanitize_watermarks(struct drm_i915_private *dev_priv)
return;
state = drm_atomic_state_alloc(&dev_priv->drm);
- if (WARN_ON(!state))
+ if (drm_WARN_ON(&dev_priv->drm, !state))
return;
intel_state = to_intel_atomic_state(state);
@@ -17578,7 +17628,8 @@ fail:
* If this actually happens, we'll have to just leave the
* BIOS-programmed watermarks untouched and hope for the best.
*/
- WARN(ret, "Could not determine valid watermarks for inherited state\n");
+ drm_WARN(&dev_priv->drm, ret,
+ "Could not determine valid watermarks for inherited state\n");
drm_atomic_state_put(state);
@@ -17746,11 +17797,9 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config)
i915_vma_put(plane_config->vma);
}
-int intel_modeset_init(struct drm_i915_private *i915)
+/* part #1: call before irq install */
+int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
- struct drm_device *dev = &i915->drm;
- enum pipe pipe;
- struct intel_crtc *crtc;
int ret;
i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
@@ -17775,6 +17824,17 @@ int intel_modeset_init(struct drm_i915_private *i915)
intel_fbc_init(i915);
+ return 0;
+}
+
+/* part #2: call after irq install */
+int intel_modeset_init(struct drm_i915_private *i915)
+{
+ struct drm_device *dev = &i915->drm;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+ int ret;
+
intel_init_pm(i915);
intel_panel_sanitize_ssc(i915);
@@ -17795,6 +17855,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
}
}
+ intel_plane_possible_crtcs_init(i915);
intel_shared_dpll_init(dev);
intel_update_fdi_pll_freq(i915);
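Splitting intel_modeset_init() in two lets the caller install interrupts between the halves. A hypothetical probe-path ordering implied by the split; the real call site lives elsewhere in the driver and intel_irq_install() is assumed here:

/* Sketch only; error unwinding elided. */
ret = intel_modeset_init_noirq(i915);   /* part #1: before IRQs */
if (ret)
        return ret;

ret = intel_irq_install(i915);          /* assumed irq-install step */
if (ret)
        return ret;

ret = intel_modeset_init(i915);         /* part #2: IRQs available */
if (ret)
        return ret;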
@@ -17873,7 +17934,8 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 dpll, fp;
int i;
- WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
+ drm_WARN_ON(&dev_priv->drm,
+ i9xx_calc_dpll_params(48000, &clock) != 25154);
drm_dbg_kms(&dev_priv->drm,
"enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
@@ -17938,11 +18000,19 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
pipe_name(pipe));
- WARN_ON(intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
- WARN_ON(intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
- WARN_ON(intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
- WARN_ON(intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
- WARN_ON(intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
+ DISPLAY_PLANE_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
+ DISPLAY_PLANE_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
+ DISPLAY_PLANE_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);
intel_de_write(dev_priv, PIPECONF(pipe), 0);
intel_de_posting_read(dev_priv, PIPECONF(pipe));
@@ -18263,7 +18333,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
u8 active_pipes = 0;
- int i;
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
@@ -18292,33 +18361,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
readout_plane_state(dev_priv);
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-
- pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
- &pll->state.hw_state);
-
- if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
- pll->info->id == DPLL_ID_EHL_DPLL4) {
- pll->wakeref = intel_display_power_get(dev_priv,
- POWER_DOMAIN_DPLL_DC_OFF);
- }
-
- pll->state.crtc_mask = 0;
- for_each_intel_crtc(dev, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- if (crtc_state->hw.active &&
- crtc_state->shared_dpll == pll)
- pll->state.crtc_mask |= 1 << crtc->pipe;
- }
- pll->active_mask = pll->state.crtc_mask;
-
- drm_dbg_kms(&dev_priv->drm,
- "%s hw state readout: crtc_mask 0x%08x, on %i\n",
- pll->info->name, pll->state.crtc_mask, pll->on);
- }
+ intel_dpll_readout_hw_state(dev_priv);
for_each_intel_encoder(dev, encoder) {
pipe = 0;
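The open-coded PLL readout moves behind intel_dpll_readout_hw_state(). Assumed shape of the consolidated helper, mirroring the loop removed above; readout_dpll_hw_state() is a hypothetical per-PLL body:

void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
{
        int i;

        for (i = 0; i < i915->dpll.num_shared_dpll; i++)
                readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
}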
@@ -18446,7 +18489,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
if (crtc_state->hw.active) {
min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
- if (WARN_ON(min_cdclk < 0))
+ if (drm_WARN_ON(dev, min_cdclk < 0))
min_cdclk = 0;
}
@@ -18575,7 +18618,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
struct intel_encoder *encoder;
struct intel_crtc *crtc;
intel_wakeref_t wakeref;
- int i;
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
@@ -18628,19 +18670,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
intel_modeset_update_connector_atomic_state(dev);
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-
- if (!pll->on || pll->active_mask)
- continue;
-
- drm_dbg_kms(&dev_priv->drm,
- "%s enabled but not in use, disabling\n",
- pll->info->name);
-
- pll->info->funcs->disable(dev_priv, pll);
- pll->on = false;
- }
+ intel_dpll_sanitize_state(dev_priv);
if (IS_G4X(dev_priv)) {
g4x_wm_get_hw_state(dev_priv);
@@ -18660,7 +18690,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
u64 put_domains;
put_domains = modeset_get_crtc_power_domains(crtc_state);
- if (WARN_ON(put_domains))
+ if (drm_WARN_ON(dev, put_domains))
modeset_put_power_domains(dev_priv, put_domains);
}
@@ -18727,7 +18757,7 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915)
flush_workqueue(i915->modeset_wq);
flush_work(&i915->atomic_helper.free_work);
- WARN_ON(!llist_empty(&i915->atomic_helper.free_list));
+ drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}
/* part #2: call after irq uninstall */
@@ -18772,6 +18802,15 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+static bool
+has_transcoder(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
+{
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return HAS_TRANSCODER_EDP(dev_priv);
+ else
+ return INTEL_INFO(dev_priv)->pipe_mask & BIT(cpu_transcoder);
+}
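Testing pipe_mask with BIT(cpu_transcoder) works only because i915's enum transcoder aliases TRANSCODER_A..TRANSCODER_C to PIPE_A..PIPE_C. Compile-time checks expressing that assumption (BUILD_BUG_ON is from <linux/build_bug.h>):

/* has_transcoder() silently assumes this aliasing holds: */
BUILD_BUG_ON(TRANSCODER_A != PIPE_A);
BUILD_BUG_ON(TRANSCODER_B != PIPE_B);
BUILD_BUG_ON(TRANSCODER_C != PIPE_C);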
+
struct intel_display_error_state {
u32 power_well_driver;
@@ -18880,7 +18919,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
enum transcoder cpu_transcoder = transcoders[i];
- if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
+ if (!has_transcoder(dev_priv, cpu_transcoder))
continue;
error->transcoder[i].available = true;
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index f92efbbec838..adb1225a3480 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -26,7 +26,6 @@
#define _INTEL_DISPLAY_H_
#include <drm/drm_util.h>
-#include <drm/i915_drm.h>
enum link_m_n_set;
struct dpll;
@@ -40,6 +39,7 @@ struct drm_framebuffer;
struct drm_i915_error_state_buf;
struct drm_i915_gem_object;
struct drm_i915_private;
+struct drm_mode_fb_cmd2;
struct drm_modeset_acquire_ctx;
struct drm_plane;
struct drm_plane_state;
@@ -47,6 +47,7 @@ struct i915_ggtt_view;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_crtc_state;
struct intel_digital_port;
struct intel_dp;
struct intel_encoder;
@@ -55,7 +56,6 @@ struct intel_plane;
struct intel_plane_state;
struct intel_remapped_info;
struct intel_rotation_info;
-struct intel_crtc_state;
enum i915_gpio {
GPIOA,
@@ -313,10 +313,11 @@ enum phy_fia {
};
#define for_each_pipe(__dev_priv, __p) \
- for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++)
+ for ((__p) = 0; (__p) < I915_MAX_PIPES; (__p)++) \
+ for_each_if(INTEL_INFO(__dev_priv)->pipe_mask & BIT(__p))
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
- for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++) \
+ for_each_pipe(__dev_priv, __p) \
for_each_if((__mask) & BIT(__p))
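for_each_pipe() now walks the fixed 0..I915_MAX_PIPES range and skips holes via the runtime pipe_mask, so fused-off pipes no longer need a contiguous-count assumption. Example use, assuming for_each_if from <drm/drm_util.h>:

enum pipe pipe;

/* With pipe_mask = BIT(PIPE_A) | BIT(PIPE_C) (pipe B fused off),
 * this visits A and C only. */
for_each_pipe(dev_priv, pipe)
        drm_dbg_kms(&dev_priv->drm, "pipe %c present\n",
                    pipe_name(pipe));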
#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
@@ -614,6 +615,7 @@ intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
/* modesetting */
void intel_modeset_init_hw(struct drm_i915_private *i915);
+int intel_modeset_init_noirq(struct drm_i915_private *i915);
int intel_modeset_init(struct drm_i915_private *i915);
void intel_modeset_driver_remove(struct drm_i915_private *i915);
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 46954cc7b6c0..1e6eb7f2f72d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -920,8 +920,13 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
int i;
drm_modeset_lock_all(dev);
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+ seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
+ dev_priv->dpll.ref_clks.nssc,
+ dev_priv->dpll.ref_clks.ssc);
+
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
pll->info->id);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 722399fc2ace..246e406bb385 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -183,8 +183,9 @@ static void intel_power_well_get(struct drm_i915_private *dev_priv,
static void intel_power_well_put(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN(!power_well->count, "Use count on power well %s is already zero",
- power_well->desc->name);
+ drm_WARN(&dev_priv->drm, !power_well->count,
+ "Use count on power well %s is already zero",
+ power_well->desc->name);
if (!--power_well->count)
intel_power_well_disable(dev_priv, power_well);
@@ -294,7 +295,7 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
power_well->desc->name);
/* An AUX timeout is expected if the TBT DP tunnel is down. */
- WARN_ON(!power_well->desc->hsw.is_tc_tbt);
+ drm_WARN_ON(&dev_priv->drm, !power_well->desc->hsw.is_tc_tbt);
}
}
@@ -347,8 +348,9 @@ static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
enum skl_power_gate pg)
{
/* Timeout 5us for PG#0, for other PGs 1us */
- WARN_ON(intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
- SKL_FUSE_PG_DIST_STATUS(pg), 1));
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
+ SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
@@ -423,7 +425,7 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
u32 val;
- WARN_ON(!IS_ICELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
val = intel_de_read(dev_priv, regs->driver);
intel_de_write(dev_priv, regs->driver,
@@ -455,7 +457,7 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
u32 val;
- WARN_ON(!IS_ICELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
@@ -493,7 +495,7 @@ static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
int refs = hweight64(power_well->desc->domains &
async_put_domains_mask(&dev_priv->power_domains));
- WARN_ON(refs > power_well->count);
+ drm_WARN_ON(&dev_priv->drm, refs > power_well->count);
return refs;
}
@@ -523,7 +525,7 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
continue;
dig_port = enc_to_dig_port(encoder);
- if (WARN_ON(!dig_port))
+ if (drm_WARN_ON(&dev_priv->drm, !dig_port))
continue;
if (dig_port->aux_ch != aux_ch) {
@@ -534,10 +536,10 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
break;
}
- if (WARN_ON(!dig_port))
+ if (drm_WARN_ON(&dev_priv->drm, !dig_port))
return;
- WARN_ON(!intel_tc_port_ref_held(dig_port));
+ drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}
#else
@@ -623,15 +625,19 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
- WARN_ONCE((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
- "DC9 already programmed to be enabled.\n");
- WARN_ONCE(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
- "DC5 still not disabled to enable DC9.\n");
- WARN_ONCE(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
- HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
- "Power well 2 on.\n");
- WARN_ONCE(intel_irqs_enabled(dev_priv),
- "Interrupts not disabled yet.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
+ "DC9 already programmed to be enabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled to enable DC9.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
+ HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
+ "Power well 2 on.\n");
+ drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
/*
* TODO: check for the following to verify the conditions to enter DC9
@@ -644,10 +650,12 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
- WARN_ONCE(intel_irqs_enabled(dev_priv),
- "Interrupts not disabled yet.\n");
- WARN_ONCE(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
- "DC5 still not disabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled.\n");
/*
* TODO: check for the following to verify DC9 state was indeed
@@ -756,7 +764,8 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
u32 val;
u32 mask;
- if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ state & ~dev_priv->csr.allowed_dc_mask))
state &= dev_priv->csr.allowed_dc_mask;
val = intel_de_read(dev_priv, DC_STATE_EN);
@@ -851,11 +860,13 @@ static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
- WARN_ONCE(!intel_de_read(dev_priv, CSR_PROGRAM(0)),
- "CSR program storage start is NULL\n");
- WARN_ONCE(!intel_de_read(dev_priv, CSR_SSP_BASE),
- "CSR SSP Base Not fine\n");
- WARN_ONCE(!intel_de_read(dev_priv, CSR_HTP_SKL), "CSR HTP Not fine\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ !intel_de_read(dev_priv, CSR_PROGRAM(0)),
+ "CSR program storage start is NULL\n");
+ drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE),
+ "CSR SSP Base Not fine\n");
+ drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL),
+ "CSR HTP Not fine\n");
}
static struct i915_power_well *
@@ -875,7 +886,9 @@ lookup_power_well(struct drm_i915_private *dev_priv,
* the first power well and hope the WARN gets reported so we can fix
* our driver.
*/
- WARN(1, "Power well %d not defined for this platform\n", power_well_id);
+ drm_WARN(&dev_priv->drm, 1,
+ "Power well %d not defined for this platform\n",
+ power_well_id);
return &dev_priv->power_domains.power_wells[0];
}
@@ -898,7 +911,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
mutex_lock(&power_domains->lock);
power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
- if (WARN_ON(!power_well))
+ if (drm_WARN_ON(&dev_priv->drm, !power_well))
goto unlock;
state = sanitize_target_dc_state(dev_priv, state);
@@ -926,13 +939,22 @@ unlock:
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
- bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
- SKL_DISP_PW_2);
+ enum i915_power_well_id high_pg;
- WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
+ /* Power wells at this level and above must be disabled for DC5 entry */
+ if (INTEL_GEN(dev_priv) >= 12)
+ high_pg = TGL_DISP_PW_3;
+ else
+ high_pg = SKL_DISP_PW_2;
+
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_display_power_well_is_enabled(dev_priv, high_pg),
+ "Power wells above platform's DC5 limit still enabled.\n");
- WARN_ONCE((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
- "DC5 already programmed to be enabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5),
+ "DC5 already programmed to be enabled.\n");
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
assert_csr_loaded(dev_priv);
@@ -954,10 +976,13 @@ static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
- WARN_ONCE(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
- "Backlight is not disabled.\n");
- WARN_ONCE((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
- "DC6 already programmed to be enabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ "Backlight is not disabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC6),
+ "DC6 already programmed to be enabled.\n");
assert_csr_loaded(dev_priv);
}
@@ -1045,10 +1070,11 @@ static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask;
- WARN(hw_enabled_dbuf_slices != enabled_dbuf_slices,
- "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
- hw_enabled_dbuf_slices,
- enabled_dbuf_slices);
+ drm_WARN(&dev_priv->drm,
+ hw_enabled_dbuf_slices != enabled_dbuf_slices,
+ "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
+ hw_enabled_dbuf_slices,
+ enabled_dbuf_slices);
}
static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
@@ -1064,7 +1090,9 @@ static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
/* Can't read out voltage_level so can't use intel_cdclk_changed() */
- WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_config));
+ drm_WARN_ON(&dev_priv->drm,
+ intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
+ &cdclk_config));
gen9_assert_dbuf_enabled(dev_priv);
@@ -1221,8 +1249,8 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
*/
- WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
- state != PUNIT_PWRGT_PWR_GATE(pw_idx));
+ drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
+ state != PUNIT_PWRGT_PWR_GATE(pw_idx));
if (state == ctrl)
enabled = true;
@@ -1231,7 +1259,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
* is poking at the power controls too.
*/
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
- WARN_ON(ctrl != state);
+ drm_WARN_ON(&dev_priv->drm, ctrl != state);
vlv_punit_put(dev_priv);
@@ -1260,7 +1288,7 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
intel_de_write(dev_priv, CBR1_VLV, 0);
- WARN_ON(RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
+ drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
1000));
@@ -1502,8 +1530,9 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
enum pipe pipe;
u32 tmp;
- WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
- power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
+ power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
pipe = PIPE_A;
@@ -1564,8 +1593,9 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
{
enum dpio_phy phy;
- WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
- power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
+ power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
phy = DPIO_PHY0;
@@ -1647,11 +1677,13 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
- WARN(actual != expected,
- "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
- !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
- !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
- reg, val);
+ drm_WARN(&dev_priv->drm, actual != expected,
+ "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
+ !!(actual & DPIO_ALLDL_POWERDOWN),
+ !!(actual & DPIO_ANYDL_POWERDOWN),
+ !!(expected & DPIO_ALLDL_POWERDOWN),
+ !!(expected & DPIO_ANYDL_POWERDOWN),
+ reg, val);
}
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
@@ -1733,7 +1765,8 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
*/
- WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
+ drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
+ state != DP_SSS_PWR_GATE(pipe));
enabled = state == DP_SSS_PWR_ON(pipe);
/*
@@ -1741,7 +1774,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
* is poking at the power controls too.
*/
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
- WARN_ON(ctrl << 16 != state);
+ drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
vlv_punit_put(dev_priv);
@@ -2019,12 +2052,13 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
power_domains = &dev_priv->power_domains;
- WARN(!power_domains->domain_use_count[domain],
- "Use count on domain %s is already zero\n",
- name);
- WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
- "Async disabling of domain %s is pending\n",
- name);
+ drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
+ "Use count on domain %s is already zero\n",
+ name);
+ drm_WARN(&dev_priv->drm,
+ async_put_domains_mask(power_domains) & BIT_ULL(domain),
+ "Async disabling of domain %s is pending\n",
+ name);
power_domains->domain_use_count[domain]--;
@@ -2169,7 +2203,7 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
goto out_verify;
}
- WARN_ON(power_domains->domain_use_count[domain] != 1);
+ drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
/* Let a pending work requeue itself or queue a new one. */
if (power_domains->async_put_wakeref) {
@@ -2244,7 +2278,7 @@ intel_display_power_flush_work_sync(struct drm_i915_private *i915)
verify_async_put_domains_state(power_domains);
- WARN_ON(power_domains->async_put_wakeref);
+ drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
@@ -2712,7 +2746,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_INIT))
#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- TGL_PW_2_POWER_DOMAINS | \
+ TGL_PW_3_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
@@ -3908,7 +3942,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
.name = "power well 3",
.domains = TGL_PW_3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = TGL_DISP_PW_3,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_3,
@@ -4443,8 +4477,8 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
- WARN(hweight8(req_slices) > max_slices,
- "Invalid number of dbuf slices requested\n");
+ drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
+ "Invalid number of dbuf slices requested\n");
DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices);
@@ -4486,14 +4520,22 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
- u32 val;
+ u32 mask, val;
+ mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
+ MBUS_ABOX_BT_CREDIT_POOL2_MASK |
+ MBUS_ABOX_B_CREDIT_MASK |
+ MBUS_ABOX_BW_CREDIT_MASK;
val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
- MBUS_ABOX_BT_CREDIT_POOL2(16) |
- MBUS_ABOX_B_CREDIT(1) |
- MBUS_ABOX_BW_CREDIT(1);
+ MBUS_ABOX_BT_CREDIT_POOL2(16) |
+ MBUS_ABOX_B_CREDIT(1) |
+ MBUS_ABOX_BW_CREDIT(1);
- intel_de_write(dev_priv, MBUS_ABOX_CTL, val);
+ intel_de_rmw(dev_priv, MBUS_ABOX_CTL, mask, val);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ intel_de_rmw(dev_priv, MBUS_ABOX1_CTL, mask, val);
+ intel_de_rmw(dev_priv, MBUS_ABOX2_CTL, mask, val);
+ }
}
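Switching icl_mbus_init() to intel_de_rmw() leaves any bits outside the credit fields untouched instead of overwriting the whole register. What the helper is assumed to do per register; not the exact implementation:

/* Read-modify-write: clear 'mask', set 'val', preserve the rest. */
u32 old = intel_de_read(dev_priv, MBUS_ABOX_CTL);

intel_de_write(dev_priv, MBUS_ABOX_CTL, (old & ~mask) | val);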
static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
@@ -4995,6 +5037,14 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
table[i].page_mask);
intel_de_write(dev_priv, BW_BUDDY2_PAGE_MASK,
table[i].page_mask);
+
+ /* Wa_22010178259:tgl */
+ intel_de_rmw(dev_priv, BW_BUDDY1_CTL,
+ BW_BUDDY_TLB_REQ_TIMER_MASK,
+ REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
+ intel_de_rmw(dev_priv, BW_BUDDY2_CTL,
+ BW_BUDDY_TLB_REQ_TIMER_MASK,
+ REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
}
}
@@ -5195,8 +5245,9 @@ static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0
static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
{
- WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
- "VED not power gated\n");
+ drm_WARN(&dev_priv->drm,
+ !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
+ "VED not power gated\n");
}
static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
@@ -5207,9 +5258,9 @@ static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{}
};
- WARN(!pci_dev_present(isp_ids) &&
- !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
- "ISP not power gated\n");
+ drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
+ !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
+ "ISP not power gated\n");
}
static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
@@ -5339,7 +5390,7 @@ void intel_power_domains_disable(struct drm_i915_private *i915)
{
struct i915_power_domains *power_domains = &i915->power_domains;
- WARN_ON(power_domains->wakeref);
+ drm_WARN_ON(&i915->drm, power_domains->wakeref);
power_domains->wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
@@ -5421,7 +5472,7 @@ void intel_power_domains_resume(struct drm_i915_private *i915)
intel_power_domains_init_hw(i915, true);
power_domains->display_core_suspended = false;
} else {
- WARN_ON(power_domains->wakeref);
+ drm_WARN_ON(&i915->drm, power_domains->wakeref);
power_domains->wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 601e000ffd0d..da64a5edae7a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -100,6 +100,7 @@ enum i915_power_well_id {
SKL_DISP_PW_MISC_IO,
SKL_DISP_PW_1,
SKL_DISP_PW_2,
+ TGL_DISP_PW_3,
SKL_DISP_DC_OFF,
};
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index e6147364c413..5e00e611f077 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -39,7 +39,6 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
-#include <drm/i915_drm.h>
#include <drm/i915_mei_hdcp_interface.h>
#include <media/cec-notifier.h>
@@ -642,6 +641,14 @@ struct intel_crtc_scaler_state {
/* Flag to use the scanline counter instead of the pixel counter */
#define I915_MODE_FLAG_USE_SCANLINE_COUNTER (1<<2)
+struct intel_wm_level {
+ bool enable;
+ u32 pri_val;
+ u32 spr_val;
+ u32 cur_val;
+ u32 fbc_val;
+};
+
struct intel_pipe_wm {
struct intel_wm_level wm[5];
bool fbc_wm_enabled;
@@ -650,6 +657,14 @@ struct intel_pipe_wm {
bool sprites_scaled;
};
+struct skl_wm_level {
+ u16 min_ddb_alloc;
+ u16 plane_res_b;
+ u8 plane_res_l;
+ bool plane_en;
+ bool ignore_lines;
+};
+
struct skl_plane_wm {
struct skl_wm_level wm[8];
struct skl_wm_level uv_wm[8];
@@ -1046,6 +1061,32 @@ struct intel_crtc_state {
enum transcoder mst_master_transcoder;
};
+enum intel_pipe_crc_source {
+ INTEL_PIPE_CRC_SOURCE_NONE,
+ INTEL_PIPE_CRC_SOURCE_PLANE1,
+ INTEL_PIPE_CRC_SOURCE_PLANE2,
+ INTEL_PIPE_CRC_SOURCE_PLANE3,
+ INTEL_PIPE_CRC_SOURCE_PLANE4,
+ INTEL_PIPE_CRC_SOURCE_PLANE5,
+ INTEL_PIPE_CRC_SOURCE_PLANE6,
+ INTEL_PIPE_CRC_SOURCE_PLANE7,
+ INTEL_PIPE_CRC_SOURCE_PIPE,
+ /* TV/DP on pre-gen5/vlv can't use the pipe source. */
+ INTEL_PIPE_CRC_SOURCE_TV,
+ INTEL_PIPE_CRC_SOURCE_DP_B,
+ INTEL_PIPE_CRC_SOURCE_DP_C,
+ INTEL_PIPE_CRC_SOURCE_DP_D,
+ INTEL_PIPE_CRC_SOURCE_AUTO,
+ INTEL_PIPE_CRC_SOURCE_MAX,
+};
+
+#define INTEL_PIPE_CRC_ENTRIES_NR 128
+struct intel_pipe_crc {
+ spinlock_t lock;
+ int skipped;
+ enum intel_pipe_crc_source source;
+};
+
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -1089,6 +1130,10 @@ struct intel_crtc {
/* per pipe DSB related info */
struct intel_dsb dsb;
+
+#ifdef CONFIG_DEBUG_FS
+ struct intel_pipe_crc pipe_crc;
+#endif
};
struct intel_plane {
@@ -1168,8 +1213,6 @@ struct intel_hdmi {
};
struct intel_dp_mst_encoder;
-#define DP_MAX_DOWNSTREAM_PORTS 0x10
-
/*
* enum link_m_n_set:
* When platform provides two set of M_N registers for dp, we can
@@ -1237,6 +1280,7 @@ struct intel_dp {
int max_link_rate;
/* sink or branch descriptor */
struct drm_dp_desc desc;
+ u32 edid_quirks;
struct drm_dp_aux aux;
u32 aux_busy_last_status;
u8 train_set[4];
@@ -1409,8 +1453,17 @@ vlv_pipe_to_channel(enum pipe pipe)
}
static inline struct intel_crtc *
+intel_get_first_crtc(struct drm_i915_private *dev_priv)
+{
+ return to_intel_crtc(drm_crtc_from_index(&dev_priv->drm, 0));
+}
+
+static inline struct intel_crtc *
intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
+ /* pipe_to_crtc_mapping may have holes on any 3-display-pipe system */
+ drm_WARN_ON(&dev_priv->drm,
+ !(INTEL_INFO(dev_priv)->pipe_mask & BIT(pipe)));
return dev_priv->pipe_to_crtc_mapping[pipe];
}
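Because pipe_to_crtc_mapping[] may now contain holes, the added WARN catches callers passing a pipe the hardware lacks. Hypothetical caller-side sketch:

/* On a part with pipe B fused off this WARNs and yields NULL;
 * callers iterating by mask (for_each_pipe) never get here. */
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);

if (!crtc)
        return;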
@@ -1600,11 +1653,15 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
(1 << INTEL_OUTPUT_DP_MST) |
(1 << INTEL_OUTPUT_EDP));
}
+
static inline void
intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- drm_wait_one_vblank(&dev_priv->drm, pipe);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+ drm_crtc_wait_one_vblank(&crtc->base);
}
+
static inline void
intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pipe)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 7d69b4f43fe2..0a417cd2af2b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -40,7 +40,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
-#include <drm/i915_drm.h>
#include "i915_debugfs.h"
#include "i915_drv.h"
@@ -325,7 +324,8 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
int size, max_rate = 0, vbt_max_rate;
/* This should only be done once */
- WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->source_rates || intel_dp->num_source_rates);
if (INTEL_GEN(dev_priv) >= 10) {
source_rates = cnl_rates;
@@ -757,10 +757,11 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
u32 DP;
- if (WARN(intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
- "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
- pipe_name(pipe), intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name))
+ if (drm_WARN(&dev_priv->drm,
+ intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
+ "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name))
return;
drm_dbg_kms(&dev_priv->drm,
@@ -836,13 +837,16 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (encoder->type == INTEL_OUTPUT_EDP) {
- WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
- intel_dp->active_pipe != intel_dp->pps_pipe);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->active_pipe != INVALID_PIPE &&
+ intel_dp->active_pipe !=
+ intel_dp->pps_pipe);
if (intel_dp->pps_pipe != INVALID_PIPE)
pipes &= ~(1 << intel_dp->pps_pipe);
} else {
- WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->pps_pipe != INVALID_PIPE);
if (intel_dp->active_pipe != INVALID_PIPE)
pipes &= ~(1 << intel_dp->active_pipe);
@@ -865,10 +869,10 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
/* We should never land here with regular DP ports */
- WARN_ON(!intel_dp_is_edp(intel_dp));
+ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
- WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
- intel_dp->active_pipe != intel_dp->pps_pipe);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
+ intel_dp->active_pipe != intel_dp->pps_pipe);
if (intel_dp->pps_pipe != INVALID_PIPE)
return intel_dp->pps_pipe;
@@ -879,7 +883,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
* Didn't find one. This should not happen since there
* are two power sequencers and up to two eDP ports.
*/
- if (WARN_ON(pipe == INVALID_PIPE))
+ if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
pipe = PIPE_A;
vlv_steal_power_sequencer(dev_priv, pipe);
@@ -913,7 +917,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
/* We should never land here with regular DP ports */
- WARN_ON(!intel_dp_is_edp(intel_dp));
+ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
if (!intel_dp->pps_reset)
return backlight_controller;
@@ -1018,8 +1022,10 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
- if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_GEN9_LP(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !(IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) ||
+ IS_GEN9_LP(dev_priv))))
return;
/*
@@ -1035,7 +1041,8 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
for_each_intel_dp(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->active_pipe != INVALID_PIPE);
if (encoder->type != INTEL_OUTPUT_EDP)
continue;
@@ -1170,7 +1177,8 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
return;
if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
- WARN(1, "eDP powered off while attempting aux channel communication.\n");
+ drm_WARN(&dev_priv->drm, 1,
+ "eDP powered off while attempting aux channel communication.\n");
drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
@@ -1384,8 +1392,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
const u32 status = intel_uncore_read(uncore, ch_ctl);
if (status != intel_dp->aux_busy_last_status) {
- WARN(1, "%s: not started (status 0x%08x)\n",
- intel_dp->aux.name, status);
+ drm_WARN(&i915->drm, 1,
+ "%s: not started (status 0x%08x)\n",
+ intel_dp->aux.name, status);
intel_dp->aux_busy_last_status = status;
}
@@ -1394,7 +1403,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
}
/* Only 5 data registers! */
- if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
+ if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
ret = -E2BIG;
goto out;
}
@@ -2389,7 +2398,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
DP_DPCD_QUIRK_CONSTANT_N);
int ret = 0, output_bpp;
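drm_dp_has_quirk() gained an edid_quirks argument in this series; call sites with no EDID at hand pass 0. Where an EDID is available, panel-specific quirks can be folded in, matching what intel_dp_set_edid() stores further below:

/* Sketch: combining DPCD-descriptor and EDID-derived quirks. */
u32 edid_quirks = drm_dp_get_edid_quirks(edid);
bool force = drm_dp_has_quirk(&intel_dp->desc, edid_quirks,
                              DP_QUIRK_FORCE_DPCD_BACKLIGHT);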
@@ -2679,8 +2688,8 @@ static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
- if (WARN_ON(!HAS_DDI(dev_priv) &&
- (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
+ if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
+ (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
control &= ~PANEL_UNLOCK_MASK;
control |= PANEL_UNLOCK_REGS;
}
@@ -2779,7 +2788,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- WARN_ON(intel_dp->want_panel_vdd);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);
if (!edp_have_panel_vdd(intel_dp))
return;
@@ -2876,10 +2885,10 @@ static void edp_panel_on(struct intel_dp *intel_dp)
dp_to_dig_port(intel_dp)->base.base.base.id,
dp_to_dig_port(intel_dp)->base.base.name);
- if (WARN(edp_have_panel_power(intel_dp),
- "[ENCODER:%d:%s] panel power already on\n",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name))
+ if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
+ "[ENCODER:%d:%s] panel power already on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name))
return;
wait_panel_power_cycle(intel_dp);
@@ -2937,8 +2946,9 @@ static void edp_panel_off(struct intel_dp *intel_dp)
drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
dig_port->base.base.base.id, dig_port->base.base.name);
- WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n",
- dig_port->base.base.base.id, dig_port->base.base.name);
+ drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
+ "Need [ENCODER:%d:%s] VDD to turn off panel\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
pp = ilk_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -3170,7 +3180,7 @@ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
* FIXME should really check all downstream ports...
*/
return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
- intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
+ drm_dp_is_branch(intel_dp->dpcd) &&
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}
@@ -3581,7 +3591,7 @@ static void intel_enable_dp(struct intel_encoder *encoder,
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
- if (WARN_ON(dp_reg & DP_PORT_EN))
+ if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
return;
with_pps_lock(intel_dp, wakeref) {
@@ -3652,9 +3662,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
enum pipe pipe = intel_dp->pps_pipe;
i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
- WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
- if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
return;
edp_panel_vdd_off_sync(intel_dp);
@@ -3688,10 +3698,10 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
for_each_intel_dp(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- WARN(intel_dp->active_pipe == pipe,
- "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
- pipe_name(pipe), encoder->base.base.id,
- encoder->base.name);
+ drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
+ "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
if (intel_dp->pps_pipe != pipe)
continue;
@@ -3715,7 +3725,7 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
lockdep_assert_held(&dev_priv->pps_mutex);
- WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
if (intel_dp->pps_pipe != INVALID_PIPE &&
intel_dp->pps_pipe != crtc->pipe) {
@@ -4243,7 +4253,9 @@ intel_dp_link_down(struct intel_encoder *encoder,
enum port port = encoder->port;
u32 DP = intel_dp->DP;
- if (WARN_ON((intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN) == 0))
+ if (drm_WARN_ON(&dev_priv->drm,
+ (intel_de_read(dev_priv, intel_dp->output_reg) &
+ DP_PORT_EN) == 0))
return;
drm_dbg_kms(&dev_priv->drm, "\n");
@@ -4405,7 +4417,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
/* this function is meant to be called only once */
- WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
if (!intel_dp_read_dpcd(intel_dp))
return false;
@@ -4502,7 +4514,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
* we don't care about reading it here and in intel_edp_init_dpcd().
*/
if (!intel_dp_is_edp(intel_dp) &&
- !drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) {
+ !drm_dp_has_quirk(&intel_dp->desc, 0,
+ DP_DPCD_QUIRK_NO_SINK_COUNT)) {
u8 count;
ssize_t r;
@@ -5161,7 +5174,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
crtc_state = to_intel_crtc_state(crtc->base.state);
- WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
+ drm_WARN_ON(&dev_priv->drm, !intel_crtc_has_dp_encoder(crtc_state));
if (!crtc_state->hw.active)
return 0;
@@ -5669,6 +5682,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
intel_dp->has_audio = drm_detect_monitor_audio(edid);
drm_dp_cec_set_edid(&intel_dp->aux, edid);
+ intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
}
static void
@@ -5681,6 +5695,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
intel_connector->detect_edid = NULL;
intel_dp->has_audio = false;
+ intel_dp->edid_quirks = 0;
}
static int
@@ -5696,7 +5711,8 @@ intel_dp_detect(struct drm_connector *connector,
drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+ drm_WARN_ON(&dev_priv->drm,
+ !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
/* Can't disconnect eDP */
if (intel_dp_is_edp(intel_dp))
@@ -6435,6 +6451,7 @@ static
int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
bool is_repeater, u8 content_type)
{
+ int ret;
struct hdcp2_dp_errata_stream_type stream_type_msg;
if (is_repeater)
@@ -6450,8 +6467,11 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
stream_type_msg.stream_type = content_type;
- return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
+ ret = intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
sizeof(stream_type_msg));
+
+ return ret < 0 ? ret : 0;
}
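intel_dp_hdcp2_write_msg() follows the DP AUX convention of returning the number of bytes written on success, while this hook's contract is 0 or a negative error, hence the squash of positive returns. The generic pattern, with write_msg() as a hypothetical transfer:

ssize_t n = write_msg(port, buf, len); /* bytes written or -errno */

return n < 0 ? n : 0; /* callers only want 0 on success */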
static
@@ -6622,7 +6642,7 @@ static int intel_modeset_tile_group(struct intel_atomic_state *state,
if (ret)
break;
}
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_end(&conn_iter);
return ret;
}
@@ -6662,7 +6682,7 @@ static int intel_modeset_affected_transcoders(struct intel_atomic_state *state,
transcoders &= ~BIT(crtc_state->cpu_transcoder);
}
- WARN_ON(transcoders != 0);
+ drm_WARN_ON(&dev_priv->drm, transcoders != 0);
return 0;
}
@@ -7043,7 +7063,8 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
if (force_disable_vdd) {
u32 pp = ilk_get_pp_control(intel_dp);
- WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
+ drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
+ "Panel power already on\n");
if (pp & EDP_FORCE_VDD)
drm_dbg_kms(&dev_priv->drm,
@@ -7519,7 +7540,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* with an already powered-on LVDS power sequencer.
*/
if (intel_get_lvds_encoder(dev_priv)) {
- WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+ drm_WARN_ON(dev,
+ !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
drm_info(&dev_priv->drm,
"LVDS was detected, not registering eDP\n");
@@ -7546,8 +7568,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
edid = drm_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
- drm_connector_update_edid_property(connector,
- edid);
+ drm_connector_update_edid_property(connector, edid);
+ intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
} else {
kfree(edid);
edid = ERR_PTR(-EINVAL);
@@ -7592,9 +7614,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_connector->panel.backlight.power = intel_edp_backlight_power;
intel_panel_setup_backlight(connector, pipe);
- if (fixed_mode)
- drm_connector_init_panel_orientation_property(
- connector, fixed_mode->hdisplay, fixed_mode->vdisplay);
+ if (fixed_mode) {
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ dev_priv->vbt.orientation,
+ fixed_mode->hdisplay, fixed_mode->vdisplay);
+ }
return true;
@@ -7650,10 +7674,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
INIT_WORK(&intel_connector->modeset_retry_work,
intel_dp_modeset_retry_work_fn);
- if (WARN(intel_dig_port->max_lanes < 1,
- "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
- intel_dig_port->max_lanes, intel_encoder->base.base.id,
- intel_encoder->base.name))
+ if (drm_WARN(dev, intel_dig_port->max_lanes < 1,
+ "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
+ intel_dig_port->max_lanes, intel_encoder->base.base.id,
+ intel_encoder->base.name))
return false;
intel_dp_set_source_rates(intel_dp);
@@ -7671,7 +7695,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
* Currently we don't support eDP on TypeC ports, although in
* theory it could work on TypeC legacy ports.
*/
- WARN_ON(intel_phy_is_tc(dev_priv, phy));
+ drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
type = DRM_MODE_CONNECTOR_eDP;
} else {
type = DRM_MODE_CONNECTOR_DisplayPort;
@@ -7689,9 +7713,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_encoder->type = INTEL_OUTPUT_EDP;
/* eDP only on port B and/or C on vlv/chv */
- if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_dp_is_edp(intel_dp) &&
- port != PORT_B && port != PORT_C))
+ if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) &&
+ intel_dp_is_edp(intel_dp) &&
+ port != PORT_B && port != PORT_C))
return false;
drm_dbg_kms(&dev_priv->drm,
@@ -7710,6 +7735,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
connector->ycbcr_420_allowed = true;
intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+ intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
intel_dp_aux_init(intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 3da166054788..0c7be8ed1423 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
#include "i915_reg.h"
enum pipe;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 48276237b362..3e706bb850a8 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -328,15 +328,31 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct intel_panel *panel = &intel_connector->panel;
- struct drm_i915_private *dev_priv = to_i915(intel_connector->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder);
+ struct drm_device *dev = intel_connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (i915_modparams.enable_dpcd_backlight == 0 ||
- (i915_modparams.enable_dpcd_backlight == -1 &&
- dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE))
+ !intel_dp_aux_display_control_capable(intel_connector))
return -ENODEV;
- if (!intel_dp_aux_display_control_capable(intel_connector))
+ /*
+ * There are a lot of machines whose VBIOS doesn't properly advertise
+ * the backlight control interface, :\
+ */
+ if (dev_priv->vbt.backlight.type !=
+ INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
+ !drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
+ DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
+ DRM_DEV_INFO(dev->dev,
+ "Panel advertises DPCD backlight support, but "
+ "VBT disagrees. If your backlight controls "
+ "don't work try booting with "
+ "i915.enable_dpcd_backlight=1. If your machine "
+ "needs this, please file a _new_ bug report on "
+ "drm/i915, see " FDO_BUG_URL " for details.\n");
return -ENODEV;
+ }
panel->backlight.setup = intel_dp_aux_setup_backlight;
panel->backlight.enable = intel_dp_aux_enable_backlight;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index d7bfa7c350e9..44f3fd251ca1 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -50,7 +50,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
void *port = connector->port;
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
DP_DPCD_QUIRK_CONSTANT_N);
int bpp, slots = -EINVAL;
@@ -548,12 +548,41 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
return ret;
}
+static int
+intel_dp_mst_connector_late_register(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ int ret;
+
+ ret = drm_dp_mst_connector_late_register(connector,
+ intel_connector->port);
+ if (ret < 0)
+ return ret;
+
+ ret = intel_connector_register(connector);
+ if (ret < 0)
+ drm_dp_mst_connector_early_unregister(connector,
+ intel_connector->port);
+
+ return ret;
+}
+
+static void
+intel_dp_mst_connector_early_unregister(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ intel_connector_unregister(connector);
+ drm_dp_mst_connector_early_unregister(connector,
+ intel_connector->port);
+}
+
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
- .late_register = intel_connector_register,
- .early_unregister = intel_connector_unregister,
+ .late_register = intel_dp_mst_connector_late_register,
+ .early_unregister = intel_dp_mst_connector_early_unregister,
.destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
@@ -719,36 +748,8 @@ err:
return NULL;
}
-static void intel_dp_register_mst_connector(struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
-
- if (dev_priv->fbdev)
- drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper,
- connector);
-
- drm_connector_register(connector);
-}
-
-static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name);
- drm_connector_unregister(connector);
-
- if (dev_priv->fbdev)
- drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper,
- connector);
-
- drm_connector_put(connector);
-}
-
static const struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = intel_dp_add_mst_connector,
- .register_connector = intel_dp_register_mst_connector,
- .destroy_connector = intel_dp_destroy_mst_connector,
};
static struct intel_dp_mst_encoder *
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index e5bfe5245276..2d47f1f756a2 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -45,6 +45,22 @@
* commit phase.
*/
+struct intel_dpll_mgr {
+ const struct dpll_info *dpll_info;
+
+ bool (*get_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+ void (*put_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*update_active_dpll)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+ void (*update_ref_clks)(struct drm_i915_private *i915);
+ void (*dump_hw_state)(struct drm_i915_private *dev_priv,
+ const struct intel_dpll_hw_state *hw_state);
+};
+
static void
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll_state *shared_dpll)
@@ -52,8 +68,8 @@ intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
enum intel_dpll_id i;
/* Copy shared dpll state */
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
shared_dpll[i] = pll->state;
}
@@ -88,7 +104,7 @@ struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
enum intel_dpll_id id)
{
- return &dev_priv->shared_dplls[id];
+ return &dev_priv->dpll.shared_dplls[id];
}
/**
@@ -103,11 +119,14 @@ enum intel_dpll_id
intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- if (drm_WARN_ON(&dev_priv->drm, pll < dev_priv->shared_dplls ||
- pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
+ long pll_idx = pll - dev_priv->dpll.shared_dplls;
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ pll_idx < 0 ||
+ pll_idx >= dev_priv->dpll.num_shared_dpll))
return -1;
- return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
+ return pll_idx;
}
/* For ILK+ */
@@ -144,7 +163,7 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
return;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask);
if (!pll->active_mask) {
drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
@@ -153,7 +172,7 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
pll->info->funcs->prepare(dev_priv, pll);
}
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
/**
@@ -173,7 +192,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
return;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
old_mask = pll->active_mask;
if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) ||
@@ -199,7 +218,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
pll->on = true;
out:
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
/**
@@ -222,7 +241,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
if (pll == NULL)
return;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask)))
goto out;
@@ -243,7 +262,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
pll->on = false;
out:
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static struct intel_shared_dpll *
@@ -262,7 +281,7 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
- pll = &dev_priv->shared_dplls[i];
+ pll = &dev_priv->dpll.shared_dplls[i];
/* Only want to check enabled timings first */
if (shared_dpll[i].crtc_mask == 0) {
@@ -362,9 +381,9 @@ void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
if (!state->dpll_set)
return;
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
struct intel_shared_dpll *pll =
- &dev_priv->shared_dplls[i];
+ &dev_priv->dpll.shared_dplls[i];
swap(pll->state, shared_dpll[i]);
}
@@ -462,7 +481,7 @@ static bool ibx_get_dpll(struct intel_atomic_state *state,
if (HAS_PCH_IBX(dev_priv)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
i = (enum intel_dpll_id) crtc->pipe;
- pll = &dev_priv->shared_dplls[i];
+ pll = &dev_priv->dpll.shared_dplls[i];
drm_dbg_kms(&dev_priv->drm,
"[CRTC:%d:%s] using pre-allocated %s\n",
@@ -506,6 +525,19 @@ static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
.get_hw_state = ibx_pch_dpll_get_hw_state,
};
+static const struct dpll_info pch_plls[] = {
+ { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
+ { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr pch_pll_mgr = {
+ .dpll_info = pch_plls,
+ .get_dplls = ibx_get_dpll,
+ .put_dplls = intel_put_dpll,
+ .dump_hw_state = ibx_dump_hw_state,
+};
+
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
@@ -818,8 +850,8 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
}
static struct intel_shared_dpll *
-hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -846,8 +878,47 @@ hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
return pll;
}
+static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
+ const struct intel_shared_dpll *pll)
+{
+ int refclk;
+ int n, p, r;
+ u32 wrpll = pll->state.hw_state.wrpll;
+
+ switch (wrpll & WRPLL_REF_MASK) {
+ case WRPLL_REF_SPECIAL_HSW:
+ /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
+ if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
+ refclk = dev_priv->dpll.ref_clks.nssc;
+ break;
+ }
+ /* fall through */
+ case WRPLL_REF_PCH_SSC:
+ /*
+ * We could calculate spread here, but our checking
+ * code only cares about 5% accuracy, and spread is a max of
+ * 0.5% downspread.
+ */
+ refclk = dev_priv->dpll.ref_clks.ssc;
+ break;
+ case WRPLL_REF_LCPLL:
+ refclk = 2700000;
+ break;
+ default:
+ MISSING_CASE(wrpll);
+ return 0;
+ }
+
+ r = wrpll & WRPLL_DIVIDER_REF_MASK;
+ p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
+ n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
+
+ /* Convert to kHz; p & r have a fixed-point portion */
+ return (refclk * n / 10) / (p * r) * 2;
+}
+
static struct intel_shared_dpll *
-hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
+hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct intel_shared_dpll *pll;
@@ -878,6 +949,69 @@ hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
return pll;
}
+static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ int link_clock = 0;
+
+ switch (pll->info->id) {
+ case DPLL_ID_LCPLL_810:
+ link_clock = 81000;
+ break;
+ case DPLL_ID_LCPLL_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_ID_LCPLL_2700:
+ link_clock = 270000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "bad port clock sel\n");
+ break;
+ }
+
+ return link_clock * 2;
+}
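For reference, the values above follow i915's port_clock convention (kHz, with the DP link bit rate divided by 10), and the LCPLL IDs encode half that value: DPLL_ID_LCPLL_1350 returns 135000 * 2 = 270000 for a 2.7 Gbps (HBR) link, and DPLL_ID_LCPLL_2700 returns 540000 for 5.4 Gbps (HBR2).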
+
+static struct intel_shared_dpll *
+hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (WARN_ON(crtc_state->port_clock / 2 != 135000))
+ return NULL;
+
+ crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
+ SPLL_REF_MUXED_SSC;
+
+ return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_SPLL));
+}
+
+static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ int link_clock = 0;
+
+ switch (pll->state.hw_state.spll & SPLL_FREQ_MASK) {
+ case SPLL_FREQ_810MHz:
+ link_clock = 81000;
+ break;
+ case SPLL_FREQ_1350MHz:
+ link_clock = 135000;
+ break;
+ case SPLL_FREQ_2700MHz:
+ link_clock = 270000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "bad spll freq\n");
+ break;
+ }
+
+ return link_clock * 2;
+}
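This decode is consistent with hsw_ddi_spll_get_dpll() above, which only accepts port_clock / 2 == 135000 when reserving the SPLL: SPLL_FREQ_1350MHz maps back to 135000 * 2 = 270000 here, so a round trip through reserve and readout reproduces the same port clock.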
+
static bool hsw_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -889,23 +1023,14 @@ static bool hsw_get_dpll(struct intel_atomic_state *state,
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- pll = hsw_ddi_hdmi_get_dpll(state, crtc);
- } else if (intel_crtc_has_dp_encoder(crtc_state)) {
- pll = hsw_ddi_dp_get_dpll(crtc_state);
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
- if (WARN_ON(crtc_state->port_clock / 2 != 135000))
- return false;
-
- crtc_state->dpll_hw_state.spll =
- SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
-
- pll = intel_find_shared_dpll(state, crtc,
- &crtc_state->dpll_hw_state,
- BIT(DPLL_ID_SPLL));
- } else {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ pll = hsw_ddi_wrpll_get_dpll(state, crtc);
+ else if (intel_crtc_has_dp_encoder(crtc_state))
+ pll = hsw_ddi_lcpll_get_dpll(crtc_state);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ pll = hsw_ddi_spll_get_dpll(state, crtc);
+ else
return false;
- }
if (!pll)
return false;
@@ -918,6 +1043,16 @@ static bool hsw_get_dpll(struct intel_atomic_state *state,
return true;
}
+static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ i915->dpll.ref_clks.ssc = 135000;
+ /* Non-SSC is only used on non-ULT HSW. */
+ if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
+ i915->dpll.ref_clks.nssc = 24000;
+ else
+ i915->dpll.ref_clks.nssc = 135000;
+}
+
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
@@ -929,12 +1064,14 @@ static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
.enable = hsw_ddi_wrpll_enable,
.disable = hsw_ddi_wrpll_disable,
.get_hw_state = hsw_ddi_wrpll_get_hw_state,
+ .get_freq = hsw_ddi_wrpll_get_freq,
};
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
.enable = hsw_ddi_spll_enable,
.disable = hsw_ddi_spll_disable,
.get_hw_state = hsw_ddi_spll_get_hw_state,
+ .get_freq = hsw_ddi_spll_get_freq,
};
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
@@ -958,6 +1095,25 @@ static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
.enable = hsw_ddi_lcpll_enable,
.disable = hsw_ddi_lcpll_disable,
.get_hw_state = hsw_ddi_lcpll_get_hw_state,
+ .get_freq = hsw_ddi_lcpll_get_freq,
+};
+
+static const struct dpll_info hsw_plls[] = {
+ { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
+ { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
+ { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
+ { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
+ { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
+ { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
+ { },
+};
+
+static const struct intel_dpll_mgr hsw_pll_mgr = {
+ .dpll_info = hsw_plls,
+ .get_dplls = hsw_get_dpll,
+ .put_dplls = intel_put_dpll,
+ .update_ref_clks = hsw_update_dpll_ref_clks,
+ .dump_hw_state = hsw_dump_hw_state,
};
struct skl_dpll_regs {
@@ -1230,6 +1386,7 @@ struct skl_wrpll_params {
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
u64 afe_clock,
+ int ref_clock,
u64 central_freq,
u32 p0, u32 p1, u32 p2)
{
@@ -1289,14 +1446,15 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
* Intermediate values are in Hz.
* Divide by MHz to match bspec
*/
- params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
+ params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
params->dco_fraction =
- div_u64((div_u64(dco_freq, 24) -
+ div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
+ int ref_clock,
struct skl_wrpll_params *wrpll_params)
{
u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
@@ -1362,14 +1520,15 @@ skip_remaining_dividers:
*/
p0 = p1 = p2 = 0;
skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
- skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
- p0, p1, p2);
+ skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
+ ctx.central_freq, p0, p1, p2);
return true;
}
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
u32 ctrl1, cfgcr1, cfgcr2;
struct skl_wrpll_params wrpll_params = { 0, };
@@ -1382,6 +1541,7 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
+ i915->dpll.ref_clks.nssc,
&wrpll_params))
return false;
@@ -1404,6 +1564,64 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
return true;
}
+static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+ int ref_clock = i915->dpll.ref_clks.nssc;
+ u32 p0, p1, p2, dco_freq;
+
+ p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
+ p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
+
+ if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
+ p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
+ else
+ p1 = 1;
+
+ switch (p0) {
+ case DPLL_CFGCR2_PDIV_1:
+ p0 = 1;
+ break;
+ case DPLL_CFGCR2_PDIV_2:
+ p0 = 2;
+ break;
+ case DPLL_CFGCR2_PDIV_3:
+ p0 = 3;
+ break;
+ case DPLL_CFGCR2_PDIV_7:
+ p0 = 7;
+ break;
+ }
+
+ switch (p2) {
+ case DPLL_CFGCR2_KDIV_5:
+ p2 = 5;
+ break;
+ case DPLL_CFGCR2_KDIV_2:
+ p2 = 2;
+ break;
+ case DPLL_CFGCR2_KDIV_3:
+ p2 = 3;
+ break;
+ case DPLL_CFGCR2_KDIV_1:
+ p2 = 1;
+ break;
+ }
+
+ dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
+ ref_clock;
+
+ dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
+ ref_clock / 0x8000;
+
+ if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
+ return 0;
+
+ return dco_freq / (p0 * p1 * p2 * 5);
+}
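A worked example of the DCO arithmetic above, with illustrative register values rather than a specific mode-table entry: the fractional field is scaled by 2^15 (0x8000), so with ref_clock = 24000 kHz, a DCO integer of 371 and a DCO fraction of 0x2000 (0.25),

	dco_freq = 371 * 24000 + (0x2000 * 24000) / 0x8000
	         = 8,904,000 + 6,000 = 8,910,000 kHz

and with p0 = 3, p1 = 1, p2 = 1 the function returns 8,910,000 / (3 * 1 * 1 * 5) = 594,000 kHz, the familiar 594 MHz HDMI 2.0 pixel clock.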
+
static bool
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
@@ -1444,6 +1662,40 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return true;
}
+static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ int link_clock = 0;
+
+ switch ((pll->state.hw_state.ctrl1 &
+ DPLL_CTRL1_LINK_RATE_MASK(0)) >>
+ DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
+ case DPLL_CTRL1_LINK_RATE_810:
+ link_clock = 81000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1080:
+ link_clock = 108000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1620:
+ link_clock = 162000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_2160:
+ link_clock = 216000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_2700:
+ link_clock = 270000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
+ break;
+ }
+
+ return link_clock * 2;
+}
+
static bool skl_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -1493,6 +1745,25 @@ static bool skl_get_dpll(struct intel_atomic_state *state,
return true;
}
+static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ /*
+ * The ctrl1 register is already shifted for each PLL; just use 0 to
+ * get the internal shift for each field
+ */
+ if (pll->state.hw_state.ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
+ return skl_ddi_wrpll_get_freq(i915, pll);
+ else
+ return skl_ddi_lcpll_get_freq(i915, pll);
+}
+
+static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ /* No SSC ref */
+ i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+}
+
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
@@ -1507,12 +1778,30 @@ static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
.enable = skl_ddi_pll_enable,
.disable = skl_ddi_pll_disable,
.get_hw_state = skl_ddi_pll_get_hw_state,
+ .get_freq = skl_ddi_pll_get_freq,
};
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
.enable = skl_ddi_dpll0_enable,
.disable = skl_ddi_dpll0_disable,
.get_hw_state = skl_ddi_dpll0_get_hw_state,
+ .get_freq = skl_ddi_pll_get_freq,
+};
+
+static const struct dpll_info skl_plls[] = {
+ { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
+ { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
+ { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
+ { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr skl_pll_mgr = {
+ .dpll_info = skl_plls,
+ .get_dplls = skl_get_dpll,
+ .put_dplls = intel_put_dpll,
+ .update_ref_clks = skl_update_dpll_ref_clks,
+ .dump_hw_state = skl_dump_hw_state,
};
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
@@ -1903,6 +2192,23 @@ bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
+static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+ struct dpll clock;
+
+ clock.m1 = 2;
+ clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
+ if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+ clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
+ clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
+ clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
+ clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
+
+ return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
+}
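A note on the fixed-point handling above (the encoding is inferred from the mask/shift usage in this function): m2 carries a 22-bit fraction, so the integer bits are shifted up by 22 and the fractional register bits are OR'd into the low bits when PORT_PLL_M2_FRAC_ENABLE is set; e.g. an effective divider of 32.5 would be represented as (32 << 22) | (1 << 21). chv_calc_dpll_params() divides the 2^22 scale back out when computing the VCO before applying the p1/p2 post dividers.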
+
static bool bxt_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -1936,6 +2242,13 @@ static bool bxt_get_dpll(struct intel_atomic_state *state,
return true;
}
+static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ i915->dpll.ref_clks.ssc = 100000;
+ i915->dpll.ref_clks.nssc = 100000;
+ /* DSI non-SSC ref 19.2MHz */
+}
+
static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
@@ -1959,66 +2272,7 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
.enable = bxt_ddi_pll_enable,
.disable = bxt_ddi_pll_disable,
.get_hw_state = bxt_ddi_pll_get_hw_state,
-};
-
-struct intel_dpll_mgr {
- const struct dpll_info *dpll_info;
-
- bool (*get_dplls)(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder);
- void (*put_dplls)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*update_active_dpll)(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder);
- void (*dump_hw_state)(struct drm_i915_private *dev_priv,
- const struct intel_dpll_hw_state *hw_state);
-};
-
-static const struct dpll_info pch_plls[] = {
- { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
- { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
- { },
-};
-
-static const struct intel_dpll_mgr pch_pll_mgr = {
- .dpll_info = pch_plls,
- .get_dplls = ibx_get_dpll,
- .put_dplls = intel_put_dpll,
- .dump_hw_state = ibx_dump_hw_state,
-};
-
-static const struct dpll_info hsw_plls[] = {
- { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
- { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
- { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
- { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
- { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
- { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
- { },
-};
-
-static const struct intel_dpll_mgr hsw_pll_mgr = {
- .dpll_info = hsw_plls,
- .get_dplls = hsw_get_dpll,
- .put_dplls = intel_put_dpll,
- .dump_hw_state = hsw_dump_hw_state,
-};
-
-static const struct dpll_info skl_plls[] = {
- { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
- { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
- { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
- { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
- { },
-};
-
-static const struct intel_dpll_mgr skl_pll_mgr = {
- .dpll_info = skl_plls,
- .get_dplls = skl_get_dpll,
- .put_dplls = intel_put_dpll,
- .dump_hw_state = skl_dump_hw_state,
+ .get_freq = bxt_ddi_pll_get_freq,
};
static const struct dpll_info bxt_plls[] = {
@@ -2032,6 +2286,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
.dpll_info = bxt_plls,
.get_dplls = bxt_get_dpll,
.put_dplls = intel_put_dpll,
+ .update_ref_clks = bxt_update_dpll_ref_clks,
.dump_hw_state = bxt_dump_hw_state,
};
@@ -2275,27 +2530,12 @@ static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
params->dco_fraction = dco & 0x7fff;
}
-int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
-{
- int ref_clock = dev_priv->cdclk.hw.ref;
-
- /*
- * For ICL+, the spec states: if reference frequency is 38.4,
- * use 19.2 because the DPLL automatically divides that by 2.
- */
- if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
- ref_clock = 19200;
-
- return ref_clock;
-}
-
static bool
-cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
- struct skl_wrpll_params *wrpll_params)
+__cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *wrpll_params,
+ int ref_clock)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 afe_clock = crtc_state->port_clock * 5;
- u32 ref_clock;
u32 dco_min = 7998000;
u32 dco_max = 10000000;
u32 dco_mid = (dco_min + dco_max) / 2;
@@ -2327,15 +2567,22 @@ cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
return false;
cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
-
- ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
-
cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
pdiv, qdiv, kdiv);
return true;
}
+static bool
+cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *wrpll_params)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
+ i915->dpll.ref_clks.nssc);
+}
+
static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
u32 cfgcr0, cfgcr1;
@@ -2363,6 +2610,68 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
return true;
}
+static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
+ const struct intel_shared_dpll *pll,
+ int ref_clock)
+{
+ const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+ u32 p0, p1, p2, dco_freq;
+
+ p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
+ p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
+
+ if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
+ p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+ DPLL_CFGCR1_QDIV_RATIO_SHIFT;
+ else
+ p1 = 1;
+
+ switch (p0) {
+ case DPLL_CFGCR1_PDIV_2:
+ p0 = 2;
+ break;
+ case DPLL_CFGCR1_PDIV_3:
+ p0 = 3;
+ break;
+ case DPLL_CFGCR1_PDIV_5:
+ p0 = 5;
+ break;
+ case DPLL_CFGCR1_PDIV_7:
+ p0 = 7;
+ break;
+ }
+
+ switch (p2) {
+ case DPLL_CFGCR1_KDIV_1:
+ p2 = 1;
+ break;
+ case DPLL_CFGCR1_KDIV_2:
+ p2 = 2;
+ break;
+ case DPLL_CFGCR1_KDIV_3:
+ p2 = 3;
+ break;
+ }
+
+ dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
+ ref_clock;
+
+ dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+ DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
+
+ if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
+ return 0;
+
+ return dco_freq / (p0 * p1 * p2 * 5);
+}
+
+static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ return __cnl_ddi_wrpll_get_freq(i915, pll, i915->dpll.ref_clks.nssc);
+}
+
static bool
cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
@@ -2408,6 +2717,44 @@ cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return true;
}
+static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ int link_clock = 0;
+
+ switch (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) {
+ case DPLL_CFGCR0_LINK_RATE_810:
+ link_clock = 81000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1080:
+ link_clock = 108000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1620:
+ link_clock = 162000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_2160:
+ link_clock = 216000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_2700:
+ link_clock = 270000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_3240:
+ link_clock = 324000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_4050:
+ link_clock = 405000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
+ break;
+ }
+
+ return link_clock * 2;
+}
+
static bool cnl_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -2457,6 +2804,21 @@ static bool cnl_get_dpll(struct intel_atomic_state *state,
return true;
}
+static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE)
+ return cnl_ddi_wrpll_get_freq(i915, pll);
+ else
+ return cnl_ddi_lcpll_get_freq(i915, pll);
+}
+
+static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ /* No SSC reference */
+ i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+}
+
static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
@@ -2470,6 +2832,7 @@ static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
.enable = cnl_ddi_pll_enable,
.disable = cnl_ddi_pll_disable,
.get_hw_state = cnl_ddi_pll_get_hw_state,
+ .get_freq = cnl_ddi_pll_get_freq,
};
static const struct dpll_info cnl_plls[] = {
@@ -2483,6 +2846,7 @@ static const struct intel_dpll_mgr cnl_pll_mgr = {
.dpll_info = cnl_plls,
.get_dplls = cnl_get_dpll,
.put_dplls = intel_put_dpll,
+ .update_ref_clks = cnl_update_dpll_ref_clks,
.dump_hw_state = cnl_dump_hw_state,
};
@@ -2578,7 +2942,7 @@ static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct icl_combo_pll_params *params =
- dev_priv->cdclk.hw.ref == 24000 ?
+ dev_priv->dpll.ref_clks.nssc == 24000 ?
icl_dp_combo_pll_24MHz_values :
icl_dp_combo_pll_19_2MHz_values;
int clock = crtc_state->port_clock;
@@ -2601,9 +2965,9 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (INTEL_GEN(dev_priv) >= 12) {
- switch (dev_priv->cdclk.hw.ref) {
+ switch (dev_priv->dpll.ref_clks.nssc) {
default:
- MISSING_CASE(dev_priv->cdclk.hw.ref);
+ MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
/* fall-through */
case 19200:
case 38400:
@@ -2614,9 +2978,9 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
break;
}
} else {
- switch (dev_priv->cdclk.hw.ref) {
+ switch (dev_priv->dpll.ref_clks.nssc) {
default:
- MISSING_CASE(dev_priv->cdclk.hw.ref);
+ MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
/* fall-through */
case 19200:
case 38400:
@@ -2631,6 +2995,49 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
return true;
}
+static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ /*
+ * The PLL outputs multiple frequencies at the same time; the selection
+ * is made at the DDI clock mux level.
+ */
+ drm_WARN_ON(&i915->drm, 1);
+
+ return 0;
+}
+
+static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
+{
+ int ref_clock = i915->dpll.ref_clks.nssc;
+
+ /*
+ * For ICL+, the spec states: if reference frequency is 38.4,
+ * use 19.2 because the DPLL automatically divides that by 2.
+ */
+ if (ref_clock == 38400)
+ ref_clock = 19200;
+
+ return ref_clock;
+}
+
+static bool
+icl_calc_wrpll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *wrpll_params)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
+ icl_wrpll_ref_clock(i915));
+}
+
+static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ return __cnl_ddi_wrpll_get_freq(i915, pll,
+ icl_wrpll_ref_clock(i915));
+}
+
static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder,
struct intel_dpll_hw_state *pll_state)
@@ -2645,7 +3052,7 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
ret = icl_calc_tbt_pll(crtc_state, &pll_params);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
+ ret = icl_calc_wrpll(crtc_state, &pll_params);
else
ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
@@ -2768,7 +3175,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
struct intel_dpll_hw_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int refclk_khz = dev_priv->cdclk.hw.ref;
+ int refclk_khz = dev_priv->dpll.ref_clks.nssc;
int clock = crtc_state->port_clock;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
u32 iref_ndiv, iref_trim, iref_pulse_w;
@@ -2969,6 +3376,78 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
return true;
}
+static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
+ const struct intel_shared_dpll *pll)
+{
+ const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+ u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
+ u64 tmp;
+
+ ref_clock = dev_priv->dpll.ref_clks.nssc;
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
+ m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
+ m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
+
+ if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
+ m2_frac = pll_state->mg_pll_bias &
+ DKL_PLL_BIAS_FBDIV_FRAC_MASK;
+ m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
+ } else {
+ m2_frac = 0;
+ }
+ } else {
+ m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
+ m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
+
+ if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
+ m2_frac = pll_state->mg_pll_div0 &
+ MG_PLL_DIV0_FBDIV_FRAC_MASK;
+ m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
+ } else {
+ m2_frac = 0;
+ }
+ }
+
+ switch (pll_state->mg_clktop2_hsclkctl &
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
+ div1 = 2;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
+ div1 = 3;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
+ div1 = 5;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
+ div1 = 7;
+ break;
+ default:
+ MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
+ return 0;
+ }
+
+ div2 = (pll_state->mg_clktop2_hsclkctl &
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
+
+ /* A div2 value of 0 is the same as 1, i.e. no divider */
+ if (div2 == 0)
+ div2 = 1;
+
+ /*
+ * Adjust the original formula to delay the division by 2^22 in order to
+ * minimize possible rounding errors.
+ */
+ tmp = (u64)m1 * m2_int * ref_clock +
+ (((u64)m1 * m2_frac * ref_clock) >> 22);
+ tmp = div_u64(tmp, 5 * div1 * div2);
+
+ return tmp;
+}
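With illustrative numbers, the scaling works out as follows: for ref_clock = 38400, m1 = 2, m2_int = 130, m2_frac = 0, div1 = 5 and div2 = 1, tmp = 2 * 130 * 38400 = 9,984,000, and dividing by 5 * 5 * 1 yields 399,360 kHz. The (... >> 22) term only contributes when the fractional feedback divider is enabled; deferring that shift until after the multiply is what preserves the sub-kHz resolution of m2_frac.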
+
/**
* icl_set_active_port_dpll - select the active port DPLL for a given CRTC
* @crtc_state: state for the CRTC to select the DPLL for
@@ -3201,7 +3680,7 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->mg_pll_tdc_coldst_bias =
intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
- if (dev_priv->cdclk.hw.ref == 38400) {
+ if (dev_priv->dpll.ref_clks.nssc == 38400) {
hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
hw_state->mg_pll_bias_mask = 0;
} else {
@@ -3682,6 +4161,12 @@ static void mg_pll_disable(struct drm_i915_private *dev_priv,
icl_pll_disable(dev_priv, pll, enable_reg);
}
+static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ /* No SSC ref */
+ i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+}
+
static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
@@ -3709,18 +4194,21 @@ static const struct intel_shared_dpll_funcs combo_pll_funcs = {
.enable = combo_pll_enable,
.disable = combo_pll_disable,
.get_hw_state = combo_pll_get_hw_state,
+ .get_freq = icl_ddi_combo_pll_get_freq,
};
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
.enable = tbt_pll_enable,
.disable = tbt_pll_disable,
.get_hw_state = tbt_pll_get_hw_state,
+ .get_freq = icl_ddi_tbt_pll_get_freq,
};
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
.enable = mg_pll_enable,
.disable = mg_pll_disable,
.get_hw_state = mg_pll_get_hw_state,
+ .get_freq = icl_ddi_mg_pll_get_freq,
};
static const struct dpll_info icl_plls[] = {
@@ -3739,6 +4227,7 @@ static const struct intel_dpll_mgr icl_pll_mgr = {
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_active_dpll = icl_update_active_dpll,
+ .update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
};
@@ -3753,6 +4242,7 @@ static const struct intel_dpll_mgr ehl_pll_mgr = {
.dpll_info = ehl_plls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
+ .update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
};
@@ -3760,6 +4250,7 @@ static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
.enable = mg_pll_enable,
.disable = mg_pll_disable,
.get_hw_state = dkl_pll_get_hw_state,
+ .get_freq = icl_ddi_mg_pll_get_freq,
};
static const struct dpll_info tgl_plls[] = {
@@ -3780,6 +4271,7 @@ static const struct intel_dpll_mgr tgl_pll_mgr = {
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_active_dpll = icl_update_active_dpll,
+ .update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
};
@@ -3814,7 +4306,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
dpll_mgr = &pch_pll_mgr;
if (!dpll_mgr) {
- dev_priv->num_shared_dpll = 0;
+ dev_priv->dpll.num_shared_dpll = 0;
return;
}
@@ -3822,14 +4314,14 @@ void intel_shared_dpll_init(struct drm_device *dev)
for (i = 0; dpll_info[i].name; i++) {
drm_WARN_ON(dev, i != dpll_info[i].id);
- dev_priv->shared_dplls[i].info = &dpll_info[i];
+ dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
}
- dev_priv->dpll_mgr = dpll_mgr;
- dev_priv->num_shared_dpll = i;
- mutex_init(&dev_priv->dpll_lock);
+ dev_priv->dpll.mgr = dpll_mgr;
+ dev_priv->dpll.num_shared_dpll = i;
+ mutex_init(&dev_priv->dpll.lock);
- BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
+ BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
}
/**
@@ -3856,7 +4348,7 @@ bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
return false;
@@ -3879,7 +4371,7 @@ void intel_release_shared_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
/*
* FIXME: this function is called for every platform having a
@@ -3908,7 +4400,7 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
return;
@@ -3917,6 +4409,84 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
}
/**
+ * intel_dpll_get_freq - calculate the DPLL's output frequency
+ * @i915: i915 device
+ * @pll: DPLL for which to calculate the output frequency
+ *
+ * Return the output frequency corresponding to @pll's current state.
+ */
+int intel_dpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
+ return 0;
+
+ return pll->info->funcs->get_freq(i915, pll);
+}
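A minimal caller sketch for the new helper (the wrapper name below is hypothetical; intel_dpll_get_freq() and crtc_state->shared_dpll are the only pieces taken from the driver):

	/*
	 * Hypothetical helper: report the clock generated by the DPLL
	 * driving a CRTC, in kHz. Returns 0 when no shared DPLL is
	 * assigned or the platform provides no get_freq hook.
	 */
	static int example_crtc_dpll_clock(struct drm_i915_private *i915,
					   const struct intel_crtc_state *crtc_state)
	{
		if (!crtc_state->shared_dpll)
			return 0;

		return intel_dpll_get_freq(i915, crtc_state->shared_dpll);
	}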
+
+static void readout_dpll_hw_state(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ struct intel_crtc *crtc;
+
+ pll->on = pll->info->funcs->get_hw_state(i915, pll,
+ &pll->state.hw_state);
+
+ if (IS_ELKHARTLAKE(i915) && pll->on &&
+ pll->info->id == DPLL_ID_EHL_DPLL4) {
+ pll->wakeref = intel_display_power_get(i915,
+ POWER_DOMAIN_DPLL_DC_OFF);
+ }
+
+ pll->state.crtc_mask = 0;
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
+ pll->state.crtc_mask |= 1 << crtc->pipe;
+ }
+ pll->active_mask = pll->state.crtc_mask;
+
+ drm_dbg_kms(&i915->drm,
+ "%s hw state readout: crtc_mask 0x%08x, on %i\n",
+ pll->info->name, pll->state.crtc_mask, pll->on);
+}
+
+void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
+{
+ int i;
+
+ if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
+ i915->dpll.mgr->update_ref_clks(i915);
+
+ for (i = 0; i < i915->dpll.num_shared_dpll; i++)
+ readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
+}
+
+static void sanitize_dpll_state(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ if (!pll->on || pll->active_mask)
+ return;
+
+ drm_dbg_kms(&i915->drm,
+ "%s enabled but not in use, disabling\n",
+ pll->info->name);
+
+ pll->info->funcs->disable(i915, pll);
+ pll->on = false;
+}
+
+void intel_dpll_sanitize_state(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < i915->dpll.num_shared_dpll; i++)
+ sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
+}
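A sketch of the intended ordering at driver init (the exact call site is outside this hunk, so treat this as an assumption about usage rather than a quote of it):

	/*
	 * Assumed init-time ordering: first read back what the
	 * BIOS/GOP left programmed, then disable any PLL that is on
	 * but feeds no active CRTC.
	 */
	intel_dpll_readout_hw_state(i915);
	/* ... per-CRTC and encoder state readout happens here ... */
	intel_dpll_sanitize_state(i915);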
+
+/**
* intel_dpll_dump_hw_state - write hw_state to dmesg
* @dev_priv: i915 drm device
* @hw_state: hw state to be written to the log
@@ -3926,8 +4496,8 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- if (dev_priv->dpll_mgr) {
- dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
+ if (dev_priv->dpll.mgr) {
+ dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
} else {
/* fallback for platforms that don't use the shared dpll
* infrastructure
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 2a104c64291d..5d9a2bc371e7 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -278,6 +278,15 @@ struct intel_shared_dpll_funcs {
bool (*get_hw_state)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state);
+
+ /**
+ * @get_freq:
+ *
+ * Hook for calculating the pll's output frequency based on its
+ * current state.
+ */
+ int (*get_freq)(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll);
};
/**
@@ -372,15 +381,18 @@ void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
void intel_update_active_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder);
+int intel_dpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll);
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);
+void intel_dpll_readout_hw_state(struct drm_i915_private *dev_priv);
+void intel_dpll_sanitize_state(struct drm_i915_private *dev_priv);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state);
-int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 76ae01277fd6..d7a6bf2277df 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -52,7 +52,7 @@ static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
if (DSB_STATUS & dsb_ctrl) {
- DRM_DEBUG_KMS("DSB engine is busy.\n");
+ drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n");
return false;
}
@@ -72,7 +72,7 @@ static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb)
dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
if (DSB_STATUS & dsb_ctrl) {
- DRM_DEBUG_KMS("DSB engine is busy.\n");
+ drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n");
return false;
}
@@ -115,20 +115,20 @@ intel_dsb_get(struct intel_crtc *crtc)
obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
if (IS_ERR(obj)) {
- DRM_ERROR("Gem object creation failed\n");
+ drm_err(&i915->drm, "Gem object creation failed\n");
goto out;
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
- DRM_ERROR("Vma creation failed\n");
+ drm_err(&i915->drm, "Vma creation failed\n");
i915_gem_object_put(obj);
goto out;
}
buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
if (IS_ERR(buf)) {
- DRM_ERROR("Command buffer creation failed\n");
+ drm_err(&i915->drm, "Command buffer creation failed\n");
goto out;
}
@@ -203,7 +203,7 @@ void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
}
if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
- DRM_DEBUG_KMS("DSB buffer overflow\n");
+ drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
return;
}
@@ -277,7 +277,7 @@ void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val)
}
if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
- DRM_DEBUG_KMS("DSB buffer overflow\n");
+ drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
return;
}
@@ -310,7 +310,8 @@ void intel_dsb_commit(struct intel_dsb *dsb)
goto reset;
if (is_dsb_busy(dsb)) {
- DRM_ERROR("HEAD_PTR write failed - dsb engine is busy.\n");
+ drm_err(&dev_priv->drm,
+ "HEAD_PTR write failed - dsb engine is busy.\n");
goto reset;
}
intel_de_write(dev_priv, DSB_HEAD(pipe, dsb->id),
@@ -322,15 +323,18 @@ void intel_dsb_commit(struct intel_dsb *dsb)
(tail - dsb->free_pos * 4));
if (is_dsb_busy(dsb)) {
- DRM_ERROR("TAIL_PTR write failed - dsb engine is busy.\n");
+ drm_err(&dev_priv->drm,
+ "TAIL_PTR write failed - dsb engine is busy.\n");
goto reset;
}
- DRM_DEBUG_KMS("DSB execution started - head 0x%x, tail 0x%x\n",
- i915_ggtt_offset(dsb->vma), tail);
+ drm_dbg_kms(&dev_priv->drm,
+ "DSB execution started - head 0x%x, tail 0x%x\n",
+ i915_ggtt_offset(dsb->vma), tail);
intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id),
i915_ggtt_offset(dsb->vma) + tail);
if (wait_for(!is_dsb_busy(dsb), 1)) {
- DRM_ERROR("Timed out waiting for DSB workload completion.\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for DSB workload completion.\n");
goto reset;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 694498f4b719..574dcfec9577 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -36,7 +36,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include <video/mipi_display.h>
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 77f3d083b7a1..341d5ce8b062 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -30,7 +30,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_connector.h"
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index ddf8d3bb7a7d..2e5d835a9eaa 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -42,6 +42,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
+#include "i915_vgpu.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
@@ -320,7 +321,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
SNB_CPU_FENCE_ENABLE | params->fence_id);
intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
params->crtc.fence_y_offset);
- } else {
+ } else if (dev_priv->ggtt.num_fences) {
intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
}
@@ -508,12 +509,12 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
fbc->compressed_llb = compressed_llb;
- GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
- fbc->compressed_fb.start,
- U32_MAX));
- GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
- fbc->compressed_llb->start,
- U32_MAX));
+ GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
+ fbc->compressed_fb.start,
+ U32_MAX));
+ GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
+ fbc->compressed_llb->start,
+ U32_MAX));
intel_de_write(dev_priv, FBC_CFB_BASE,
dev_priv->dsm.start + fbc->compressed_fb.start);
intel_de_write(dev_priv, FBC_LL_BASE,
@@ -691,12 +692,37 @@ static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
fbc->compressed_fb.size * fbc->threshold;
}
+static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ if (intel_vgpu_active(dev_priv)) {
+ fbc->no_fbc_reason = "VGPU is active";
+ return false;
+ }
+
+ if (!i915_modparams.enable_fbc) {
+ fbc->no_fbc_reason = "disabled per module param or by default";
+ return false;
+ }
+
+ if (fbc->underrun_detected) {
+ fbc->no_fbc_reason = "underrun detected";
+ return false;
+ }
+
+ return true;
+}
+
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ if (!intel_fbc_can_enable(dev_priv))
+ return false;
+
if (!cache->plane.visible) {
fbc->no_fbc_reason = "primary plane not visible";
return false;
@@ -795,28 +821,6 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return true;
}
-static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
-{
- struct intel_fbc *fbc = &dev_priv->fbc;
-
- if (intel_vgpu_active(dev_priv)) {
- fbc->no_fbc_reason = "VGPU is active";
- return false;
- }
-
- if (!i915_modparams.enable_fbc) {
- fbc->no_fbc_reason = "disabled per module param or by default";
- return false;
- }
-
- if (fbc->underrun_detected) {
- fbc->no_fbc_reason = "underrun detected";
- return false;
- }
-
- return true;
-}
-
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
struct intel_fbc_reg_params *params)
{
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index b4ff77225236..3bc804212a99 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -40,7 +40,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_display_types.h"
@@ -453,7 +452,7 @@ int intel_fbdev_init(struct drm_device *dev)
if (!intel_fbdev_init_bios(dev, ifbdev))
ifbdev->preferred_bpp = 32;
- ret = drm_fb_helper_init(dev, &ifbdev->helper, 4);
+ ret = drm_fb_helper_init(dev, &ifbdev->helper);
if (ret) {
kfree(ifbdev);
return ret;
@@ -462,8 +461,6 @@ int intel_fbdev_init(struct drm_device *dev)
dev_priv->fbdev = ifbdev;
INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
- drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 470b3b0b9bdb..813a4f7033e1 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -103,7 +103,7 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
intel_de_posting_read(dev_priv, reg);
trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe);
- DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
+ drm_err(&dev_priv->drm, "pipe %c underrun\n", pipe_name(crtc->pipe));
}
static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -123,7 +123,8 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
intel_de_posting_read(dev_priv, reg);
} else {
if (old && intel_de_read(dev_priv, reg) & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ drm_err(&dev_priv->drm, "pipe %c underrun\n",
+ pipe_name(pipe));
}
}
@@ -155,7 +156,7 @@ static void ivb_check_fifo_underruns(struct intel_crtc *crtc)
intel_de_posting_read(dev_priv, GEN7_ERR_INT);
trace_intel_cpu_fifo_underrun(dev_priv, pipe);
- DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
+ drm_err(&dev_priv->drm, "fifo underrun on pipe %c\n", pipe_name(pipe));
}
static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -176,8 +177,9 @@ static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
if (old &&
intel_de_read(dev_priv, GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
- DRM_ERROR("uncleared fifo underrun on pipe %c\n",
- pipe_name(pipe));
+ drm_err(&dev_priv->drm,
+ "uncleared fifo underrun on pipe %c\n",
+ pipe_name(pipe));
}
}
}
@@ -223,8 +225,8 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
intel_de_posting_read(dev_priv, SERR_INT);
trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
- DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
- pipe_name(pch_transcoder));
+ drm_err(&dev_priv->drm, "pch fifo underrun on pch transcoder %c\n",
+ pipe_name(pch_transcoder));
}
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -246,8 +248,9 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
if (old && intel_de_read(dev_priv, SERR_INT) &
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
- DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
- pipe_name(pch_transcoder));
+ drm_err(&dev_priv->drm,
+ "uncleared pch fifo underrun on pch transcoder %c\n",
+ pipe_name(pch_transcoder));
}
}
}
@@ -381,8 +384,8 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) {
trace_intel_cpu_fifo_underrun(dev_priv, pipe);
- DRM_ERROR("CPU pipe %c FIFO underrun\n",
- pipe_name(pipe));
+ drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n",
+ pipe_name(pipe));
}
intel_fbc_handle_fifo_underrun_irq(dev_priv);
@@ -403,8 +406,8 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
false)) {
trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
- DRM_ERROR("PCH transcoder %c FIFO underrun\n",
- pipe_name(pch_transcoder));
+ drm_err(&dev_priv->drm, "PCH transcoder %c FIFO underrun\n",
+ pipe_name(pch_transcoder));
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 4ef8a81ae0ad..1fd3a5a6296b 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -32,7 +32,6 @@
#include <linux/i2c.h>
#include <drm/drm_hdcp.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_display_types.h"
@@ -632,8 +631,9 @@ retry:
* till then let it sleep.
*/
if (gmbus_wait_idle(dev_priv)) {
- DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
- adapter->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "GMBUS [%s] timed out waiting for idle\n",
+ adapter->name);
ret = -ETIMEDOUT;
}
intel_de_write_fw(dev_priv, GMBUS0, 0);
@@ -656,8 +656,9 @@ clear_err:
*/
ret = -ENXIO;
if (gmbus_wait_idle(dev_priv)) {
- DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
- adapter->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "GMBUS [%s] timed out after NAK\n",
+ adapter->name);
ret = -ETIMEDOUT;
}
@@ -669,9 +670,9 @@ clear_err:
intel_de_write_fw(dev_priv, GMBUS1, 0);
intel_de_write_fw(dev_priv, GMBUS0, 0);
- DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
- adapter->name, msgs[i].addr,
- (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+ drm_dbg_kms(&dev_priv->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+ adapter->name, msgs[i].addr,
+ (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
/*
* Passive adapters sometimes NAK the first probe. Retry the first
@@ -680,16 +681,18 @@ clear_err:
* drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
*/
if (ret == -ENXIO && i == 0 && try++ == 0) {
- DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
- adapter->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "GMBUS [%s] NAK on first message, retry\n",
+ adapter->name);
goto retry;
}
goto out;
timeout:
- DRM_DEBUG_KMS("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
- bus->adapter.name, bus->reg0 & 0xff);
+ drm_dbg_kms(&dev_priv->drm,
+ "GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
+ bus->adapter.name, bus->reg0 & 0xff);
intel_de_write_fw(dev_priv, GMBUS0, 0);
/*
@@ -926,9 +929,10 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
mutex_lock(&dev_priv->gmbus_mutex);
bus->force_bit += force_bit ? 1 : -1;
- DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
- force_bit ? "en" : "dis", adapter->name,
- bus->force_bit);
+ drm_dbg_kms(&dev_priv->drm,
+ "%sabling bit-banging on %s. force bit now %d\n",
+ force_bit ? "en" : "dis", adapter->name,
+ bus->force_bit);
mutex_unlock(&dev_priv->gmbus_mutex);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 30e0a3aa9d57..ee0f27ea2810 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -43,6 +43,7 @@ static
int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
const struct intel_hdcp_shim *shim, u8 *bksv)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret, i, tries = 2;
/* HDCP spec states that we must retry the bksv if it is invalid */
@@ -54,7 +55,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
break;
}
if (i == tries) {
- DRM_DEBUG_KMS("Bksv is invalid\n");
+ drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
return -ENODEV;
}
@@ -485,8 +486,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
return ret;
sha_idx += sizeof(sha_text);
} else {
- DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
- sha_leftovers);
+ drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
+ sha_leftovers);
return -EINVAL;
}
@@ -514,11 +515,11 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
rep_ctl | HDCP_SHA1_COMPLETE_HASH);
if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
HDCP_SHA1_COMPLETE, 1)) {
- DRM_ERROR("Timed out waiting for SHA1 complete\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
return -ETIMEDOUT;
}
if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
- DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
+ drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
return -ENXIO;
}
@@ -537,7 +538,8 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
if (ret) {
- DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "KSV list failed to become ready (%d)\n", ret);
return ret;
}
@@ -547,7 +549,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
- DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
+ drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
return -EPERM;
}
@@ -560,13 +562,14 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
*/
num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
if (num_downstream == 0) {
- DRM_DEBUG_KMS("Repeater with zero downstream devices\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Repeater with zero downstream devices\n");
return -EINVAL;
}
ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
if (!ksv_fifo) {
- DRM_DEBUG_KMS("Out of mem: ksv_fifo\n");
+ drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
return -ENOMEM;
}
@@ -576,7 +579,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
num_downstream)) {
- DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
+ drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
ret = -EPERM;
goto err;
}
@@ -594,12 +597,13 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
}
if (i == tries) {
- DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "V Prime validation failed.(%d)\n", ret);
goto err;
}
- DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
- num_downstream);
+ drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
+ num_downstream);
ret = 0;
err:
kfree(ksv_fifo);
@@ -642,7 +646,8 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (ret)
return ret;
if (!hdcp_capable) {
- DRM_DEBUG_KMS("Panel is not HDCP capable\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Panel is not HDCP capable\n");
return -EINVAL;
}
}
@@ -659,7 +664,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (intel_de_wait_for_set(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_AN_READY, 1)) {
- DRM_ERROR("Timed out waiting for An\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for An\n");
return -ETIMEDOUT;
}
@@ -680,7 +685,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
return ret;
if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1)) {
- DRM_ERROR("BKSV is revoked\n");
+ drm_err(&dev_priv->drm, "BKSV is revoked\n");
return -EPERM;
}
@@ -706,7 +711,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
/* Wait for R0 ready */
if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
(HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
- DRM_ERROR("Timed out waiting for R0 ready\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
return -ETIMEDOUT;
}
@@ -743,8 +748,10 @@ static int intel_hdcp_auth(struct intel_connector *connector)
}
if (i == tries) {
- DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
- intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
+ drm_dbg_kms(&dev_priv->drm,
+ "Timed out waiting for Ri prime match (%x)\n",
+ intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
+ cpu_transcoder, port)));
return -ETIMEDOUT;
}
@@ -753,7 +760,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_ENC,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
- DRM_ERROR("Timed out waiting for encryption\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
return -ETIMEDOUT;
}
@@ -765,7 +772,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (repeater_present)
return intel_hdcp_auth_downstream(connector);
- DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
+ drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
return 0;
}
@@ -872,7 +879,8 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- if (WARN_ON(!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
+ if (drm_WARN_ON(&dev_priv->drm,
+ !intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
drm_err(&dev_priv->drm,
"%s:%d HDCP link stopped encryption,%x\n",
connector->base.name, connector->base.base.id,
@@ -1270,7 +1278,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
return ret;
if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
- DRM_DEBUG_KMS("cert.rx_caps dont claim HDCP2.2\n");
+ drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n");
return -EINVAL;
}
@@ -1279,7 +1287,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
msgs.send_cert.cert_rx.receiver_id,
1)) {
- DRM_ERROR("Receiver ID is revoked\n");
+ drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
return -EPERM;
}
@@ -1454,7 +1462,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
- DRM_DEBUG_KMS("Topology Max Size Exceeded\n");
+ drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
return -EINVAL;
}
@@ -1462,9 +1470,15 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
seq_num_v =
drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
+ if (!hdcp->hdcp2_encrypted && seq_num_v) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Non zero Seq_num_v at first RecvId_List msg\n");
+ return -EINVAL;
+ }
+
if (seq_num_v < hdcp->seq_num_v) {
/* Roll over of the seq_num_v from repeater. Reauthenticate. */
- DRM_DEBUG_KMS("Seq_num_v roll over.\n");
+ drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
return -EINVAL;
}
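
Note: the added guard rejects a non-zero seq_num_v in the first RecvID_List message — before hdcp->hdcp2_encrypted is set the repeater's counter must still be zero — while the pre-existing check underneath keeps catching counter roll-over. A compact model of the two checks; the hdcp_state struct is a hypothetical simplification of the per-connector state:

#include <stdbool.h>
#include <stdint.h>

struct hdcp_state {
	bool encrypted;		/* mirrors hdcp2_encrypted */
	uint32_t seq_num_v;	/* last value accepted from the repeater */
};

/* Returns 0 to accept the message, -1 to force reauthentication. */
static int check_seq_num_v(struct hdcp_state *st, uint32_t seq_num_v)
{
	/* First RecvID_List (encryption not yet on) must carry zero. */
	if (!st->encrypted && seq_num_v)
		return -1;

	/* A smaller value than last time means the counter rolled over. */
	if (seq_num_v < st->seq_num_v)
		return -1;

	st->seq_num_v = seq_num_v;
	return 0;
}

int main(void)
{
	struct hdcp_state st = { .encrypted = false, .seq_num_v = 0 };

	return check_seq_num_v(&st, 1) == -1 ? 0 : 1; /* must reject */
}
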
@@ -1473,7 +1487,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
msgs.recvid_list.receiver_ids,
device_cnt)) {
- DRM_ERROR("Revoked receiver ID(s) is in list\n");
+ drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
return -EPERM;
}
@@ -1506,25 +1520,27 @@ static int hdcp2_authenticate_repeater(struct intel_connector *connector)
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
const struct intel_hdcp_shim *shim = hdcp->shim;
int ret;
ret = hdcp2_authentication_key_exchange(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret);
+ drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
return ret;
}
ret = hdcp2_locality_check(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Locality Check failed. Err : %d\n", ret);
return ret;
}
ret = hdcp2_session_key_exchange(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret);
+ drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
return ret;
}
@@ -1539,7 +1555,8 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
if (hdcp->is_repeater) {
ret = hdcp2_authenticate_repeater(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Repeater Auth Failed. Err: %d\n", ret);
return ret;
}
}
@@ -1561,8 +1578,9 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- WARN_ON(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
- LINK_ENCRYPTION_STATUS);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS);
if (hdcp->shim->toggle_signalling) {
ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
if (ret) {
@@ -1599,8 +1617,8 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- WARN_ON(!(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
- LINK_ENCRYPTION_STATUS));
+ drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS));
intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);
@@ -1628,6 +1646,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int ret, i, tries = 3;
for (i = 0; i < tries; i++) {
@@ -1636,10 +1655,10 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
break;
/* Clearing the mei hdcp session */
- DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n",
- i + 1, tries, ret);
+ drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
+ i + 1, tries, ret);
if (hdcp2_deauthenticate_port(connector) < 0)
- DRM_DEBUG_KMS("Port deauth failed.\n");
+ drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
}
if (i != tries) {
@@ -1650,9 +1669,10 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
ret = hdcp2_enable_encryption(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Encryption Enable Failed.(%d)\n", ret);
if (hdcp2_deauthenticate_port(connector) < 0)
- DRM_DEBUG_KMS("Port deauth failed.\n");
+ drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
}
}
@@ -1661,23 +1681,24 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
static int _intel_hdcp2_enable(struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
- connector->base.name, connector->base.base.id,
- hdcp->content_type);
+ drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
+ connector->base.name, connector->base.base.id,
+ hdcp->content_type);
ret = hdcp2_authenticate_and_encrypt(connector);
if (ret) {
- DRM_DEBUG_KMS("HDCP2 Type%d Enabling Failed. (%d)\n",
- hdcp->content_type, ret);
+ drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
+ hdcp->content_type, ret);
return ret;
}
- DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n",
- connector->base.name, connector->base.base.id,
- hdcp->content_type);
+ drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
+ connector->base.name, connector->base.base.id,
+ hdcp->content_type);
hdcp->hdcp2_encrypted = true;
return 0;
@@ -1685,15 +1706,16 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
static int _intel_hdcp2_disable(struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int ret;
- DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
+ connector->base.name, connector->base.base.id);
ret = hdcp2_disable_encryption(connector);
if (hdcp2_deauthenticate_port(connector) < 0)
- DRM_DEBUG_KMS("Port deauth failed.\n");
+ drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
connector->hdcp.hdcp2_encrypted = false;
@@ -1720,7 +1742,8 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
goto out;
}
- if (WARN_ON(!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
+ if (drm_WARN_ON(&dev_priv->drm,
+ !intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
drm_err(&dev_priv->drm,
"HDCP2.2 link stopped the encryption, %x\n",
intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
@@ -1916,7 +1939,7 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
return;
mutex_lock(&dev_priv->hdcp_comp_mutex);
- WARN_ON(dev_priv->hdcp_comp_added);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);
dev_priv->hdcp_comp_added = true;
mutex_unlock(&dev_priv->hdcp_comp_mutex);
@@ -1935,12 +1958,13 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
static void intel_hdcp2_init(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
ret = initialize_hdcp_port_data(connector, shim);
if (ret) {
- DRM_DEBUG_KMS("Mei hdcp data init failed\n");
+ drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
return;
}
@@ -1990,7 +2014,8 @@ int intel_hdcp_enable(struct intel_connector *connector,
return -ENOENT;
mutex_lock(&hdcp->mutex);
- WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ drm_WARN_ON(&dev_priv->drm,
+ hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
hdcp->content_type = content_type;
if (INTEL_GEN(dev_priv) >= 12) {
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index 1b2eacaf8949..7c12ad609b1f 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
struct drm_connector;
struct drm_connector_state;
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 476ce1d9c557..39930232b253 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -36,7 +36,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_scdc_helper.h>
-#include <drm/i915_drm.h>
#include <drm/intel_lpe_audio.h>
#include "i915_debugfs.h"
@@ -2276,14 +2275,9 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
}
}
- /* Display WA #1139: glk */
- if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
- adjusted_mode->htotal > 5460)
- return false;
-
- /* Display Wa_1405510057:icl */
+ /* Display Wa_1405510057:icl,ehl */
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
- bpc == 10 && INTEL_GEN(dev_priv) >= 11 &&
+ bpc == 10 && IS_GEN(dev_priv, 11) &&
(adjusted_mode->crtc_hblank_end -
adjusted_mode->crtc_hblank_start) % 8 == 2)
return false;
@@ -3176,6 +3170,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
connector->ycbcr_420_allowed = true;
intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+ intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index d3659d0b408b..8ff1f76a63df 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -9,8 +9,6 @@
#include <linux/hdmi.h>
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
#include "i915_reg.h"
struct drm_connector;
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 127a2f28c1ac..a091442efba4 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -23,8 +23,6 @@
#include <linux/kernel.h>
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"
@@ -89,29 +87,16 @@
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
enum port port)
{
- switch (port) {
- case PORT_A:
- return HPD_PORT_A;
- case PORT_B:
- return HPD_PORT_B;
- case PORT_C:
- return HPD_PORT_C;
- case PORT_D:
- return HPD_PORT_D;
- case PORT_E:
- return HPD_PORT_E;
- case PORT_F:
- if (IS_CNL_WITH_PORT_F(dev_priv))
- return HPD_PORT_E;
- return HPD_PORT_F;
- case PORT_G:
- return HPD_PORT_G;
- case PORT_H:
- return HPD_PORT_H;
- case PORT_I:
- return HPD_PORT_I;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+
+ switch (phy) {
+ case PHY_F:
+ return IS_CNL_WITH_PORT_F(dev_priv) ? HPD_PORT_E : HPD_PORT_F;
+ case PHY_A ... PHY_E:
+ case PHY_G ... PHY_I:
+ return HPD_PORT_A + phy - PHY_A;
default:
- MISSING_CASE(port);
+ MISSING_CASE(phy);
return HPD_NONE;
}
}
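
Note: the rewrite derives the HPD pin from the PHY instead of switching on every port, relying on the HPD_PORT_* and PHY_* enumerators being declared in matching order so the mapping reduces to enum arithmetic, with CNL's port F as the single special case. A standalone sketch of that mapping; the enum values here are illustrative, not the kernel's:

#include <assert.h>

enum phy { PHY_A, PHY_B, PHY_C, PHY_D, PHY_E, PHY_F, PHY_G, PHY_H, PHY_I };
enum hpd_pin {
	HPD_NONE = -1,
	HPD_PORT_A, HPD_PORT_B, HPD_PORT_C, HPD_PORT_D, HPD_PORT_E,
	HPD_PORT_F, HPD_PORT_G, HPD_PORT_H, HPD_PORT_I,
};

static enum hpd_pin hpd_pin_for_phy(enum phy phy, int cnl_with_port_f)
{
	switch (phy) {
	case PHY_F:
		/* CNL routes port F's hotplug through the port E pin. */
		return cnl_with_port_f ? HPD_PORT_E : HPD_PORT_F;
	case PHY_A ... PHY_E:	/* GCC case ranges, as in the kernel */
	case PHY_G ... PHY_I:
		/* Contiguous enumerators make this pure arithmetic. */
		return HPD_PORT_A + phy - PHY_A;
	default:
		return HPD_NONE;
	}
}

int main(void)
{
	assert(hpd_pin_for_phy(PHY_C, 0) == HPD_PORT_C);
	assert(hpd_pin_for_phy(PHY_F, 1) == HPD_PORT_E);
	return 0;
}
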
@@ -185,10 +170,13 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
hpd->stats[pin].count += increment;
if (hpd->stats[pin].count > threshold) {
hpd->stats[pin].state = HPD_MARK_DISABLED;
- DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
+ drm_dbg_kms(&dev_priv->drm,
+ "HPD interrupt storm detected on PIN %d\n", pin);
storm = true;
} else {
- DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
+ drm_dbg_kms(&dev_priv->drm,
+ "Received HPD interrupt on PIN %d - cnt: %d\n",
+ pin,
hpd->stats[pin].count);
}
@@ -217,7 +205,8 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
continue;
- DRM_INFO("HPD interrupt storm detected on connector %s: "
+ drm_info(&dev_priv->drm,
+ "HPD interrupt storm detected on connector %s: "
"switching from hotplug detection to polling\n",
connector->base.name);
@@ -242,36 +231,38 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
container_of(work, typeof(*dev_priv),
hotplug.reenable_work.work);
struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
intel_wakeref_t wakeref;
enum hpd_pin pin;
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_irq(&dev_priv->irq_lock);
- for_each_hpd_pin(pin) {
- struct drm_connector_list_iter conn_iter;
- struct intel_connector *connector;
- if (dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ pin = intel_connector_hpd_pin(connector);
+ if (pin == HPD_NONE ||
+ dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
continue;
- dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- for_each_intel_connector_iter(connector, &conn_iter) {
- if (intel_connector_hpd_pin(connector) == pin) {
- if (connector->base.polled != connector->polled)
- DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
- connector->base.name);
- connector->base.polled = connector->polled;
- if (!connector->base.polled)
- connector->base.polled = DRM_CONNECTOR_POLL_HPD;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
+ if (connector->base.polled != connector->polled)
+ drm_dbg(&dev_priv->drm,
+ "Reenabling HPD on connector %s\n",
+ connector->base.name);
+ connector->base.polled = connector->polled;
}
+ drm_connector_list_iter_end(&conn_iter);
+
+ for_each_hpd_pin(pin) {
+ if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED)
+ dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
+ }
+
if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev_priv);
+
spin_unlock_irq(&dev_priv->irq_lock);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
@@ -294,11 +285,12 @@ intel_encoder_hotplug(struct intel_encoder *encoder,
if (old_status == connector->base.status)
return INTEL_HOTPLUG_UNCHANGED;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
- connector->base.base.id,
- connector->base.name,
- drm_get_connector_status_name(old_status),
- drm_get_connector_status_name(connector->base.status));
+ drm_dbg_kms(&to_i915(dev)->drm,
+ "[CONNECTOR:%d:%s] status updated from %s to %s\n",
+ connector->base.base.id,
+ connector->base.name,
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->base.status));
return INTEL_HOTPLUG_CHANGED;
}
@@ -372,7 +364,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
u32 hpd_retry_bits;
mutex_lock(&dev->mode_config.mutex);
- DRM_DEBUG_KMS("running encoder hotplug functions\n");
+ drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");
spin_lock_irq(&dev_priv->irq_lock);
@@ -400,8 +392,9 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct intel_encoder *encoder =
intel_attached_encoder(connector);
- DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
- connector->base.name, pin);
+ drm_dbg_kms(&dev_priv->drm,
+ "Connector %s (pin %i) received hotplug event.\n",
+ connector->base.name, pin);
switch (encoder->hotplug(encoder, connector,
hpd_event_bits & hpd_bit)) {
@@ -486,9 +479,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
long_hpd = long_mask & BIT(pin);
- DRM_DEBUG_DRIVER("digital hpd on [ENCODER:%d:%s] - %s\n",
- encoder->base.base.id, encoder->base.name,
- long_hpd ? "long" : "short");
+ drm_dbg(&dev_priv->drm,
+ "digital hpd on [ENCODER:%d:%s] - %s\n",
+ encoder->base.base.id, encoder->base.name,
+ long_hpd ? "long" : "short");
queue_dig = true;
if (long_hpd) {
@@ -617,16 +611,17 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
- enum hpd_pin pin = intel_connector_hpd_pin(connector);
+ enum hpd_pin pin;
+
+ pin = intel_connector_hpd_pin(connector);
+ if (pin == HPD_NONE)
+ continue;
connector->base.polled = connector->polled;
- if (pin != HPD_NONE && I915_HAS_HOTPLUG(dev_priv) &&
- !connector->base.polled)
- connector->base.polled = enabled ?
- DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT :
- DRM_CONNECTOR_POLL_HPD;
+ if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
+ connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
}
drm_connector_list_iter_end(&conn_iter);
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index 087b5f57b321..1e6b4fda2900 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
struct drm_i915_private;
struct intel_connector;
struct intel_encoder;
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 516e7179a5a4..ad5cc13037ae 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -127,7 +127,8 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
kfree(pdata);
if (IS_ERR(platdev)) {
- DRM_ERROR("Failed to allocate LPE audio platform device\n");
+ drm_err(&dev_priv->drm,
+ "Failed to allocate LPE audio platform device\n");
return platdev;
}
@@ -190,7 +191,8 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
};
if (!pci_dev_present(atom_hdaudio_ids)) {
- DRM_INFO("HDaudio controller not detected, using LPE audio instead\n");
+ drm_info(&dev_priv->drm,
+ "HDaudio controller not detected, using LPE audio instead\n");
lpe_present = true;
}
}
@@ -203,18 +205,19 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
dev_priv->lpe_audio.irq = irq_alloc_desc(0);
if (dev_priv->lpe_audio.irq < 0) {
- DRM_ERROR("Failed to allocate IRQ desc: %d\n",
+ drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n",
dev_priv->lpe_audio.irq);
ret = dev_priv->lpe_audio.irq;
goto err;
}
- DRM_DEBUG("irq = %d\n", dev_priv->lpe_audio.irq);
+ drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->lpe_audio.irq);
ret = lpe_audio_irq_init(dev_priv);
if (ret) {
- DRM_ERROR("Failed to initialize irqchip for lpe audio: %d\n",
+ drm_err(&dev_priv->drm,
+ "Failed to initialize irqchip for lpe audio: %d\n",
ret);
goto err_free_irq;
}
@@ -223,7 +226,8 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
if (IS_ERR(dev_priv->lpe_audio.platdev)) {
ret = PTR_ERR(dev_priv->lpe_audio.platdev);
- DRM_ERROR("Failed to create lpe audio platform device: %d\n",
+ drm_err(&dev_priv->drm,
+ "Failed to create lpe audio platform device: %d\n",
ret);
goto err_free_irq;
}
@@ -259,8 +263,8 @@ void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv)
ret = generic_handle_irq(dev_priv->lpe_audio.irq);
if (ret)
- DRM_ERROR_RATELIMITED("error handling LPE audio irq: %d\n",
- ret);
+ drm_err_ratelimited(&dev_priv->drm,
+ "error handling LPE audio irq: %d\n", ret);
}
/**
@@ -278,7 +282,8 @@ int intel_lpe_audio_init(struct drm_i915_private *dev_priv)
if (lpe_audio_detect(dev_priv)) {
ret = lpe_audio_setup(dev_priv);
if (ret < 0)
- DRM_ERROR("failed to setup LPE Audio bridge\n");
+ drm_err(&dev_priv->drm,
+ "failed to setup LPE Audio bridge\n");
}
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index b7ad0b534790..9a067effcfa0 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -37,7 +37,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_atomic.h"
@@ -182,8 +181,9 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) <= 4 &&
pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
- DRM_DEBUG_KMS("Panel power timings uninitialized, "
- "setting defaults\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Panel power timings uninitialized, "
+ "setting defaults\n");
/* Set T2 to 40ms and T5 to 200ms in 100 usec units */
pps->t1_t2 = 40 * 10;
pps->t5 = 200 * 10;
@@ -192,10 +192,10 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
pps->tx = 200 * 10;
}
- DRM_DEBUG_DRIVER("LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d "
- "divider %d port %d powerdown_on_reset %d\n",
- pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx,
- pps->divider, pps->port, pps->powerdown_on_reset);
+ drm_dbg(&dev_priv->drm, "LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d "
+ "divider %d port %d powerdown_on_reset %d\n",
+ pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx,
+ pps->divider, pps->port, pps->powerdown_on_reset);
}
static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
@@ -317,7 +317,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, lvds_encoder->reg);
if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000))
- DRM_ERROR("timed out waiting for panel to power on\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for panel to power on\n");
intel_panel_enable_backlight(pipe_config, conn_state);
}
@@ -332,7 +333,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
intel_de_write(dev_priv, PP_CONTROL(0),
intel_de_read(dev_priv, PP_CONTROL(0)) & ~PANEL_POWER_ON);
if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000))
- DRM_ERROR("timed out waiting for panel to power off\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for panel to power off\n");
intel_de_write(dev_priv, lvds_encoder->reg,
intel_de_read(dev_priv, lvds_encoder->reg) & ~LVDS_PORT_EN);
@@ -398,7 +400,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
/* Should never happen!! */
if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
- DRM_ERROR("Can't support LVDS on pipe A\n");
+ drm_err(&dev_priv->drm, "Can't support LVDS on pipe A\n");
return -EINVAL;
}
@@ -408,8 +410,9 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
lvds_bpp = 6*3;
if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) {
- DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n",
- pipe_config->pipe_bpp, lvds_bpp);
+ drm_dbg_kms(&dev_priv->drm,
+ "forcing display bpp (was %d) to LVDS (%d)\n",
+ pipe_config->pipe_bpp, lvds_bpp);
pipe_config->pipe_bpp = lvds_bpp;
}
@@ -833,7 +836,8 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
}
if (!dev_priv->vbt.int_lvds_support) {
- DRM_DEBUG_KMS("Internal LVDS support disabled by VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Internal LVDS support disabled by VBT\n");
return;
}
@@ -852,10 +856,12 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
pin = GMBUS_PIN_PANEL;
if (!intel_bios_is_lvds_present(dev_priv, &pin)) {
if ((lvds & LVDS_PORT_EN) == 0) {
- DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "LVDS is not present in VBT\n");
return;
}
- DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "LVDS is not present in VBT, but enabled anyway\n");
}
lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
@@ -969,7 +975,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
*/
fixed_mode = intel_encoder_current_mode(intel_encoder);
if (fixed_mode) {
- DRM_DEBUG_KMS("using current (BIOS) mode: ");
+ drm_dbg_kms(&dev_priv->drm, "using current (BIOS) mode: ");
drm_mode_debug_printmodeline(fixed_mode);
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
@@ -985,8 +991,8 @@ out:
intel_panel_setup_backlight(connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
- DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
- lvds_encoder->is_dual_link ? "dual" : "single");
+ drm_dbg_kms(&dev_priv->drm, "detected %s-link lvds configuration\n",
+ lvds_encoder->is_dual_link ? "dual" : "single");
lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
@@ -995,7 +1001,7 @@ out:
failed:
mutex_unlock(&dev->mode_config.mutex);
- DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
+ drm_dbg_kms(&dev_priv->drm, "No LVDS modes found, disabling.\n");
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
kfree(lvds_encoder);
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index dfd78fccd456..cc6b00959586 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -30,8 +30,6 @@
#include <linux/firmware.h>
#include <acpi/video.h>
-#include <drm/i915_drm.h>
-
#include "display/intel_panel.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 5f1207dec10e..481187223101 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -27,7 +27,6 @@
*/
#include <drm/drm_fourcc.h>
-#include <drm/i915_drm.h>
#include "gem/i915_gem_pm.h"
#include "gt/intel_ring.h"
@@ -324,7 +323,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
/* check for underruns */
tmp = intel_de_read(dev_priv, DOVSTA);
if (tmp & (1 << 17))
- DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
+ drm_dbg(&dev_priv->drm, "overlay underrun, DOVSTA: %x\n", tmp);
rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
@@ -1069,7 +1068,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
overlay = dev_priv->overlay;
if (!overlay) {
- DRM_DEBUG("userspace bug: no overlay\n");
+ drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
return -ENODEV;
}
@@ -1093,7 +1092,8 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
if (i915_gem_object_is_tiled(new_bo)) {
- DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "buffer used for overlay image can not be tiled\n");
ret = -EINVAL;
goto out_unlock;
}
@@ -1228,7 +1228,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
overlay = dev_priv->overlay;
if (!overlay) {
- DRM_DEBUG("userspace bug: no overlay\n");
+ drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
return -ENODEV;
}
@@ -1372,7 +1372,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
update_reg_attrs(overlay, overlay->regs);
dev_priv->overlay = overlay;
- DRM_INFO("Initialized overlay support.\n");
+ drm_info(&dev_priv->drm, "Initialized overlay support.\n");
return;
out_free:
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 585688b6ebac..276f43870802 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -434,8 +434,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
/* 965+ wants fuzzy fitting */
/* FIXME: handle multiple panels by failing gracefully */
if (INTEL_GEN(dev_priv) >= 4)
- pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
- PFIT_FILTER_FUZZY);
+ pfit_control |= PFIT_PIPE(intel_crtc->pipe) | PFIT_FILTER_FUZZY;
out:
if ((pfit_control & PFIT_ENABLE) == 0) {
@@ -1883,6 +1882,7 @@ static int pwm_setup_backlight(struct intel_connector *connector,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_panel *panel = &connector->panel;
const char *desc;
+ u32 level, ns;
int retval;
/* Get the right PWM chip for DSI backlight according to VBT */
@@ -1907,8 +1907,12 @@ static int pwm_setup_backlight(struct intel_connector *connector,
*/
pwm_apply_args(panel->backlight.pwm);
- retval = pwm_config(panel->backlight.pwm, CRC_PMIC_PWM_PERIOD_NS,
- CRC_PMIC_PWM_PERIOD_NS);
+ panel->backlight.min = 0; /* 0% */
+ panel->backlight.max = 100; /* 100% */
+ level = intel_panel_compute_brightness(connector, 100);
+ ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
+
+ retval = pwm_config(panel->backlight.pwm, ns, CRC_PMIC_PWM_PERIOD_NS);
if (retval < 0) {
drm_err(&dev_priv->drm, "Failed to configure the pwm chip\n");
pwm_put(panel->backlight.pwm);
@@ -1916,11 +1920,10 @@ static int pwm_setup_backlight(struct intel_connector *connector,
return retval;
}
- panel->backlight.min = 0; /* 0% */
- panel->backlight.max = 100; /* 100% */
- panel->backlight.level = DIV_ROUND_UP(
- pwm_get_duty_cycle(panel->backlight.pwm) * 100,
- CRC_PMIC_PWM_PERIOD_NS);
+ level = DIV_ROUND_UP(pwm_get_duty_cycle(panel->backlight.pwm) * 100,
+ CRC_PMIC_PWM_PERIOD_NS);
+ panel->backlight.level =
+ intel_panel_compute_brightness(connector, level);
panel->backlight.enabled = panel->backlight.level != 0;
drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n",
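
Note: the reordering above establishes the 0-100 brightness range before the first pwm_config() call, so the initial duty cycle can be derived from the (possibly quirk-inverted) brightness level — ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100) — instead of being hardwired to a full period, and readback inverts the same formula. A worked model of the two conversions; PWM_PERIOD_NS is a made-up stand-in for CRC_PMIC_PWM_PERIOD_NS:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define PWM_PERIOD_NS		21333u	/* hypothetical fixed period */

/* Brightness percentage (0-100) -> duty cycle in nanoseconds. */
static uint32_t level_to_duty_ns(uint32_t level)
{
	return DIV_ROUND_UP(level * PWM_PERIOD_NS, 100);
}

/* Duty cycle in nanoseconds -> brightness percentage (0-100). */
static uint32_t duty_ns_to_level(uint32_t duty_ns)
{
	return DIV_ROUND_UP(duty_ns * 100, PWM_PERIOD_NS);
}

int main(void)
{
	/* Full brightness round-trips exactly. */
	assert(duty_ns_to_level(level_to_duty_ns(100)) == 100);
	printf("50%% -> %u ns of a %u ns period\n",
	       level_to_duty_ns(50), PWM_PERIOD_NS);
	return 0;
}
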
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 59d7e3cb3445..a9a5df2fee4d 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -441,15 +441,11 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
return 0;
}
-void intel_display_crc_init(struct drm_i915_private *dev_priv)
+void intel_crtc_crc_init(struct intel_crtc *crtc)
{
- enum pipe pipe;
+ struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
- for_each_pipe(dev_priv, pipe) {
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-
- spin_lock_init(&pipe_crc->lock);
- }
+ spin_lock_init(&pipe_crc->lock);
}
static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
@@ -587,7 +583,8 @@ int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
intel_wakeref_t wakeref;
@@ -640,7 +637,7 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+ struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
u32 val = 0;
if (!crtc->crc.opened)
@@ -660,7 +657,7 @@ void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+ struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
/* Swallow crc's until we stop generating them. */
spin_lock_irq(&pipe_crc->lock);
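
Note: these hunks move the CRC bookkeeping out of a per-device array indexed by crtc->index and into struct intel_crtc itself, so init becomes per-crtc and lookups no longer detour through dev_priv. A minimal sketch of the before/after shapes; all types here are simplified stand-ins:

#include <stddef.h>

struct pipe_crc { int opened; };

/* Before: one array in the device, indexed by pipe. */
struct device_state { struct pipe_crc pipe_crc[3]; };

/* After: the state is embedded in each crtc. */
struct crtc_state { struct pipe_crc pipe_crc; };

static struct pipe_crc *crtc_crc(struct crtc_state *crtc)
{
	/* was: &dev_priv->pipe_crc[crtc->index] */
	return &crtc->pipe_crc;
}

int main(void)
{
	struct crtc_state crtc = { { 0 } };

	return crtc_crc(&crtc) == NULL;	/* 0 on success */
}
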
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.h b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
index db258a756fc6..43012b189415 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.h
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
@@ -13,7 +13,7 @@ struct drm_i915_private;
struct intel_crtc;
#ifdef CONFIG_DEBUG_FS
-void intel_display_crc_init(struct drm_i915_private *dev_priv);
+void intel_crtc_crc_init(struct intel_crtc *crtc);
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
const char *source_name, size_t *values_cnt);
@@ -22,7 +22,7 @@ const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
#else
-static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
+static inline void intel_crtc_crc_init(struct intel_crtc *crtc) {}
#define intel_crtc_set_crc_source NULL
#define intel_crtc_verify_crc_source NULL
#define intel_crtc_get_crc_sources NULL
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index b4942b6445ae..fd9b146e3aba 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -304,7 +304,7 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
- if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
+ if (drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_NO_PSR)) {
drm_dbg_kms(&dev_priv->drm,
"PSR support not currently available for this panel\n");
return;
@@ -936,10 +936,12 @@ void intel_psr_enable(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (!crtc_state->has_psr)
+ if (!CAN_PSR(dev_priv) || dev_priv->psr.dp != intel_dp)
return;
- if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(dev_priv)))
+ dev_priv->psr.force_mode_changed = false;
+
+ if (!crtc_state->has_psr)
return;
drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);
@@ -1099,6 +1101,8 @@ void intel_psr_update(struct intel_dp *intel_dp,
if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
return;
+ dev_priv->psr.force_mode_changed = false;
+
mutex_lock(&dev_priv->psr.lock);
enable = crtc_state->has_psr && psr_global_enabled(dev_priv);
@@ -1629,7 +1633,7 @@ void intel_psr_atomic_check(struct drm_connector *connector,
struct drm_crtc_state *crtc_state;
if (!CAN_PSR(dev_priv) || !new_state->crtc ||
- dev_priv->psr.initially_probed)
+ !dev_priv->psr.force_mode_changed)
return;
intel_connector = to_intel_connector(connector);
@@ -1640,5 +1644,18 @@ void intel_psr_atomic_check(struct drm_connector *connector,
crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
new_state->crtc);
crtc_state->mode_changed = true;
- dev_priv->psr.initially_probed = true;
+}
+
+void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (!intel_dp)
+ return;
+
+ dev_priv = dp_to_i915(intel_dp);
+ if (!CAN_PSR(dev_priv) || intel_dp != dev_priv->psr.dp)
+ return;
+
+ dev_priv->psr.force_mode_changed = true;
}
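
Note: the initially_probed one-shot is replaced here by a force_mode_changed latch: intel_psr_set_force_mode_changed() arms it, intel_psr_atomic_check() only forces a modeset while it is armed, and the enable/update paths disarm it. A minimal model of that producer/consumer protocol; the struct is a simplification of dev_priv->psr:

#include <stdbool.h>
#include <stdio.h>

struct psr_state {
	bool force_mode_changed;
};

/* Producer: ask the next atomic check to force a full modeset. */
static void psr_set_force_mode_changed(struct psr_state *psr)
{
	psr->force_mode_changed = true;
}

/* Consumer: should this atomic check force crtc_state->mode_changed? */
static bool psr_needs_forced_modeset(const struct psr_state *psr)
{
	return psr->force_mode_changed;
}

/* Enable/update paths clear the latch once they have run. */
static void psr_enable(struct psr_state *psr)
{
	psr->force_mode_changed = false;
}

int main(void)
{
	struct psr_state psr = { false };

	psr_set_force_mode_changed(&psr);
	printf("forced: %d\n", psr_needs_forced_modeset(&psr));	/* 1 */
	psr_enable(&psr);
	printf("forced: %d\n", psr_needs_forced_modeset(&psr));	/* 0 */
	return 0;
}
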
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index c58a1d438808..274fc6bb6221 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -40,5 +40,6 @@ bool intel_psr_enabled(struct intel_dp *intel_dp);
void intel_psr_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state);
+void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 9d235d270dac..46beb155d835 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -82,6 +82,16 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = {
DMI_MATCH(DMI_PRODUCT_NAME, ""),
},
},
+ {
+ .callback = intel_dmi_reverse_brightness,
+ .ident = "Thundersoft TST178 tablet",
+ /* DMI strings are too generic, also match on BIOS date */
+ .matches = {DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ DMI_EXACT_MATCH(DMI_BIOS_DATE, "04/15/2014"),
+ },
+ },
{ } /* terminating entry */
},
.hook = quirk_invert_brightness,
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index a4921b549f8b..637d8fe2f8c2 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -34,7 +34,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_atomic.h"
@@ -2721,6 +2720,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
* Some SDVO devices have one-shot hotplug interrupts.
* Ensure that they get re-enabled when an interrupt happens.
*/
+ intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
intel_encoder->hotplug = intel_sdvo_hotplug;
intel_sdvo_enable_hotplug(intel_encoder);
} else {
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.h b/drivers/gpu/drm/i915/display/intel_sdvo.h
index a66f224aa17d..72065e4360d5 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.h
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.h
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
#include "i915_reg.h"
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 7abeefe8dce5..deda351719db 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -37,10 +37,10 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
+#include "i915_vgpu.h"
#include "intel_atomic_plane.h"
#include "intel_display_types.h"
#include "intel_frontbuffer.h"
@@ -284,6 +284,16 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
/*
+ * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS
+ * abuses hsub/vsub so we can't use them here. But as they
+ * are limited to 32bpp RGB formats we don't actually need
+ * to check anything.
+ */
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)
+ return 0;
+
+ /*
* Hardware doesn't handle subpixel coordinates.
* Adjust to (macro)pixel boundary, but be careful not to
* increase the source viewport size, because that could
@@ -297,26 +307,26 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
drm_rect_init(src, src_x << 16, src_y << 16,
src_w << 16, src_h << 16);
- if (!fb->format->is_yuv)
- return 0;
-
- /* YUV specific checks */
- if (!rotated) {
+ if (fb->format->format == DRM_FORMAT_RGB565 && rotated) {
+ hsub = 2;
+ vsub = 2;
+ } else {
hsub = fb->format->hsub;
vsub = fb->format->vsub;
- } else {
- hsub = vsub = max(fb->format->hsub, fb->format->vsub);
}
+ if (rotated)
+ hsub = vsub = max(hsub, vsub);
+
if (src_x % hsub || src_w % hsub) {
- DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u for %sYUV planes\n",
- src_x, src_w, hsub, rotated ? "rotated " : "");
+ DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ src_x, src_w, hsub, yesno(rotated));
return -EINVAL;
}
if (src_y % vsub || src_h % vsub) {
- DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u for %sYUV planes\n",
- src_y, src_h, vsub, rotated ? "rotated " : "");
+ DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ src_y, src_h, vsub, yesno(rotated));
return -EINVAL;
}
@@ -355,9 +365,8 @@ static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
- unsigned int pixel_rate = crtc_state->pixel_rate;
- unsigned int src_w, src_h, dst_w, dst_h;
unsigned int num, den;
+ unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
skl_plane_ratio(crtc_state, plane_state, &num, &den);
@@ -365,17 +374,7 @@ static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
den *= 2;
- src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- dst_w = drm_rect_width(&plane_state->uapi.dst);
- dst_h = drm_rect_height(&plane_state->uapi.dst);
-
- /* Downscaling limits the maximum pixel rate */
- dst_w = min(src_w, dst_w);
- dst_h = min(src_h, dst_h);
-
- return DIV64_U64_ROUND_UP(mul_u32_u32(pixel_rate * num, src_w * src_h),
- mul_u32_u32(den, dst_w * dst_h));
+ return DIV_ROUND_UP(pixel_rate * num, den);
}
static unsigned int
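
Note: the deleted downscale arithmetic is assumed here to have moved into intel_plane_pixel_rate() (a helper not shown in this diff), which scales the CRTC pixel rate by the plane's src/dst ratio, leaving min_cdclk as a single ceiling division. A worked check of the arithmetic under that assumption; the helper below is a hypothetical reconstruction:

#include <assert.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Hypothetical stand-in for intel_plane_pixel_rate(). */
static uint64_t plane_pixel_rate(uint64_t crtc_rate,
				 uint64_t src_w, uint64_t src_h,
				 uint64_t dst_w, uint64_t dst_h)
{
	/* Only downscaling raises the rate the plane must sustain. */
	if (dst_w > src_w)
		dst_w = src_w;
	if (dst_h > src_h)
		dst_h = src_h;

	return DIV_ROUND_UP(crtc_rate * src_w * src_h, dst_w * dst_h);
}

int main(void)
{
	/* A 2x2 downscale quadruples the effective pixel rate. */
	uint64_t rate = plane_pixel_rate(100000, 1920, 1080, 960, 540);

	assert(rate == 400000);
	/* min cdclk = pixel_rate * num / den, e.g. num/den = 9/8. */
	assert(DIV_ROUND_UP(rate * 9, 8) == 450000);
	return 0;
}
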
@@ -2077,6 +2076,18 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
return 0;
}
+static bool intel_format_is_p01x(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -2155,6 +2166,15 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
return -EINVAL;
}
+ /* Wa_1606054188:tgl */
+ if (IS_TIGERLAKE(dev_priv) &&
+ plane_state->ckey.flags & I915_SET_COLORKEY_SOURCE &&
+ intel_format_is_p01x(fb->format->format)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Source color keying not supported with P01x formats\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -3011,7 +3031,6 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
struct intel_plane *plane;
enum drm_plane_type plane_type;
unsigned int supported_rotations;
- unsigned int possible_crtcs;
const u64 *modifiers;
const u32 *formats;
int num_formats;
@@ -3066,10 +3085,8 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
else
plane_type = DRM_PLANE_TYPE_OVERLAY;
- possible_crtcs = BIT(pipe);
-
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, plane_funcs,
+ 0, plane_funcs,
formats, num_formats, modifiers,
plane_type,
"plane %d%c", plane_id + 1,
@@ -3120,7 +3137,6 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
{
struct intel_plane *plane;
const struct drm_plane_funcs *plane_funcs;
- unsigned long possible_crtcs;
unsigned int supported_rotations;
const u64 *modifiers;
const u32 *formats;
@@ -3205,10 +3221,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->id = PLANE_SPRITE0 + sprite;
plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
- possible_crtcs = BIT(pipe);
-
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, plane_funcs,
+ 0, plane_funcs,
formats, num_formats, modifiers,
DRM_PLANE_TYPE_OVERLAY,
"sprite %c", sprite_name(pipe, sprite));
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 4f81ee26b7ab..d2e3a3a323e9 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -33,7 +33,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_connector.h"
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 7cba57ae72fe..95ad87d4ccb3 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -6,8 +6,6 @@
* Manasi Navare <[email protected]>
*/
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index b9f2e3ce5185..f4c362dc6e15 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -1591,59 +1591,6 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
-static enum drm_panel_orientation
-vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_encoder *encoder = intel_attached_encoder(connector);
- enum intel_display_power_domain power_domain;
- enum drm_panel_orientation orientation;
- struct intel_plane *plane;
- struct intel_crtc *crtc;
- intel_wakeref_t wakeref;
- enum pipe pipe;
- u32 val;
-
- if (!encoder->get_hw_state(encoder, &pipe))
- return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
-
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- plane = to_intel_plane(crtc->base.primary);
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
-
- val = intel_de_read(dev_priv, DSPCNTR(plane->i9xx_plane));
-
- if (!(val & DISPLAY_PLANE_ENABLE))
- orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
- else if (val & DISPPLANE_ROTATE_180)
- orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
- else
- orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-
- return orientation;
-}
-
-static enum drm_panel_orientation
-vlv_dsi_get_panel_orientation(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum drm_panel_orientation orientation;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- orientation = vlv_dsi_get_hw_panel_orientation(connector);
- if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
- return orientation;
- }
-
- return intel_dsi_get_panel_orientation(connector);
-}
-
static void vlv_dsi_add_properties(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1660,10 +1607,9 @@ static void vlv_dsi_add_properties(struct intel_connector *connector)
connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
- connector->base.display_info.panel_orientation =
- vlv_dsi_get_panel_orientation(connector);
- drm_connector_init_panel_orientation_property(
+ drm_connector_set_panel_orientation_with_quirk(
&connector->base,
+ intel_dsi_get_panel_orientation(connector),
connector->panel.fixed_mode->hdisplay,
connector->panel.fixed_mode->vdisplay);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 81366aa4812b..0598e5382a1d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -217,7 +217,7 @@ static void clear_pages_worker(struct work_struct *work)
0);
out_request:
if (unlikely(err)) {
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err = 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 3e82739bdbc0..68326ad3b2e0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -67,10 +67,9 @@
#include <linux/log2.h>
#include <linux/nospec.h>
-#include <drm/i915_drm.h>
-
#include "gt/gen6_ppgtt.h"
#include "gt/intel_context.h"
+#include "gt/intel_context_param.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_ring.h"
@@ -243,7 +242,6 @@ static void __free_engines(struct i915_gem_engines *e, unsigned int count)
if (!e->engines[count])
continue;
- RCU_INIT_POINTER(e->engines[count]->gem_context, NULL);
intel_context_put(e->engines[count]);
}
kfree(e);
@@ -256,7 +254,51 @@ static void free_engines(struct i915_gem_engines *e)
static void free_engines_rcu(struct rcu_head *rcu)
{
- free_engines(container_of(rcu, struct i915_gem_engines, rcu));
+ struct i915_gem_engines *engines =
+ container_of(rcu, struct i915_gem_engines, rcu);
+
+ i915_sw_fence_fini(&engines->fence);
+ free_engines(engines);
+}
+
+static int __i915_sw_fence_call
+engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ struct i915_gem_engines *engines =
+ container_of(fence, typeof(*engines), fence);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ if (!list_empty(&engines->link)) {
+ struct i915_gem_context *ctx = engines->ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->stale.lock, flags);
+ list_del(&engines->link);
+ spin_unlock_irqrestore(&ctx->stale.lock, flags);
+ }
+ i915_gem_context_put(engines->ctx);
+ break;
+
+ case FENCE_FREE:
+ init_rcu_head(&engines->rcu);
+ call_rcu(&engines->rcu, free_engines_rcu);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct i915_gem_engines *alloc_engines(unsigned int count)
+{
+ struct i915_gem_engines *e;
+
+ e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
+ if (!e)
+ return NULL;
+
+ i915_sw_fence_init(&e->fence, engines_notify);
+ return e;
}
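
Note: engines_notify() above is a two-phase i915_sw_fence callback: FENCE_COMPLETE fires once every awaited request has signalled (unlink from the context's stale list, drop the context reference), and FENCE_FREE fires when the last fence reference is gone, at which point the actual free is deferred through RCU so lockless readers of the engines array stay safe. A userspace model of the two phases; there is no real fence or RCU here and the struct is hypothetical:

#include <stdio.h>

enum notify_state { FENCE_COMPLETE, FENCE_FREE };

struct engines {
	int linked;	/* still on the context's stale list? */
	int ctx_refs;	/* reference held on the owning context */
};

static void engines_notify(struct engines *e, enum notify_state state)
{
	switch (state) {
	case FENCE_COMPLETE:
		/* All awaited requests signalled: unlink, put context. */
		e->linked = 0;
		e->ctx_refs--;
		break;
	case FENCE_FREE:
		/* Last fence reference gone: free (via call_rcu in i915). */
		printf("freeing engines (ctx_refs=%d)\n", e->ctx_refs);
		break;
	}
}

int main(void)
{
	struct engines e = { .linked = 1, .ctx_refs = 1 };

	engines_notify(&e, FENCE_COMPLETE);
	engines_notify(&e, FENCE_FREE);
	return 0;
}
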
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
@@ -266,12 +308,10 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
struct i915_gem_engines *e;
enum intel_engine_id id;
- e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
+ e = alloc_engines(I915_NUM_ENGINES);
if (!e)
return ERR_PTR(-ENOMEM);
- e->ctx = ctx;
-
for_each_engine(engine, gt, id) {
struct intel_context *ce;
@@ -305,7 +345,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
list_del(&ctx->link);
spin_unlock(&ctx->i915->gem.contexts.lock);
- free_engines(rcu_access_pointer(ctx->engines));
mutex_destroy(&ctx->engines_mutex);
if (ctx->timeline)
@@ -492,30 +531,75 @@ static void kill_engines(struct i915_gem_engines *engines)
static void kill_stale_engines(struct i915_gem_context *ctx)
{
struct i915_gem_engines *pos, *next;
- unsigned long flags;
- spin_lock_irqsave(&ctx->stale.lock, flags);
+ spin_lock_irq(&ctx->stale.lock);
+ GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
- if (!i915_sw_fence_await(&pos->fence))
+ if (!i915_sw_fence_await(&pos->fence)) {
+ list_del_init(&pos->link);
continue;
+ }
- spin_unlock_irqrestore(&ctx->stale.lock, flags);
+ spin_unlock_irq(&ctx->stale.lock);
kill_engines(pos);
- spin_lock_irqsave(&ctx->stale.lock, flags);
+ spin_lock_irq(&ctx->stale.lock);
+ GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
list_safe_reset_next(pos, next, link);
list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
i915_sw_fence_complete(&pos->fence);
}
- spin_unlock_irqrestore(&ctx->stale.lock, flags);
+ spin_unlock_irq(&ctx->stale.lock);
}
static void kill_context(struct i915_gem_context *ctx)
{
kill_stale_engines(ctx);
- kill_engines(__context_engines_static(ctx));
+}
+
+static void engines_idle_release(struct i915_gem_context *ctx,
+ struct i915_gem_engines *engines)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ INIT_LIST_HEAD(&engines->link);
+
+ engines->ctx = i915_gem_context_get(ctx);
+
+ for_each_gem_engine(ce, engines, it) {
+ struct dma_fence *fence;
+ int err = 0;
+
+ /* serialises with execbuf */
+ set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
+ if (!intel_context_pin_if_active(ce))
+ continue;
+
+ fence = i915_active_fence_get(&ce->timeline->last_request);
+ if (fence) {
+ err = i915_sw_fence_await_dma_fence(&engines->fence,
+ fence, 0,
+ GFP_KERNEL);
+ dma_fence_put(fence);
+ }
+ intel_context_unpin(ce);
+ if (err < 0)
+ goto kill;
+ }
+
+ spin_lock_irq(&ctx->stale.lock);
+ if (!i915_gem_context_is_closed(ctx))
+ list_add_tail(&engines->link, &ctx->stale.engines);
+ spin_unlock_irq(&ctx->stale.lock);
+
+kill:
+ if (list_empty(&engines->link)) /* raced, already closed */
+ kill_engines(engines);
+
+ i915_sw_fence_commit(&engines->fence);
}
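
engines_idle_release() is the retirement barrier for a replaced engines block: it marks each context image closed for execbuf, awaits the last request on every still-pinned engine via the block's sw-fence, and parks the block on ctx->stale.engines until those requests retire (or kills it immediately if the context already closed). The assumed ordering, condensed as a comment:

	/*
	 * Ordering sketch (assumed from the function above):
	 *  1. set_bit(CONTEXT_CLOSED_BIT)   -> execbuf refuses the engine
	 *  2. await last_request per engine -> fence completes only when idle
	 *  3. list_add under stale.lock     -> visible to kill_stale_engines()
	 *  4. i915_sw_fence_commit()        -> arm FENCE_COMPLETE / FENCE_FREE
	 */
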
static void set_closed_name(struct i915_gem_context *ctx)
@@ -539,11 +623,16 @@ static void context_close(struct i915_gem_context *ctx)
{
struct i915_address_space *vm;
+ /* Flush any concurrent set_engines() */
+ mutex_lock(&ctx->engines_mutex);
+ engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
i915_gem_context_set_closed(ctx);
- set_closed_name(ctx);
+ mutex_unlock(&ctx->engines_mutex);
mutex_lock(&ctx->mutex);
+ set_closed_name(ctx);
+
vm = i915_gem_context_vm(ctx);
if (vm)
i915_vm_close(vm);
@@ -668,23 +757,30 @@ err_free:
return ERR_PTR(err);
}
-static void
+static int
context_apply_all(struct i915_gem_context *ctx,
- void (*fn)(struct intel_context *ce, void *data),
+ int (*fn)(struct intel_context *ce, void *data),
void *data)
{
struct i915_gem_engines_iter it;
struct intel_context *ce;
+ int err = 0;
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
- fn(ce, data);
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ err = fn(ce, data);
+ if (err)
+ break;
+ }
i915_gem_context_unlock_engines(ctx);
+
+ return err;
}
-static void __apply_ppgtt(struct intel_context *ce, void *vm)
+static int __apply_ppgtt(struct intel_context *ce, void *vm)
{
i915_vm_put(ce->vm);
ce->vm = i915_vm_get(vm);
+ return 0;
}
static struct i915_address_space *
@@ -722,9 +818,10 @@ static void __set_timeline(struct intel_timeline **dst,
intel_timeline_put(old);
}
-static void __apply_timeline(struct intel_context *ce, void *timeline)
+static int __apply_timeline(struct intel_context *ce, void *timeline)
{
__set_timeline(&ce->timeline, timeline);
+ return 0;
}
static void __assign_timeline(struct i915_gem_context *ctx,
@@ -806,6 +903,7 @@ void i915_gem_init__contexts(struct drm_i915_private *i915)
void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
{
flush_work(&i915->gem.contexts.free_work);
+ rcu_barrier(); /* and flush the leftover RCU frees */
}
static int gem_context_register(struct i915_gem_context *ctx,
@@ -971,6 +1069,30 @@ static void cb_retire(struct i915_active *base)
kfree(cb);
}
+static inline struct i915_gem_engines *
+__context_engines_await(const struct i915_gem_context *ctx)
+{
+ struct i915_gem_engines *engines;
+
+ rcu_read_lock();
+ do {
+ engines = rcu_dereference(ctx->engines);
+ if (unlikely(!engines))
+ break;
+
+ if (unlikely(!i915_sw_fence_await(&engines->fence)))
+ continue;
+
+ if (likely(engines == rcu_access_pointer(ctx->engines)))
+ break;
+
+ i915_sw_fence_complete(&engines->fence);
+ } while (1);
+ rcu_read_unlock();
+
+ return engines;
+}
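
A non-NULL return from __context_engines_await() carries a hold on the block's fence, so every caller must pair it with i915_sw_fence_complete(), exactly as context_barrier_task() does below. A minimal usage sketch (example_for_each_engine is a stand-in name):

	static int example_for_each_engine(struct i915_gem_context *ctx)
	{
		struct i915_gem_engines_iter it;
		struct i915_gem_engines *e;
		struct intel_context *ce;

		e = __context_engines_await(ctx);
		if (!e)
			return -ENOENT; /* context already closed */

		for_each_gem_engine(ce, e, it) {
			/* ... ce stays valid while the fence is held ... */
		}

		i915_sw_fence_complete(&e->fence); /* drop the hold */
		return 0;
	}
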
+
I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
static int context_barrier_task(struct i915_gem_context *ctx,
intel_engine_mask_t engines,
@@ -981,6 +1103,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
{
struct context_barrier_task *cb;
struct i915_gem_engines_iter it;
+ struct i915_gem_engines *e;
struct intel_context *ce;
int err = 0;
@@ -997,7 +1120,13 @@ static int context_barrier_task(struct i915_gem_context *ctx,
return err;
}
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ e = __context_engines_await(ctx);
+ if (!e) {
+ i915_active_release(&cb->base);
+ return -ENOENT;
+ }
+
+ for_each_gem_engine(ce, e, it) {
struct i915_request *rq;
if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
@@ -1028,7 +1157,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (err)
break;
}
- i915_gem_context_unlock_engines(ctx);
+ i915_sw_fence_complete(&e->fence);
cb->task = err ? NULL : task; /* caller needs to unwind instead */
cb->data = data;
@@ -1215,6 +1344,63 @@ out:
return err;
}
+static int __apply_ringsize(struct intel_context *ce, void *sz)
+{
+ return intel_context_set_ring_size(ce, (unsigned long)sz);
+}
+
+static int set_ringsize(struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
+{
+ if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
+ return -ENODEV;
+
+ if (args->size)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(args->value, I915_GTT_PAGE_SIZE))
+ return -EINVAL;
+
+ if (args->value < I915_GTT_PAGE_SIZE)
+ return -EINVAL;
+
+ if (args->value > 128 * I915_GTT_PAGE_SIZE)
+ return -EINVAL;
+
+ return context_apply_all(ctx,
+ __apply_ringsize,
+ __intel_context_ring_size(args->value));
+}
+
+static int __get_ringsize(struct intel_context *ce, void *arg)
+{
+ long sz;
+
+ sz = intel_context_get_ring_size(ce);
+ GEM_BUG_ON(sz > INT_MAX);
+
+ return sz; /* stop on first engine */
+}
+
+static int get_ringsize(struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
+{
+ int sz;
+
+ if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
+ return -ENODEV;
+
+ if (args->size)
+ return -EINVAL;
+
+ sz = context_apply_all(ctx, __get_ringsize, NULL);
+ if (sz < 0)
+ return sz;
+
+ args->value = sz;
+ return 0;
+}
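
From userspace the new param behaves like any other context param: value is a byte size, page-aligned, between one page and 128 pages (4 KiB to 512 KiB with 4 KiB pages), with size left zero. A hedged example against the uapi added by this series (libdrm's drmIoctl is assumed; set_ring_size is a stand-in name):

	#include <drm/i915_drm.h>
	#include <xf86drm.h>

	/* Sketch: request a 64 KiB ring for a context. */
	static int set_ring_size(int fd, __u32 ctx_id)
	{
		struct drm_i915_gem_context_param p = {
			.ctx_id = ctx_id,
			.param = I915_CONTEXT_PARAM_RINGSIZE,
			.value = 64 * 1024, /* page-aligned, 4 KiB..512 KiB */
		};

		return drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
	}
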
+
static int
user_to_context_sseu(struct drm_i915_private *i915,
const struct drm_i915_gem_context_param_sseu *user,
@@ -1390,7 +1576,7 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
if (!HAS_EXECLISTS(i915))
return -ENODEV;
- if (USES_GUC_SUBMISSION(i915))
+ if (intel_uc_uses_guc_submission(&i915->gt.uc))
 return -ENODEV; /* not implemented yet */
if (get_user(idx, &ext->engine_index))
@@ -1562,77 +1748,6 @@ static const i915_user_extension_fn set_engines__extensions[] = {
[I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
};
-static int engines_notify(struct i915_sw_fence *fence,
- enum i915_sw_fence_notify state)
-{
- struct i915_gem_engines *engines =
- container_of(fence, typeof(*engines), fence);
-
- switch (state) {
- case FENCE_COMPLETE:
- if (!list_empty(&engines->link)) {
- struct i915_gem_context *ctx = engines->ctx;
- unsigned long flags;
-
- spin_lock_irqsave(&ctx->stale.lock, flags);
- list_del(&engines->link);
- spin_unlock_irqrestore(&ctx->stale.lock, flags);
- }
- break;
-
- case FENCE_FREE:
- init_rcu_head(&engines->rcu);
- call_rcu(&engines->rcu, free_engines_rcu);
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-static void engines_idle_release(struct i915_gem_engines *engines)
-{
- struct i915_gem_engines_iter it;
- struct intel_context *ce;
- unsigned long flags;
-
- GEM_BUG_ON(!engines);
- i915_sw_fence_init(&engines->fence, engines_notify);
-
- INIT_LIST_HEAD(&engines->link);
- spin_lock_irqsave(&engines->ctx->stale.lock, flags);
- if (!i915_gem_context_is_closed(engines->ctx))
- list_add(&engines->link, &engines->ctx->stale.engines);
- spin_unlock_irqrestore(&engines->ctx->stale.lock, flags);
- if (list_empty(&engines->link)) /* raced, already closed */
- goto kill;
-
- for_each_gem_engine(ce, engines, it) {
- struct dma_fence *fence;
- int err;
-
- if (!ce->timeline)
- continue;
-
- fence = i915_active_fence_get(&ce->timeline->last_request);
- if (!fence)
- continue;
-
- err = i915_sw_fence_await_dma_fence(&engines->fence,
- fence, 0,
- GFP_KERNEL);
-
- dma_fence_put(fence);
- if (err < 0)
- goto kill;
- }
- goto out;
-
-kill:
- kill_engines(engines);
-out:
- i915_sw_fence_commit(&engines->fence);
-}
-
static int
set_engines(struct i915_gem_context *ctx,
const struct drm_i915_gem_context_param *args)
@@ -1669,14 +1784,10 @@ set_engines(struct i915_gem_context *ctx,
* first 64 engines defined here.
*/
num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
-
- set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
- GFP_KERNEL);
+ set.engines = alloc_engines(num_engines);
if (!set.engines)
return -ENOMEM;
- set.engines->ctx = ctx;
-
for (n = 0; n < num_engines; n++) {
struct i915_engine_class_instance ci;
struct intel_engine_cs *engine;
@@ -1729,6 +1840,11 @@ set_engines(struct i915_gem_context *ctx,
replace:
mutex_lock(&ctx->engines_mutex);
+ if (i915_gem_context_is_closed(ctx)) {
+ mutex_unlock(&ctx->engines_mutex);
+ free_engines(set.engines);
+ return -ENOENT;
+ }
if (args->size)
i915_gem_context_set_user_engines(ctx);
else
@@ -1737,7 +1853,7 @@ replace:
mutex_unlock(&ctx->engines_mutex);
/* Keep track of old engine sets for kill_context() */
- engines_idle_release(set.engines);
+ engines_idle_release(ctx, set.engines);
return 0;
}
@@ -1748,7 +1864,7 @@ __copy_engines(struct i915_gem_engines *e)
struct i915_gem_engines *copy;
unsigned int n;
- copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
+ copy = alloc_engines(e->num_engines);
if (!copy)
return ERR_PTR(-ENOMEM);
@@ -1852,17 +1968,19 @@ set_persistence(struct i915_gem_context *ctx,
return __context_set_persistence(ctx, args->value);
}
-static void __apply_priority(struct intel_context *ce, void *arg)
+static int __apply_priority(struct intel_context *ce, void *arg)
{
struct i915_gem_context *ctx = arg;
if (!intel_engine_has_semaphores(ce->engine))
- return;
+ return 0;
if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
intel_context_set_use_semaphores(ce);
else
intel_context_clear_use_semaphores(ce);
+
+ return 0;
}
static int set_priority(struct i915_gem_context *ctx,
@@ -1955,6 +2073,10 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
ret = set_persistence(ctx, args);
break;
+ case I915_CONTEXT_PARAM_RINGSIZE:
+ ret = set_ringsize(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -1983,6 +2105,18 @@ static int create_setparam(struct i915_user_extension __user *ext, void *data)
return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
}
+static int copy_ring_size(struct intel_context *dst,
+ struct intel_context *src)
+{
+ long sz;
+
+ sz = intel_context_get_ring_size(src);
+ if (sz < 0)
+ return sz;
+
+ return intel_context_set_ring_size(dst, sz);
+}
+
static int clone_engines(struct i915_gem_context *dst,
struct i915_gem_context *src)
{
@@ -1991,12 +2125,10 @@ static int clone_engines(struct i915_gem_context *dst,
bool user_engines;
unsigned long n;
- clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
+ clone = alloc_engines(e->num_engines);
if (!clone)
goto err_unlock;
- clone->ctx = dst;
-
for (n = 0; n < e->num_engines; n++) {
struct intel_engine_cs *engine;
@@ -2026,6 +2158,12 @@ static int clone_engines(struct i915_gem_context *dst,
}
intel_context_set_gem(clone->engines[n], dst);
+
+ /* Copy across the preferred ringsize */
+ if (copy_ring_size(clone->engines[n], e->engines[n])) {
+ __free_engines(clone, n + 1);
+ goto err_unlock;
+ }
}
clone->num_engines = n;
@@ -2033,8 +2171,7 @@ static int clone_engines(struct i915_gem_context *dst,
i915_gem_context_unlock_engines(src);
/* Serialised by constructor */
- free_engines(__context_engines_static(dst));
- RCU_INIT_POINTER(dst->engines, clone);
+ engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
if (user_engines)
i915_gem_context_set_user_engines(dst);
else
@@ -2388,6 +2525,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->value = i915_gem_context_is_persistent(ctx);
break;
+ case I915_CONTEXT_PARAM_RINGSIZE:
+ ret = get_ringsize(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -2461,6 +2602,9 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
const struct i915_gem_engines *e = it->engines;
struct intel_context *ctx;
+ if (unlikely(!e))
+ return NULL;
+
do {
if (it->idx >= e->num_engines)
return NULL;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 3ae61a355d87..f1d884d304bd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -192,12 +192,16 @@ i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
static inline struct intel_context *
i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
{
- struct intel_context *ce = ERR_PTR(-EINVAL);
+ struct intel_context *ce;
rcu_read_lock(); {
struct i915_gem_engines *e = rcu_dereference(ctx->engines);
- if (likely(idx < e->num_engines && e->engines[idx]))
+ if (unlikely(!e)) /* context was closed! */
+ ce = ERR_PTR(-ENOENT);
+ else if (likely(idx < e->num_engines && e->engines[idx]))
ce = intel_context_get(e->engines[idx]);
+ else
+ ce = ERR_PTR(-EINVAL);
} rcu_read_unlock();
return ce;
@@ -207,7 +211,6 @@ static inline void
i915_gem_engines_iter_init(struct i915_gem_engines_iter *it,
struct i915_gem_engines *engines)
{
- GEM_BUG_ON(!engines);
it->engines = engines;
it->idx = 0;
}
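
With this header change, i915_gem_context_get_engine() distinguishes a closed context (-ENOENT: the engines pointer is gone) from a bad index (-EINVAL). A small caller sketch (example_lookup is a stand-in name):

	static int example_lookup(struct i915_gem_context *ctx, unsigned int idx)
	{
		struct intel_context *ce;

		ce = i915_gem_context_get_engine(ctx, idx);
		if (IS_ERR(ce))
			return PTR_ERR(ce); /* -ENOENT: closed, -EINVAL: bad idx */

		/* ... use ce ... */
		intel_context_put(ce);
		return 0;
	}
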
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 87fa5f42c39a..36d069504836 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -10,7 +10,6 @@
#include <linux/uaccess.h>
#include <drm/drm_syncobj.h>
-#include <drm/i915_drm.h>
#include "display/intel_frontbuffer.h"
@@ -28,6 +27,19 @@
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
+struct eb_vma {
+ struct i915_vma *vma;
+ unsigned int flags;
+
+ /** This vma's place in the execbuf reservation list */
+ struct drm_i915_gem_exec_object2 *exec;
+ struct list_head bind_link;
+ struct list_head reloc_link;
+
+ struct hlist_node node;
+ u32 handle;
+};
+
enum {
FORCE_CPU_RELOC = 1,
FORCE_GTT_RELOC,
@@ -35,17 +47,15 @@ enum {
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};
-#define __EXEC_OBJECT_HAS_REF BIT(31)
-#define __EXEC_OBJECT_HAS_PIN BIT(30)
-#define __EXEC_OBJECT_HAS_FENCE BIT(29)
-#define __EXEC_OBJECT_NEEDS_MAP BIT(28)
-#define __EXEC_OBJECT_NEEDS_BIAS BIT(27)
-#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 27) /* all of the above */
+#define __EXEC_OBJECT_HAS_PIN BIT(31)
+#define __EXEC_OBJECT_HAS_FENCE BIT(30)
+#define __EXEC_OBJECT_NEEDS_MAP BIT(29)
+#define __EXEC_OBJECT_NEEDS_BIAS BIT(28)
+#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 28) /* all of the above */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
#define __EXEC_HAS_RELOC BIT(31)
-#define __EXEC_VALIDATED BIT(30)
-#define __EXEC_INTERNAL_FLAGS (~0u << 30)
+#define __EXEC_INTERNAL_FLAGS (~0u << 31)
#define UPDATE PIN_OFFSET_FIXED
#define BATCH_OFFSET_BIAS (256*1024)
@@ -220,15 +230,14 @@ struct i915_execbuffer {
struct drm_file *file; /** per-file lookup tables and limits */
struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
- struct i915_vma **vma;
- unsigned int *flags;
+ struct eb_vma *vma;
struct intel_engine_cs *engine; /** engine to queue the request to */
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
struct i915_request *request; /** our request to build */
- struct i915_vma *batch; /** identity of the batch obj/vma */
+ struct eb_vma *batch; /** identity of the batch obj/vma */
struct i915_vma *trampoline; /** trampoline used for chaining */
/** actual size of execobj[] as we may extend it for the cmdparser */
@@ -276,8 +285,6 @@ struct i915_execbuffer {
struct hlist_head *buckets; /** ht for relocation handles */
};
-#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])
-
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
return intel_engine_requires_cmd_parser(eb->engine) ||
@@ -364,9 +371,9 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
static inline bool
eb_pin_vma(struct i915_execbuffer *eb,
const struct drm_i915_gem_exec_object2 *entry,
- struct i915_vma *vma)
+ struct eb_vma *ev)
{
- unsigned int exec_flags = *vma->exec_flags;
+ struct i915_vma *vma = ev->vma;
u64 pin_flags;
if (vma->node.size)
@@ -375,24 +382,24 @@ eb_pin_vma(struct i915_execbuffer *eb,
pin_flags = entry->offset & PIN_OFFSET_MASK;
pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
- if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
+ if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
pin_flags |= PIN_GLOBAL;
if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
return false;
- if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
+ if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
if (unlikely(i915_vma_pin_fence(vma))) {
i915_vma_unpin(vma);
return false;
}
if (vma->fence)
- exec_flags |= __EXEC_OBJECT_HAS_FENCE;
+ ev->flags |= __EXEC_OBJECT_HAS_FENCE;
}
- *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
- return !eb_vma_misplaced(entry, vma, exec_flags);
+ ev->flags |= __EXEC_OBJECT_HAS_PIN;
+ return !eb_vma_misplaced(entry, vma, ev->flags);
}
static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
@@ -406,13 +413,13 @@ static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
}
static inline void
-eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
+eb_unreserve_vma(struct eb_vma *ev)
{
- if (!(*flags & __EXEC_OBJECT_HAS_PIN))
+ if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
return;
- __eb_unreserve_vma(vma, *flags);
- *flags &= ~__EXEC_OBJECT_RESERVED;
+ __eb_unreserve_vma(ev->vma, ev->flags);
+ ev->flags &= ~__EXEC_OBJECT_RESERVED;
}
static int
@@ -420,11 +427,11 @@ eb_validate_vma(struct i915_execbuffer *eb,
struct drm_i915_gem_exec_object2 *entry,
struct i915_vma *vma)
{
- struct drm_i915_private *i915 = eb->i915;
if (unlikely(entry->flags & eb->invalid_flags))
return -EINVAL;
- if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
+ if (unlikely(entry->alignment &&
+ !is_power_of_2_u64(entry->alignment)))
return -EINVAL;
/*
@@ -442,14 +449,6 @@ eb_validate_vma(struct i915_execbuffer *eb,
} else {
entry->pad_to_size = 0;
}
-
- if (unlikely(vma->exec_flags)) {
- drm_dbg(&i915->drm,
- "Object [handle %d, index %d] appears more than once in object list\n",
- entry->handle, (int)(entry - eb->exec));
- return -EINVAL;
- }
-
/*
* From drm_mm perspective address space is continuous,
* so from this point we're always using non-canonical
@@ -472,41 +471,29 @@ eb_validate_vma(struct i915_execbuffer *eb,
return 0;
}
-static int
+static void
eb_add_vma(struct i915_execbuffer *eb,
unsigned int i, unsigned batch_idx,
struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
- int err;
+ struct eb_vma *ev = &eb->vma[i];
GEM_BUG_ON(i915_vma_is_closed(vma));
- if (!(eb->args->flags & __EXEC_VALIDATED)) {
- err = eb_validate_vma(eb, entry, vma);
- if (unlikely(err))
- return err;
- }
+ ev->vma = i915_vma_get(vma);
+ ev->exec = entry;
+ ev->flags = entry->flags;
if (eb->lut_size > 0) {
- vma->exec_handle = entry->handle;
- hlist_add_head(&vma->exec_node,
+ ev->handle = entry->handle;
+ hlist_add_head(&ev->node,
&eb->buckets[hash_32(entry->handle,
eb->lut_size)]);
}
if (entry->relocation_count)
- list_add_tail(&vma->reloc_link, &eb->relocs);
-
- /*
- * Stash a pointer from the vma to execobj, so we can query its flags,
- * size, alignment etc as provided by the user. Also we stash a pointer
- * to the vma inside the execobj so that we can use a direct lookup
- * to find the right target VMA when doing relocations.
- */
- eb->vma[i] = vma;
- eb->flags[i] = entry->flags;
- vma->exec_flags = &eb->flags[i];
+ list_add_tail(&ev->reloc_link, &eb->relocs);
/*
* SNA is doing fancy tricks with compressing batch buffers, which leads
@@ -519,30 +506,23 @@ eb_add_vma(struct i915_execbuffer *eb,
*/
if (i == batch_idx) {
if (entry->relocation_count &&
- !(eb->flags[i] & EXEC_OBJECT_PINNED))
- eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
+ !(ev->flags & EXEC_OBJECT_PINNED))
+ ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
if (eb->reloc_cache.has_fence)
- eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
+ ev->flags |= EXEC_OBJECT_NEEDS_FENCE;
- eb->batch = vma;
+ eb->batch = ev;
}
- err = 0;
- if (eb_pin_vma(eb, entry, vma)) {
+ if (eb_pin_vma(eb, entry, ev)) {
if (entry->offset != vma->node.start) {
entry->offset = vma->node.start | UPDATE;
eb->args->flags |= __EXEC_HAS_RELOC;
}
} else {
- eb_unreserve_vma(vma, vma->exec_flags);
-
- list_add_tail(&vma->exec_link, &eb->unbound);
- if (drm_mm_node_allocated(&vma->node))
- err = i915_vma_unbind(vma);
- if (unlikely(err))
- vma->exec_flags = NULL;
+ eb_unreserve_vma(ev);
+ list_add_tail(&ev->bind_link, &eb->unbound);
}
- return err;
}
static inline int use_cpu_reloc(const struct reloc_cache *cache,
@@ -563,14 +543,14 @@ static inline int use_cpu_reloc(const struct reloc_cache *cache,
}
static int eb_reserve_vma(const struct i915_execbuffer *eb,
- struct i915_vma *vma)
+ struct eb_vma *ev,
+ u64 pin_flags)
{
- struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
- unsigned int exec_flags = *vma->exec_flags;
- u64 pin_flags;
+ struct drm_i915_gem_exec_object2 *entry = ev->exec;
+ unsigned int exec_flags = ev->flags;
+ struct i915_vma *vma = ev->vma;
int err;
- pin_flags = PIN_USER | PIN_NONBLOCK;
if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
pin_flags |= PIN_GLOBAL;
@@ -584,11 +564,16 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
pin_flags |= PIN_MAPPABLE;
- if (exec_flags & EXEC_OBJECT_PINNED) {
+ if (exec_flags & EXEC_OBJECT_PINNED)
pin_flags |= entry->offset | PIN_OFFSET_FIXED;
- pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */
- } else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) {
+ else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+
+ if (drm_mm_node_allocated(&vma->node) &&
+ eb_vma_misplaced(entry, vma, ev->flags)) {
+ err = i915_vma_unbind(vma);
+ if (err)
+ return err;
}
err = i915_vma_pin(vma,
@@ -613,8 +598,8 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
exec_flags |= __EXEC_OBJECT_HAS_FENCE;
}
- *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
- GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));
+ ev->flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
+ GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));
return 0;
}
@@ -622,10 +607,11 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
static int eb_reserve(struct i915_execbuffer *eb)
{
const unsigned int count = eb->buffer_count;
+ unsigned int pin_flags = PIN_USER | PIN_NONBLOCK;
struct list_head last;
- struct i915_vma *vma;
+ struct eb_vma *ev;
unsigned int i, pass;
- int err;
+ int err = 0;
/*
* Attempt to pin all of the buffers into the GTT.
@@ -641,44 +627,54 @@ static int eb_reserve(struct i915_execbuffer *eb)
* room for the earlier objects *unless* we need to defragment.
*/
+ if (mutex_lock_interruptible(&eb->i915->drm.struct_mutex))
+ return -EINTR;
+
pass = 0;
- err = 0;
do {
- list_for_each_entry(vma, &eb->unbound, exec_link) {
- err = eb_reserve_vma(eb, vma);
+ list_for_each_entry(ev, &eb->unbound, bind_link) {
+ err = eb_reserve_vma(eb, ev, pin_flags);
if (err)
break;
}
- if (err != -ENOSPC)
- return err;
+ if (!(err == -ENOSPC || err == -EAGAIN))
+ break;
/* Resort *all* the objects into priority order */
INIT_LIST_HEAD(&eb->unbound);
INIT_LIST_HEAD(&last);
for (i = 0; i < count; i++) {
- unsigned int flags = eb->flags[i];
- struct i915_vma *vma = eb->vma[i];
+ unsigned int flags;
+ ev = &eb->vma[i];
+ flags = ev->flags;
if (flags & EXEC_OBJECT_PINNED &&
flags & __EXEC_OBJECT_HAS_PIN)
continue;
- eb_unreserve_vma(vma, &eb->flags[i]);
+ eb_unreserve_vma(ev);
if (flags & EXEC_OBJECT_PINNED)
/* Pinned must have their slot */
- list_add(&vma->exec_link, &eb->unbound);
+ list_add(&ev->bind_link, &eb->unbound);
else if (flags & __EXEC_OBJECT_NEEDS_MAP)
/* Map require the lowest 256MiB (aperture) */
- list_add_tail(&vma->exec_link, &eb->unbound);
+ list_add_tail(&ev->bind_link, &eb->unbound);
else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
/* Prioritise 4GiB region for restricted bo */
- list_add(&vma->exec_link, &last);
+ list_add(&ev->bind_link, &last);
else
- list_add_tail(&vma->exec_link, &last);
+ list_add_tail(&ev->bind_link, &last);
}
list_splice_tail(&last, &eb->unbound);
+ if (err == -EAGAIN) {
+ mutex_unlock(&eb->i915->drm.struct_mutex);
+ flush_workqueue(eb->i915->mm.userptr_wq);
+ mutex_lock(&eb->i915->drm.struct_mutex);
+ continue;
+ }
+
switch (pass++) {
case 0:
break;
@@ -689,13 +685,20 @@ static int eb_reserve(struct i915_execbuffer *eb)
err = i915_gem_evict_vm(eb->context->vm);
mutex_unlock(&eb->context->vm->mutex);
if (err)
- return err;
+ goto unlock;
break;
default:
- return -ENOSPC;
+ err = -ENOSPC;
+ goto unlock;
}
+
+ pin_flags = PIN_USER;
} while (1);
+
+unlock:
+ mutex_unlock(&eb->i915->drm.struct_mutex);
+ return err;
}
static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
@@ -732,17 +735,14 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
unsigned int i, batch;
int err;
+ if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
+ return -ENOENT;
+
INIT_LIST_HEAD(&eb->relocs);
INIT_LIST_HEAD(&eb->unbound);
batch = eb_batch_index(eb);
- mutex_lock(&eb->gem_context->mutex);
- if (unlikely(i915_gem_context_is_closed(eb->gem_context))) {
- err = -ENOENT;
- goto err_ctx;
- }
-
for (i = 0; i < eb->buffer_count; i++) {
u32 handle = eb->exec[i].handle;
struct i915_lut_handle *lut;
@@ -787,45 +787,37 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
i915_gem_object_unlock(obj);
add_vma:
- err = eb_add_vma(eb, i, batch, vma);
+ err = eb_validate_vma(eb, &eb->exec[i], vma);
if (unlikely(err))
goto err_vma;
- GEM_BUG_ON(vma != eb->vma[i]);
- GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
- GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
- eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
+ eb_add_vma(eb, i, batch, vma);
}
- mutex_unlock(&eb->gem_context->mutex);
-
- eb->args->flags |= __EXEC_VALIDATED;
- return eb_reserve(eb);
+ return 0;
err_obj:
i915_gem_object_put(obj);
err_vma:
- eb->vma[i] = NULL;
-err_ctx:
- mutex_unlock(&eb->gem_context->mutex);
+ eb->vma[i].vma = NULL;
return err;
}
-static struct i915_vma *
+static struct eb_vma *
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
{
if (eb->lut_size < 0) {
if (handle >= -eb->lut_size)
return NULL;
- return eb->vma[handle];
+ return &eb->vma[handle];
} else {
struct hlist_head *head;
- struct i915_vma *vma;
+ struct eb_vma *ev;
head = &eb->buckets[hash_32(handle, eb->lut_size)];
- hlist_for_each_entry(vma, head, exec_node) {
- if (vma->exec_handle == handle)
- return vma;
+ hlist_for_each_entry(ev, head, node) {
+ if (ev->handle == handle)
+ return ev;
}
return NULL;
}
@@ -837,32 +829,21 @@ static void eb_release_vmas(const struct i915_execbuffer *eb)
unsigned int i;
for (i = 0; i < count; i++) {
- struct i915_vma *vma = eb->vma[i];
- unsigned int flags = eb->flags[i];
+ struct eb_vma *ev = &eb->vma[i];
+ struct i915_vma *vma = ev->vma;
if (!vma)
break;
- GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
- vma->exec_flags = NULL;
- eb->vma[i] = NULL;
+ eb->vma[i].vma = NULL;
- if (flags & __EXEC_OBJECT_HAS_PIN)
- __eb_unreserve_vma(vma, flags);
+ if (ev->flags & __EXEC_OBJECT_HAS_PIN)
+ __eb_unreserve_vma(vma, ev->flags);
- if (flags & __EXEC_OBJECT_HAS_REF)
- i915_vma_put(vma);
+ i915_vma_put(vma);
}
}
-static void eb_reset_vmas(const struct i915_execbuffer *eb)
-{
- eb_release_vmas(eb);
- if (eb->lut_size > 0)
- memset(eb->buckets, 0,
- sizeof(struct hlist_head) << eb->lut_size);
-}
-
static void eb_destroy(const struct i915_execbuffer *eb)
{
GEM_BUG_ON(eb->reloc_cache.rq);
@@ -1198,7 +1179,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto out_pool;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
@@ -1329,11 +1310,11 @@ out:
static u64
eb_relocate_entry(struct i915_execbuffer *eb,
- struct i915_vma *vma,
+ struct eb_vma *ev,
const struct drm_i915_gem_relocation_entry *reloc)
{
struct drm_i915_private *i915 = eb->i915;
- struct i915_vma *target;
+ struct eb_vma *target;
int err;
/* we've already hold a reference to all valid objects */
@@ -1365,7 +1346,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
}
if (reloc->write_domain) {
- *target->exec_flags |= EXEC_OBJECT_WRITE;
+ target->flags |= EXEC_OBJECT_WRITE;
/*
* Sandybridge PPGTT errata: We need a global gtt mapping
@@ -1375,7 +1356,8 @@ eb_relocate_entry(struct i915_execbuffer *eb,
*/
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
IS_GEN(eb->i915, 6)) {
- err = i915_vma_bind(target, target->obj->cache_level,
+ err = i915_vma_bind(target->vma,
+ target->vma->obj->cache_level,
PIN_GLOBAL, NULL);
if (WARN_ONCE(err,
"Unexpected failure to bind target VMA!"))
@@ -1388,17 +1370,17 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* more work needs to be done.
*/
if (!DBG_FORCE_RELOC &&
- gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
+ gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
return 0;
/* Check that the relocation address is valid... */
if (unlikely(reloc->offset >
- vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
+ ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
drm_dbg(&i915->drm, "Relocation beyond object bounds: "
"target %d offset %d size %d.\n",
reloc->target_handle,
(int)reloc->offset,
- (int)vma->size);
+ (int)ev->vma->size);
return -EINVAL;
}
if (unlikely(reloc->offset & 3)) {
@@ -1417,18 +1399,18 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* do relocations we are already stalling, disable the user's opt
* out of our synchronisation.
*/
- *vma->exec_flags &= ~EXEC_OBJECT_ASYNC;
+ ev->flags &= ~EXEC_OBJECT_ASYNC;
/* and update the user's relocation entry */
- return relocate_entry(vma, reloc, eb, target);
+ return relocate_entry(ev->vma, reloc, eb, target->vma);
}
-static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
+static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
struct drm_i915_gem_relocation_entry __user *urelocs;
- const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
+ const struct drm_i915_gem_exec_object2 *entry = ev->exec;
unsigned int remain;
urelocs = u64_to_user_ptr(entry->relocs_ptr);
@@ -1458,9 +1440,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
* we would try to acquire the struct mutex again. Obviously
* this is bad and so lockdep complains vehemently.
*/
- pagefault_disable();
- copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
- pagefault_enable();
+ copied = __copy_from_user(r, urelocs, count * sizeof(r[0]));
if (unlikely(copied)) {
remain = -EFAULT;
goto out;
@@ -1468,7 +1448,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
remain -= count;
do {
- u64 offset = eb_relocate_entry(eb, vma, r);
+ u64 offset = eb_relocate_entry(eb, ev, r);
if (likely(offset == 0)) {
} else if ((s64)offset < 0) {
@@ -1510,278 +1490,34 @@ out:
return remain;
}
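
The copy above also drops the pagefault_disable()/__copy_from_user_inatomic() pairing: with struct_mutex no longer held across relocation processing, the copy is allowed to fault and sleep, which is what makes the whole slow path removed below dead code. A condensed sketch of the change (copy_relocs is a stand-in name):

	/* Sketch: with no atomic section held, a faulting copy is fine. */
	static int copy_relocs(void *dst, const void __user *src, unsigned long len)
	{
		/* was: pagefault_disable() + __copy_from_user_inatomic()
		 *      + fall back to a slow path on fault */
		return __copy_from_user(dst, src, len) ? -EFAULT : 0;
	}
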
-static int
-eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
-{
- const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
- struct drm_i915_gem_relocation_entry *relocs =
- u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
- unsigned int i;
- int err;
-
- for (i = 0; i < entry->relocation_count; i++) {
- u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);
-
- if ((s64)offset < 0) {
- err = (int)offset;
- goto err;
- }
- }
- err = 0;
-err:
- reloc_cache_reset(&eb->reloc_cache);
- return err;
-}
-
-static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
-{
- const char __user *addr, *end;
- unsigned long size;
- char __maybe_unused c;
-
- size = entry->relocation_count;
- if (size == 0)
- return 0;
-
- if (size > N_RELOC(ULONG_MAX))
- return -EINVAL;
-
- addr = u64_to_user_ptr(entry->relocs_ptr);
- size *= sizeof(struct drm_i915_gem_relocation_entry);
- if (!access_ok(addr, size))
- return -EFAULT;
-
- end = addr + size;
- for (; addr < end; addr += PAGE_SIZE) {
- int err = __get_user(c, addr);
- if (err)
- return err;
- }
- return __get_user(c, end - 1);
-}
-
-static int eb_copy_relocations(const struct i915_execbuffer *eb)
+static int eb_relocate(struct i915_execbuffer *eb)
{
- struct drm_i915_gem_relocation_entry *relocs;
- const unsigned int count = eb->buffer_count;
- unsigned int i;
int err;
- for (i = 0; i < count; i++) {
- const unsigned int nreloc = eb->exec[i].relocation_count;
- struct drm_i915_gem_relocation_entry __user *urelocs;
- unsigned long size;
- unsigned long copied;
-
- if (nreloc == 0)
- continue;
-
- err = check_relocations(&eb->exec[i]);
- if (err)
- goto err;
-
- urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
- size = nreloc * sizeof(*relocs);
-
- relocs = kvmalloc_array(size, 1, GFP_KERNEL);
- if (!relocs) {
- err = -ENOMEM;
- goto err;
- }
-
- /* copy_from_user is limited to < 4GiB */
- copied = 0;
- do {
- unsigned int len =
- min_t(u64, BIT_ULL(31), size - copied);
-
- if (__copy_from_user((char *)relocs + copied,
- (char __user *)urelocs + copied,
- len))
- goto end;
-
- copied += len;
- } while (copied < size);
-
- /*
- * As we do not update the known relocation offsets after
- * relocating (due to the complexities in lock handling),
- * we need to mark them as invalid now so that we force the
- * relocation processing next time. Just in case the target
- * object is evicted and then rebound into its old
- * presumed_offset before the next execbuffer - if that
- * happened we would make the mistake of assuming that the
- * relocations were valid.
- */
- if (!user_access_begin(urelocs, size))
- goto end;
-
- for (copied = 0; copied < nreloc; copied++)
- unsafe_put_user(-1,
- &urelocs[copied].presumed_offset,
- end_user);
- user_access_end();
-
- eb->exec[i].relocs_ptr = (uintptr_t)relocs;
- }
-
- return 0;
-
-end_user:
- user_access_end();
-end:
- kvfree(relocs);
- err = -EFAULT;
-err:
- while (i--) {
- relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
- if (eb->exec[i].relocation_count)
- kvfree(relocs);
- }
- return err;
-}
-
-static int eb_prefault_relocations(const struct i915_execbuffer *eb)
-{
- const unsigned int count = eb->buffer_count;
- unsigned int i;
-
- for (i = 0; i < count; i++) {
- int err;
-
- err = check_relocations(&eb->exec[i]);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
-{
- struct drm_device *dev = &eb->i915->drm;
- bool have_copy = false;
- struct i915_vma *vma;
- int err = 0;
-
-repeat:
- if (signal_pending(current)) {
- err = -ERESTARTSYS;
- goto out;
- }
-
- /* We may process another execbuffer during the unlock... */
- eb_reset_vmas(eb);
- mutex_unlock(&dev->struct_mutex);
-
- /*
- * We take 3 passes through the slowpatch.
- *
- * 1 - we try to just prefault all the user relocation entries and
- * then attempt to reuse the atomic pagefault disabled fast path again.
- *
- * 2 - we copy the user entries to a local buffer here outside of the
- * local and allow ourselves to wait upon any rendering before
- * relocations
- *
- * 3 - we already have a local copy of the relocation entries, but
- * were interrupted (EAGAIN) whilst waiting for the objects, try again.
- */
- if (!err) {
- err = eb_prefault_relocations(eb);
- } else if (!have_copy) {
- err = eb_copy_relocations(eb);
- have_copy = err == 0;
- } else {
- cond_resched();
- err = 0;
- }
- if (err) {
- mutex_lock(&dev->struct_mutex);
- goto out;
- }
-
- /* A frequent cause for EAGAIN are currently unavailable client pages */
- flush_workqueue(eb->i915->mm.userptr_wq);
-
- err = i915_mutex_lock_interruptible(dev);
- if (err) {
- mutex_lock(&dev->struct_mutex);
- goto out;
- }
-
- /* reacquire the objects */
+ mutex_lock(&eb->gem_context->mutex);
err = eb_lookup_vmas(eb);
+ mutex_unlock(&eb->gem_context->mutex);
if (err)
- goto err;
-
- GEM_BUG_ON(!eb->batch);
-
- list_for_each_entry(vma, &eb->relocs, reloc_link) {
- if (!have_copy) {
- pagefault_disable();
- err = eb_relocate_vma(eb, vma);
- pagefault_enable();
- if (err)
- goto repeat;
- } else {
- err = eb_relocate_vma_slow(eb, vma);
- if (err)
- goto err;
- }
- }
-
- /*
- * Leave the user relocations as are, this is the painfully slow path,
- * and we want to avoid the complication of dropping the lock whilst
- * having buffers reserved in the aperture and so causing spurious
- * ENOSPC for random operations.
- */
-
-err:
- if (err == -EAGAIN)
- goto repeat;
-
-out:
- if (have_copy) {
- const unsigned int count = eb->buffer_count;
- unsigned int i;
-
- for (i = 0; i < count; i++) {
- const struct drm_i915_gem_exec_object2 *entry =
- &eb->exec[i];
- struct drm_i915_gem_relocation_entry *relocs;
-
- if (!entry->relocation_count)
- continue;
+ return err;
- relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
- kvfree(relocs);
- }
+ if (!list_empty(&eb->unbound)) {
+ err = eb_reserve(eb);
+ if (err)
+ return err;
}
- return err;
-}
-
-static int eb_relocate(struct i915_execbuffer *eb)
-{
- if (eb_lookup_vmas(eb))
- goto slow;
-
/* The objects are in their final locations, apply the relocations. */
if (eb->args->flags & __EXEC_HAS_RELOC) {
- struct i915_vma *vma;
+ struct eb_vma *ev;
- list_for_each_entry(vma, &eb->relocs, reloc_link) {
- if (eb_relocate_vma(eb, vma))
- goto slow;
+ list_for_each_entry(ev, &eb->relocs, reloc_link) {
+ err = eb_relocate_vma(eb, ev);
+ if (err)
+ return err;
}
}
return 0;
-
-slow:
- return eb_relocate_slow(eb);
}
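
The rewritten eb_relocate() above splits the old monolithic, struct_mutex-covered flow into three phases, each with its own serialisation:

	/*
	 * Phase sketch (assumed from the hunks above):
	 *  1. eb_lookup_vmas()   under gem_context->mutex  - resolve handles
	 *  2. eb_reserve()       under drm.struct_mutex    - pin/bind unbound vmas
	 *  3. eb_relocate_vma()  no global lock            - patch user relocations
	 */
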
static int eb_move_to_gpu(struct i915_execbuffer *eb)
@@ -1794,27 +1530,19 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
ww_acquire_init(&acquire, &reservation_ww_class);
for (i = 0; i < count; i++) {
- struct i915_vma *vma = eb->vma[i];
+ struct eb_vma *ev = &eb->vma[i];
+ struct i915_vma *vma = ev->vma;
err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
- if (!err)
- continue;
-
- GEM_BUG_ON(err == -EALREADY); /* No duplicate vma */
-
if (err == -EDEADLK) {
GEM_BUG_ON(i == 0);
do {
int j = i - 1;
- ww_mutex_unlock(&eb->vma[j]->resv->lock);
+ ww_mutex_unlock(&eb->vma[j].vma->resv->lock);
- swap(eb->flags[i], eb->flags[j]);
swap(eb->vma[i], eb->vma[j]);
- eb->vma[i]->exec_flags = &eb->flags[i];
} while (--i);
- GEM_BUG_ON(vma != eb->vma[0]);
- vma->exec_flags = &eb->flags[0];
err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
&acquire);
@@ -1825,8 +1553,9 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
ww_acquire_done(&acquire);
while (i--) {
- unsigned int flags = eb->flags[i];
- struct i915_vma *vma = eb->vma[i];
+ struct eb_vma *ev = &eb->vma[i];
+ struct i915_vma *vma = ev->vma;
+ unsigned int flags = ev->flags;
struct drm_i915_gem_object *obj = vma->obj;
assert_vma_held(vma);
@@ -1870,10 +1599,9 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
i915_vma_unlock(vma);
__eb_unreserve_vma(vma, flags);
- vma->exec_flags = NULL;
+ i915_vma_put(vma);
- if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
- i915_vma_put(vma);
+ ev->vma = NULL;
}
ww_acquire_fini(&acquire);
@@ -1887,7 +1615,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
return 0;
err_skip:
- i915_request_skip(eb->request, err);
+ i915_request_set_error_once(eb->request, err);
return err;
}
@@ -2008,7 +1736,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
if (!pw)
return -ENOMEM;
- err = i915_active_acquire(&eb->batch->active);
+ err = i915_active_acquire(&eb->batch->vma->active);
if (err)
goto err_free;
@@ -2025,7 +1753,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
dma_fence_work_init(&pw->base, &eb_parse_ops);
pw->engine = eb->engine;
- pw->batch = eb->batch;
+ pw->batch = eb->batch->vma;
pw->batch_offset = eb->batch_start_offset;
pw->batch_length = eb->batch_len;
pw->shadow = shadow;
@@ -2067,7 +1795,7 @@ err_trampoline:
err_shadow:
i915_active_release(&shadow->active);
err_batch:
- i915_active_release(&eb->batch->active);
+ i915_active_release(&eb->batch->vma->active);
err_free:
kfree(pw);
return err;
@@ -2130,15 +1858,12 @@ static int eb_parse(struct i915_execbuffer *eb)
if (err)
goto err_trampoline;
- eb->vma[eb->buffer_count] = i915_vma_get(shadow);
- eb->flags[eb->buffer_count] =
- __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
- shadow->exec_flags = &eb->flags[eb->buffer_count];
- eb->buffer_count++;
+ eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
+ eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
+ eb->batch = &eb->vma[eb->buffer_count++];
eb->trampoline = trampoline;
eb->batch_start_offset = 0;
- eb->batch = shadow;
shadow->private = pool;
return 0;
@@ -2165,7 +1890,7 @@ add_to_client(struct i915_request *rq, struct drm_file *file)
spin_unlock(&file_priv->mm.lock);
}
-static int eb_submit(struct i915_execbuffer *eb)
+static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
{
int err;
@@ -2192,7 +1917,7 @@ static int eb_submit(struct i915_execbuffer *eb)
}
err = eb->engine->emit_bb_start(eb->request,
- eb->batch->node.start +
+ batch->node.start +
eb->batch_start_offset,
eb->batch_len,
eb->batch_flags);
@@ -2327,15 +2052,22 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
intel_context_timeline_unlock(tl);
if (rq) {
- if (i915_request_wait(rq,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT) < 0) {
- i915_request_put(rq);
- err = -EINTR;
- goto err_exit;
- }
+ bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
+ long timeout;
+
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ if (nonblock)
+ timeout = 0;
+ timeout = i915_request_wait(rq,
+ I915_WAIT_INTERRUPTIBLE,
+ timeout);
i915_request_put(rq);
+
+ if (timeout < 0) {
+ err = nonblock ? -EWOULDBLOCK : timeout;
+ goto err_exit;
+ }
}
eb->engine = ce->engine;
@@ -2560,6 +2292,73 @@ signal_fence_array(struct i915_execbuffer *eb,
}
}
+static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
+{
+ struct i915_request *rq, *rn;
+
+ list_for_each_entry_safe(rq, rn, &tl->requests, link)
+ if (rq == end || !i915_request_retire(rq))
+ break;
+}
+
+static void eb_request_add(struct i915_execbuffer *eb)
+{
+ struct i915_request *rq = eb->request;
+ struct intel_timeline * const tl = i915_request_timeline(rq);
+ struct i915_sched_attr attr = {};
+ struct i915_request *prev;
+
+ lockdep_assert_held(&tl->mutex);
+ lockdep_unpin_lock(&tl->mutex, rq->cookie);
+
+ trace_i915_request_add(rq);
+
+ prev = __i915_request_commit(rq);
+
+ /* Check that the context wasn't destroyed before submission */
+ if (likely(!intel_context_is_closed(eb->context))) {
+ attr = eb->gem_context->sched;
+
+ /*
+ * Boost actual workloads past semaphores!
+ *
+ * With semaphores we spin on one engine waiting for another,
+ * simply to reduce the latency of starting our work when
+ * the signaler completes. However, if there is any other
+ * work that we could be doing on this engine instead, that
+ * is better utilisation and will reduce the overall duration
+ * of the current work. To avoid PI boosting a semaphore
+ * far in the distance past over useful work, we keep a history
+ * of any semaphore use along our dependency chain.
+ */
+ if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
+ attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+
+ /*
+ * Boost priorities to new clients (new request flows).
+ *
+ * Allow interactive/synchronous clients to jump ahead of
+ * the bulk clients. (FQ_CODEL)
+ */
+ if (list_empty(&rq->sched.signalers_list))
+ attr.priority |= I915_PRIORITY_WAIT;
+ } else {
+ /* Serialise with context_close via the add_to_timeline */
+ i915_request_set_error_once(rq, -ENOENT);
+ __i915_request_skip(rq);
+ }
+
+ local_bh_disable();
+ __i915_request_queue(rq, &attr);
+ local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
+
+ /* Try to clean up the client's timeline after submitting the request */
+ if (prev)
+ retire_requests(tl, prev);
+
+ mutex_unlock(&tl->mutex);
+}
+
static int
i915_gem_do_execbuffer(struct drm_device *dev,
struct drm_file *file,
@@ -2572,6 +2371,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
struct dma_fence *in_fence = NULL;
struct dma_fence *exec_fence = NULL;
struct sync_file *out_fence = NULL;
+ struct i915_vma *batch;
int out_fence_fd = -1;
int err;
@@ -2586,9 +2386,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
args->flags |= __EXEC_HAS_RELOC;
eb.exec = exec;
- eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
- eb.vma[0] = NULL;
- eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);
+ eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
+ eb.vma[0].vma = NULL;
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
reloc_cache_init(&eb.reloc_cache, eb.i915);
@@ -2656,10 +2455,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (unlikely(err))
goto err_context;
- err = i915_mutex_lock_interruptible(dev);
- if (err)
- goto err_engine;
-
err = eb_relocate(&eb);
if (err) {
/*
@@ -2673,21 +2468,23 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_vma;
}
- if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {
+ if (unlikely(eb.batch->flags & EXEC_OBJECT_WRITE)) {
drm_dbg(&i915->drm,
"Attempting to use self-modifying batch buffer\n");
err = -EINVAL;
goto err_vma;
}
- if (eb.batch_start_offset > eb.batch->size ||
- eb.batch_len > eb.batch->size - eb.batch_start_offset) {
+
+ if (range_overflows_t(u64,
+ eb.batch_start_offset, eb.batch_len,
+ eb.batch->vma->size)) {
drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
err = -EINVAL;
goto err_vma;
}
if (eb.batch_len == 0)
- eb.batch_len = eb.batch->size - eb.batch_start_offset;
+ eb.batch_len = eb.batch->vma->size - eb.batch_start_offset;
err = eb_parse(&eb);
if (err)
@@ -2697,6 +2494,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
+ batch = eb.batch->vma;
if (eb.batch_flags & I915_DISPATCH_SECURE) {
struct i915_vma *vma;
@@ -2710,13 +2508,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* fitting due to fragmentation.
* So this is actually safe.
*/
- vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
+ vma = i915_gem_object_ggtt_pin(batch->obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err_vma;
+ goto err_parse;
}
- eb.batch = vma;
+ batch = vma;
}
/* All GPU relocation batches must be submitted prior to the user rq */
@@ -2763,16 +2561,16 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* inactive_list and lose its active reference. Hence we do not need
* to explicitly hold another reference here.
*/
- eb.request->batch = eb.batch;
- if (eb.batch->private)
- intel_engine_pool_mark_active(eb.batch->private, eb.request);
+ eb.request->batch = batch;
+ if (batch->private)
+ intel_engine_pool_mark_active(batch->private, eb.request);
trace_i915_request_queue(eb.request, eb.batch_flags);
- err = eb_submit(&eb);
+ err = eb_submit(&eb, batch);
err_request:
add_to_client(eb.request, file);
i915_request_get(eb.request);
- i915_request_add(eb.request);
+ eb_request_add(&eb);
if (fences)
signal_fence_array(&eb, fences);
@@ -2791,16 +2589,15 @@ err_request:
err_batch_unpin:
if (eb.batch_flags & I915_DISPATCH_SECURE)
- i915_vma_unpin(eb.batch);
- if (eb.batch->private)
- intel_engine_pool_put(eb.batch->private);
+ i915_vma_unpin(batch);
+err_parse:
+ if (batch->private)
+ intel_engine_pool_put(batch->private);
err_vma:
if (eb.exec)
eb_release_vmas(&eb);
if (eb.trampoline)
i915_vma_unpin(eb.trampoline);
- mutex_unlock(&dev->struct_mutex);
-err_engine:
eb_unpin_engine(&eb);
err_context:
i915_gem_context_put(eb.gem_context);
@@ -2818,9 +2615,7 @@ err_in_fence:
static size_t eb_element_size(void)
{
- return (sizeof(struct drm_i915_gem_exec_object2) +
- sizeof(struct i915_vma *) +
- sizeof(unsigned int));
+ return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma);
}
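
eb_element_size() sizes one slot of the combined table that i915_gem_do_execbuffer() carves up above: the userspace exec_object2 array is followed in the same allocation by the driver-private eb_vma array, hence eb.vma = (struct eb_vma *)(exec + buffer_count + 1). A hedged sketch of the layout (example_layout is a stand-in name):

	static struct eb_vma *example_layout(unsigned int count)
	{
		/* one allocation: exec[0..count] immediately followed by eb_vma[] */
		struct drm_i915_gem_exec_object2 *exec =
			kvmalloc_array(count + 1, eb_element_size(), GFP_KERNEL);

		return exec ? (struct eb_vma *)(exec + count + 1) : NULL;
	}
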
static bool check_buffer_count(size_t count)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 9cfb0e41ff06..cbbff81aa0af 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -8,8 +8,6 @@
#include <linux/slab.h>
#include <linux/swiotlb.h>
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_object.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index e8cccc131c40..b39c24dae64e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -775,7 +775,7 @@ static struct file *mmap_singleton(struct drm_i915_private *i915)
struct file *file;
rcu_read_lock();
- file = i915->gem.mmap_singleton;
+ file = READ_ONCE(i915->gem.mmap_singleton);
if (file && !get_file_rcu(file))
file = NULL;
rcu_read_unlock();
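
The READ_ONCE() closes a data race on the singleton pointer: the reader must load it exactly once before get_file_rcu(), which itself fails gracefully if the final reference has already been dropped. The general shape of this RCU file lookup, as a hedged sketch (get_singleton is a stand-in name):

	static struct file *get_singleton(struct file **slot)
	{
		struct file *file;

		rcu_read_lock();
		file = READ_ONCE(*slot);	 /* one torn-free load */
		if (file && !get_file_rcu(file)) /* refcount already hit zero */
			file = NULL;
		rcu_read_unlock();

		return file;
	}
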
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 35985218bd85..5da9f9e534b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -225,6 +225,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
/* But keep the pointer alive for RCU-protected lookups */
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+ cond_resched();
}
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index e44a2f40b520..2faa481cc18f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -11,8 +11,6 @@
#include <drm/drm_file.h>
#include <drm/drm_device.h>
-#include <drm/i915_drm.h>
-
#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index 70809d8897cd..e00792158f13 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -186,7 +186,7 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
0);
out_request:
if (unlikely(err))
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
out_batch:
@@ -196,6 +196,17 @@ out_unpin:
return err;
}
+/* Wa_1209644611:icl,ehl */
+static bool wa_1209644611_applies(struct drm_i915_private *i915, u32 size)
+{
+ u32 height = size >> PAGE_SHIFT;
+
+ if (!IS_GEN(i915, 11))
+ return false;
+
+ return height % 4 == 3 && height <= 8;
+}
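
The predicate converts the copy size to a height in pages and blocks the fast-copy BLT only for the heights the workaround covers: with the height <= 8 bound, height % 4 == 3 matches exactly heights 3 and 7, i.e. 12 KiB and 28 KiB chunks on gen11. A quick check of the boundary cases (check_wa_heights is a stand-in name):

	/* Sketch: enumerate which heights trip Wa_1209644611. */
	static void check_wa_heights(void)
	{
		u32 h;

		for (h = 1; h <= 8; h++)
			pr_info("height %u -> %s\n", h,
				h % 4 == 3 ? "WA applies" : "fast copy ok");
	}
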
+
struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
struct i915_vma *src,
struct i915_vma *dst)
@@ -237,7 +248,8 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
size = min_t(u64, rem, block_size);
GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
- if (INTEL_GEN(i915) >= 9) {
+ if (INTEL_GEN(i915) >= 9 &&
+ !wa_1209644611_applies(i915, size)) {
*cmd++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
*cmd++ = BLT_DEPTH_32 | PAGE_SIZE;
*cmd++ = 0;
@@ -385,7 +397,7 @@ out_unlock:
drm_gem_unlock_reservations(objs, ARRAY_SIZE(objs), &acquire);
out_request:
if (unlikely(err))
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
out_batch:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index b07bb40edd5a..698e22420dc5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -194,10 +194,11 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
/* Perma-pin (until release) the physical set of pages */
__i915_gem_object_pin_pages(obj);
- if (!IS_ERR_OR_NULL(pages)) {
+ if (!IS_ERR_OR_NULL(pages))
i915_gem_shmem_ops.put_pages(obj, pages);
- i915_gem_object_release_memory_region(obj);
- }
+
+ i915_gem_object_release_memory_region(obj);
+
mutex_unlock(&obj->mm.lock);
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 7eaa2ab01de3..03e5eb4c99d1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -12,7 +12,6 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
-#include <drm/i915_drm.h>
#include "i915_trace.h"
@@ -256,8 +255,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
freed = i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_ACTIVE);
+ I915_SHRINK_UNBOUND);
}
return freed;
@@ -336,7 +334,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
freed_pages = 0;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
- I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_WRITEBACK);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 491cfbaaa330..5557dfa83a7b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -13,6 +13,7 @@
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
+#include "i915_vgpu.h"
/*
* The BIOS typically reserves some of the system's memory for the exclusive
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 6c7825a2dc2a..37f77aee1212 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -6,7 +6,6 @@
#include <linux/string.h>
#include <linux/bitops.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 63ead7a2b64a..7ffd7afeb7a5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -10,8 +10,6 @@
#include <linux/swap.h>
#include <linux/sched/mm.h>
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 375d864736f3..54b86cf7f5d2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -1004,7 +1004,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
return 0;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_batch:
@@ -1559,7 +1559,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
goto out_vm;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
@@ -1708,7 +1708,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto out_vm;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
@@ -1809,7 +1809,6 @@ static int igt_vm_isolation(void *arg)
vm_total = ctx_vm(ctx_a)->total;
GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total);
- vm_total -= I915_GTT_PAGE_SIZE;
count = 0;
num_engines = 0;
@@ -1828,10 +1827,10 @@ static int igt_vm_isolation(void *arg)
u32 value = 0xc5c5c5c5;
u64 offset;
- div64_u64_rem(i915_prandom_u64_state(&prng),
- vm_total, &offset);
- offset = round_down(offset, alignof_dword);
- offset += I915_GTT_PAGE_SIZE;
+ /* Leave enough space at offset 0 for the batch */
+ offset = igt_random_offset(&prng,
+ I915_GTT_PAGE_SIZE, vm_total,
+ sizeof(u32), alignof_dword);
err = write_to_scratch(ctx_a, engine,
offset, 0xdeadbeef);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index ef7c74cff28a..43912e9b683d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -570,7 +570,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj))
- return PTR_ERR(obj);
+ return false;
mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 6718da20f35d..772d8cba7da9 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -159,7 +159,7 @@ int igt_gpu_fill_dw(struct intel_context *ce,
return 0;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_batch:
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index b12ea1daa29d..e7e3c620f542 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -23,6 +23,9 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
+ spin_lock_init(&ctx->stale.lock);
+ INIT_LIST_HEAD(&ctx->stale.engines);
+
i915_gem_context_set_persistence(ctx);
mutex_init(&ctx->engines_mutex);
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
new file mode 100644
index 000000000000..de595b66a746
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gen7_renderclear.h"
+#include "i915_drv.h"
+#include "intel_gpu_commands.h"
+
+#define MAX_URB_ENTRIES 64
+#define STATE_SIZE (4 * 1024)
+#define GT3_INLINE_DATA_DELAYS 0x1E00
+#define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
+
+struct cb_kernel {
+ const void *data;
+ u32 size;
+};
+
+#define CB_KERNEL(name) { .data = (name), .size = sizeof(name) }
+
+#include "ivb_clear_kernel.c"
+static const struct cb_kernel cb_kernel_ivb = CB_KERNEL(ivb_clear_kernel);
+
+#include "hsw_clear_kernel.c"
+static const struct cb_kernel cb_kernel_hsw = CB_KERNEL(hsw_clear_kernel);
+
+struct batch_chunk {
+ struct i915_vma *vma;
+ u32 offset;
+ u32 *start;
+ u32 *end;
+ u32 max_items;
+};
+
+struct batch_vals {
+ u32 max_primitives;
+ u32 max_urb_entries;
+ u32 cmd_size;
+ u32 state_size;
+ u32 state_start;
+ u32 batch_size;
+ u32 surface_height;
+ u32 surface_width;
+ u32 scratch_size;
+ u32 max_size;
+};
+
+static void
+batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
+{
+ if (IS_HASWELL(i915)) {
+ bv->max_primitives = 280;
+ bv->max_urb_entries = MAX_URB_ENTRIES;
+ bv->surface_height = 16 * 16;
+ bv->surface_width = 32 * 2 * 16;
+ } else {
+ bv->max_primitives = 128;
+ bv->max_urb_entries = MAX_URB_ENTRIES / 2;
+ bv->surface_height = 16 * 8;
+ bv->surface_width = 32 * 16;
+ }
+ bv->cmd_size = bv->max_primitives * 4096;
+ bv->state_size = STATE_SIZE;
+ bv->state_start = bv->cmd_size;
+ bv->batch_size = bv->cmd_size + bv->state_size;
+ bv->scratch_size = bv->surface_height * bv->surface_width;
+ bv->max_size = bv->batch_size + bv->scratch_size;
+}
+
+static void batch_init(struct batch_chunk *bc,
+ struct i915_vma *vma,
+ u32 *start, u32 offset, u32 max_bytes)
+{
+ bc->vma = vma;
+ bc->offset = offset;
+ bc->start = start + bc->offset / sizeof(*bc->start);
+ bc->end = bc->start;
+ bc->max_items = max_bytes / sizeof(*bc->start);
+}
+
+static u32 batch_offset(const struct batch_chunk *bc, u32 *cs)
+{
+ return (cs - bc->start) * sizeof(*bc->start) + bc->offset;
+}
+
+static u32 batch_addr(const struct batch_chunk *bc)
+{
+ return bc->vma->node.start;
+}
+
+static void batch_add(struct batch_chunk *bc, const u32 d)
+{
+ GEM_BUG_ON((bc->end - bc->start) >= bc->max_items);
+ *bc->end++ = d;
+}
+
+static u32 *batch_alloc_items(struct batch_chunk *bc, u32 align, u32 items)
+{
+ u32 *map;
+
+ if (align) {
+ u32 *end = PTR_ALIGN(bc->end, align);
+
+ memset32(bc->end, 0, end - bc->end);
+ bc->end = end;
+ }
+
+ map = bc->end;
+ bc->end += items;
+
+ return map;
+}
+
+static u32 *batch_alloc_bytes(struct batch_chunk *bc, u32 align, u32 bytes)
+{
+ GEM_BUG_ON(!IS_ALIGNED(bytes, sizeof(*bc->start)));
+ return batch_alloc_items(bc, align, bytes / sizeof(*bc->start));
+}
+
+static u32
+gen7_fill_surface_state(struct batch_chunk *state,
+ const u32 dst_offset,
+ const struct batch_vals *bv)
+{
+ u32 surface_h = bv->surface_height;
+ u32 surface_w = bv->surface_width;
+ u32 *cs = batch_alloc_items(state, 32, 8);
+ u32 offset = batch_offset(state, cs);
+
+#define SURFACE_2D 1
+#define SURFACEFORMAT_B8G8R8A8_UNORM 0x0C0
+#define RENDER_CACHE_READ_WRITE 1
+
+ *cs++ = SURFACE_2D << 29 |
+ (SURFACEFORMAT_B8G8R8A8_UNORM << 18) |
+ (RENDER_CACHE_READ_WRITE << 8);
+
+ *cs++ = batch_addr(state) + dst_offset;
+
+ *cs++ = ((surface_h / 4 - 1) << 16) | (surface_w / 4 - 1);
+ *cs++ = surface_w;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+#define SHADER_CHANNELS(r, g, b, a) \
+ (((r) << 25) | ((g) << 22) | ((b) << 19) | ((a) << 16))
+ *cs++ = SHADER_CHANNELS(4, 5, 6, 7);
+ batch_advance(state, cs);
+
+ return offset;
+}
+
+static u32
+gen7_fill_binding_table(struct batch_chunk *state,
+ const struct batch_vals *bv)
+{
+ u32 surface_start = gen7_fill_surface_state(state, bv->batch_size, bv);
+ u32 *cs = batch_alloc_items(state, 32, 8);
+ u32 offset = batch_offset(state, cs);
+
+ *cs++ = surface_start - state->offset;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(state, cs);
+
+ return offset;
+}
+
+static u32
+gen7_fill_kernel_data(struct batch_chunk *state,
+ const u32 *data,
+ const u32 size)
+{
+ return batch_offset(state,
+ memcpy(batch_alloc_bytes(state, 64, size),
+ data, size));
+}
+
+static u32
+gen7_fill_interface_descriptor(struct batch_chunk *state,
+ const struct batch_vals *bv,
+ const struct cb_kernel *kernel,
+ unsigned int count)
+{
+ u32 kernel_offset =
+ gen7_fill_kernel_data(state, kernel->data, kernel->size);
+ u32 binding_table = gen7_fill_binding_table(state, bv);
+ u32 *cs = batch_alloc_items(state, 32, 8 * count);
+ u32 offset = batch_offset(state, cs);
+
+ *cs++ = kernel_offset;
+ *cs++ = (1 << 7) | (1 << 13);
+ *cs++ = 0;
+ *cs++ = (binding_table - state->offset) | 1;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* IDDs 1 - 63 are dummy entries */
+ memset32(cs, 0x00, (count - 1) * 8);
+ batch_advance(state, cs + (count - 1) * 8);
+
+ return offset;
+}
+
+static void
+gen7_emit_state_base_address(struct batch_chunk *batch,
+ u32 surface_state_base)
+{
+ u32 *cs = batch_alloc_items(batch, 0, 12);
+
+ *cs++ = STATE_BASE_ADDRESS | (12 - 2);
+ /* general */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ /* surface */
+ *cs++ = batch_addr(batch) | surface_state_base | BASE_ADDRESS_MODIFY;
+ /* dynamic */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ /* indirect */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ /* instruction */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+
+ /* general/dynamic/indirect/instruction access bounds */
+ *cs++ = 0;
+ *cs++ = BASE_ADDRESS_MODIFY;
+ *cs++ = 0;
+ *cs++ = BASE_ADDRESS_MODIFY;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_vfe_state(struct batch_chunk *batch,
+ const struct batch_vals *bv,
+ u32 urb_size, u32 curbe_size,
+ u32 mode)
+{
+ u32 urb_entries = bv->max_urb_entries;
+ u32 threads = bv->max_primitives - 1;
+ u32 *cs = batch_alloc_items(batch, 32, 8);
+
+ *cs++ = MEDIA_VFE_STATE | (8 - 2);
+
+ /* scratch buffer */
+ *cs++ = 0;
+
+ /* number of threads & urb entries for GPGPU vs Media Mode */
+ *cs++ = threads << 16 | urb_entries << 8 | mode << 2;
+
+ *cs++ = 0;
+
+ /* urb entry size & curbe size in 256-bit units */
+ *cs++ = urb_size << 16 | curbe_size;
+
+ /* scoreboard */
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_interface_descriptor_load(struct batch_chunk *batch,
+ const u32 interface_descriptor,
+ unsigned int count)
+{
+ u32 *cs = batch_alloc_items(batch, 8, 4);
+
+ *cs++ = MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2);
+ *cs++ = 0;
+ *cs++ = count * 8 * sizeof(*cs);
+
+ /*
+ * interface descriptor address - it is relative to the dynamic
+ * state base address
+ */
+ *cs++ = interface_descriptor;
+ batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_media_object(struct batch_chunk *batch,
+ unsigned int media_object_index)
+{
+ unsigned int x_offset = (media_object_index % 16) * 64;
+ unsigned int y_offset = (media_object_index / 16) * 16;
+ unsigned int inline_data_size;
+ unsigned int media_batch_size;
+ unsigned int i;
+ u32 *cs;
+
+ inline_data_size = 112 * 8;
+ media_batch_size = inline_data_size + 6;
+
+ cs = batch_alloc_items(batch, 8, media_batch_size);
+
+ *cs++ = MEDIA_OBJECT | (media_batch_size - 2);
+
+ /* interface descriptor offset */
+ *cs++ = 0;
+
+ /* no indirect data */
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* scoreboard */
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* inline */
+ *cs++ = (y_offset << 16) | (x_offset);
+ *cs++ = 0;
+ *cs++ = GT3_INLINE_DATA_DELAYS;
+ for (i = 3; i < inline_data_size; i++)
+ *cs++ = 0;
+
+ batch_advance(batch, cs);
+}
+
+static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
+{
+ u32 *cs = batch_alloc_items(batch, 0, 5);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE |
+ PIPE_CONTROL_GLOBAL_GTT_IVB;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(batch, cs);
+}
+
+static void emit_batch(struct i915_vma * const vma,
+ u32 *start,
+ const struct batch_vals *bv)
+{
+ struct drm_i915_private *i915 = vma->vm->i915;
+ unsigned int desc_count = 64;
+ const u32 urb_size = 112;
+ struct batch_chunk cmds, state;
+ u32 interface_descriptor;
+ unsigned int i;
+
+ batch_init(&cmds, vma, start, 0, bv->cmd_size);
+ batch_init(&state, vma, start, bv->state_start, bv->state_size);
+
+ interface_descriptor =
+ gen7_fill_interface_descriptor(&state, bv,
+ IS_HASWELL(i915) ?
+ &cb_kernel_hsw :
+ &cb_kernel_ivb,
+ desc_count);
+ gen7_emit_pipeline_flush(&cmds);
+ batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
+ batch_add(&cmds, MI_NOOP);
+ gen7_emit_state_base_address(&cmds, interface_descriptor);
+ gen7_emit_pipeline_flush(&cmds);
+
+ gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
+
+ gen7_emit_interface_descriptor_load(&cmds,
+ interface_descriptor,
+ desc_count);
+
+ for (i = 0; i < bv->max_primitives; i++)
+ gen7_emit_media_object(&cmds, i);
+
+ batch_add(&cmds, MI_BATCH_BUFFER_END);
+}
+
+int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
+ struct i915_vma * const vma)
+{
+ struct batch_vals bv;
+ u32 *batch;
+
+ batch_get_defaults(engine->i915, &bv);
+ if (!vma)
+ return bv.max_size;
+
+ GEM_BUG_ON(vma->obj->base.size < bv.max_size);
+
+ batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ emit_batch(vma, memset(batch, 0, bv.max_size), &bv);
+
+ i915_gem_object_flush_map(vma->obj);
+ i915_gem_object_unpin_map(vma->obj);
+
+ return 0;
+}
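The batch_chunk functions above form a small bump allocator over the pinned batch vma: batch_alloc_items() pads the write cursor out to the requested byte alignment with zero dwords, then returns the cursor and advances it by the allocation. A minimal userspace sketch of the same scheme (all names and sizes here are illustrative, not taken from the driver):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct chunk {
		uint32_t *start, *end;	/* write cursor into a fixed buffer */
		uint32_t max_items;
	};

	static uint32_t *alloc_items(struct chunk *c, uint32_t align, uint32_t items)
	{
		if (align) {
			/* Pad to the byte alignment with zero dwords, as the
			 * driver does with PTR_ALIGN() + memset32(). */
			uintptr_t p = (uintptr_t)c->end;
			uint32_t *aligned =
				(uint32_t *)((p + align - 1) & ~(uintptr_t)(align - 1));

			memset(c->end, 0, (aligned - c->end) * sizeof(*c->end));
			c->end = aligned;
		}

		assert((c->end - c->start) + items <= c->max_items);
		c->end += items;
		return c->end - items;
	}

	int main(void)
	{
		uint32_t buf[64] = { 0 };
		struct chunk c = { buf, buf, 64 };
		uint32_t *cs = alloc_items(&c, 32, 8); /* like a surface state */

		cs[0] = 0xdeadbeef;
		printf("allocated at dword %zu\n", (size_t)(cs - buf));
		return 0;
	}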
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.h b/drivers/gpu/drm/i915/gt/gen7_renderclear.h
new file mode 100644
index 000000000000..bb100748e2c6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __GEN7_RENDERCLEAR_H__
+#define __GEN7_RENDERCLEAR_H__
+
+struct intel_engine_cs;
+struct i915_vma;
+
+int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
+ struct i915_vma * const vma);
+
+#endif /* __GEN7_RENDERCLEAR_H__ */
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 4d1de2d97d5c..94e746af8926 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -8,6 +8,7 @@
#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
+#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"
@@ -25,6 +26,30 @@ static u64 gen8_pde_encode(const dma_addr_t addr,
return pde;
}
+static u64 gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+
+ if (unlikely(flags & PTE_READ_ONLY))
+ pte &= ~_PAGE_RW;
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ pte |= PPAT_UNCACHED;
+ break;
+ case I915_CACHE_WT:
+ pte |= PPAT_DISPLAY_ELLC;
+ break;
+ default:
+ pte |= PPAT_CACHED;
+ break;
+ }
+
+ return pte;
+}
+
static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
struct drm_i915_private *i915 = ppgtt->vm.i915;
@@ -706,6 +731,8 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
ppgtt->vm.clear_range = gen8_ppgtt_clear;
+ ppgtt->vm.pte_encode = gen8_pte_encode;
+
if (intel_vgpu_active(gt->i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
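gen8_pte_encode() is moved here from the common gtt code (and removed below from intel_gtt.c) so that only the full ppgtt composes the PPAT cache attributes; the GGTT gains its own minimal encoder later in this series of hunks. A worked example of the entry layout it builds; the bit positions are placeholders chosen for the sketch, not values copied from the i915 headers:

	#include <stdint.h>
	#include <stdio.h>

	#define PRESENT    (1ull << 0)	/* placeholder for _PAGE_PRESENT */
	#define RW         (1ull << 1)	/* placeholder for _PAGE_RW */
	#define PPAT_UC    (1ull << 3)	/* placeholder cache-attribute bits */
	#define PPAT_WT    (1ull << 4)
	#define PPAT_WB    (1ull << 7)
	#define READ_ONLY  (1u << 0)

	enum cache_level { CACHE_NONE, CACHE_WT, CACHE_LLC };

	static uint64_t pte_encode(uint64_t addr, enum cache_level level, uint32_t flags)
	{
		uint64_t pte = addr | PRESENT | RW;

		if (flags & READ_ONLY)
			pte &= ~RW;	/* strip writability, keep the address */

		switch (level) {
		case CACHE_NONE:
			pte |= PPAT_UC;
			break;
		case CACHE_WT:
			pte |= PPAT_WT;
			break;
		default:
			pte |= PPAT_WB;
			break;
		}

		return pte;
	}

	int main(void)
	{
		/* page-aligned dma address, uncached, read-only */
		printf("pte = %#llx\n", (unsigned long long)
		       pte_encode(0x10000, CACHE_NONE, READ_ONLY));
		return 0;
	}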
diff --git a/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c b/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c
new file mode 100644
index 000000000000..b47f9d4a0848
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Generated by: IGT Gpu Tools on Fri 21 Feb 2020 05:30:13 AM UTC
+ */
+
+static const u32 hsw_clear_kernel[] = {
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000040, 0x20280c21, 0x00000028, 0x00000001,
+ 0x01000010, 0x20000c20, 0x0000002c, 0x00000000,
+ 0x00010220, 0x34001c00, 0x00001400, 0x00000160,
+ 0x00600001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00000008, 0x20601c85, 0x00000e00, 0x0000000c,
+ 0x00000005, 0x20601ca5, 0x00000060, 0x00000001,
+ 0x00000008, 0x20641c85, 0x00000e00, 0x0000000d,
+ 0x00000005, 0x20641ca5, 0x00000064, 0x00000003,
+ 0x00000041, 0x207424a5, 0x00000064, 0x00000034,
+ 0x00000040, 0x206014a5, 0x00000060, 0x00000074,
+ 0x00000008, 0x20681c85, 0x00000e00, 0x00000008,
+ 0x00000005, 0x20681ca5, 0x00000068, 0x0000000f,
+ 0x00000041, 0x20701ca5, 0x00000060, 0x00000010,
+ 0x00000040, 0x206814a5, 0x00000068, 0x00000070,
+ 0x00600001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00000005, 0x206c1c85, 0x00000e00, 0x00000007,
+ 0x00000041, 0x206c1ca5, 0x0000006c, 0x00000004,
+ 0x00600001, 0x20800021, 0x008d0000, 0x00000000,
+ 0x00000001, 0x20800021, 0x0000006c, 0x00000000,
+ 0x00000001, 0x20840021, 0x00000068, 0x00000000,
+ 0x00000001, 0x20880061, 0x00000000, 0x00000003,
+ 0x00000005, 0x208c0d21, 0x00000086, 0xffffffff,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x02190001,
+ 0x00000040, 0x20a01ca5, 0x000000a0, 0x00000001,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x040a8001,
+ 0x02000040, 0x20281c21, 0x00000028, 0xffffffff,
+ 0x00010220, 0x34001c00, 0x00001400, 0xffffffe0,
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000001, 0x220010e4, 0x00000000, 0x00000000,
+ 0x00000001, 0x220831ec, 0x00000000, 0x007f007f,
+ 0x00600001, 0x20400021, 0x008d0000, 0x00000000,
+ 0x00600001, 0x2fe00021, 0x008d0000, 0x00000000,
+ 0x00200001, 0x20400121, 0x00450020, 0x00000000,
+ 0x00000001, 0x20480061, 0x00000000, 0x000f000f,
+ 0x00000005, 0x204c0d21, 0x00000046, 0xffffffef,
+ 0x00800001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20800061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20c00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20e00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21000061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21200061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21400061, 0x00000000, 0x00000000,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x00000040, 0x20402d21, 0x00000020, 0x00100010,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x02000040, 0x22083d8c, 0x00000208, 0xffffffff,
+ 0x00800001, 0xa0000109, 0x00000602, 0x00000000,
+ 0x00000040, 0x22001c84, 0x00000200, 0x00000020,
+ 0x00010220, 0x34001c00, 0x00001400, 0xffffffc0,
+ 0x07600032, 0x20000fa0, 0x008d0fe0, 0x82000010,
+};
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 8bb444cda14f..aea992e46c42 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -51,6 +51,11 @@ int intel_context_alloc_state(struct intel_context *ce)
return -EINTR;
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ if (intel_context_is_banned(ce)) {
+ err = -EIO;
+ goto unlock;
+ }
+
err = ce->ops->alloc(ce);
if (unlikely(err))
goto unlock;
@@ -92,6 +97,8 @@ int __intel_context_do_pin(struct intel_context *ce)
{
int err;
+ GEM_BUG_ON(intel_context_is_closed(ce));
+
if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
err = intel_context_alloc_state(ce);
if (err)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 18efad255124..07be021882cc 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -173,6 +173,11 @@ static inline bool intel_context_is_barrier(const struct intel_context *ce)
return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
}
+static inline bool intel_context_is_closed(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
+}
+
static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
diff --git a/drivers/gpu/drm/i915/gt/intel_context_param.c b/drivers/gpu/drm/i915/gt/intel_context_param.c
new file mode 100644
index 000000000000..65dcd090245d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context_param.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_active.h"
+#include "intel_context.h"
+#include "intel_context_param.h"
+#include "intel_ring.h"
+
+int intel_context_set_ring_size(struct intel_context *ce, long sz)
+{
+ int err;
+
+ if (intel_context_lock_pinned(ce))
+ return -EINTR;
+
+ err = i915_active_wait(&ce->active);
+ if (err < 0)
+ goto unlock;
+
+ if (intel_context_is_pinned(ce)) {
+ err = -EBUSY; /* In active use, come back later! */
+ goto unlock;
+ }
+
+ if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ struct intel_ring *ring;
+
+ /* Replace the existing ringbuffer */
+ ring = intel_engine_create_ring(ce->engine, sz);
+ if (IS_ERR(ring)) {
+ err = PTR_ERR(ring);
+ goto unlock;
+ }
+
+ intel_ring_put(ce->ring);
+ ce->ring = ring;
+
+ /* Context image will be updated on next pin */
+ } else {
+ ce->ring = __intel_context_ring_size(sz);
+ }
+
+unlock:
+ intel_context_unlock_pinned(ce);
+ return err;
+}
+
+long intel_context_get_ring_size(struct intel_context *ce)
+{
+ long sz = (unsigned long)READ_ONCE(ce->ring);
+
+ if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ if (intel_context_lock_pinned(ce))
+ return -EINTR;
+
+ sz = ce->ring->size;
+ intel_context_unlock_pinned(ce);
+ }
+
+ return sz;
+}
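intel_context_get_ring_size() reads ce->ring directly while CONTEXT_ALLOC_BIT is clear, relying on __intel_context_ring_size() having stashed the requested size in the pointer slot instead of a real ring. A standalone sketch of that tagged-pointer idiom, assuming a low tag bit distinguishes an encoded size from an aligned pointer (the driver's exact encoding may differ):

	#include <stdint.h>
	#include <stdio.h>

	struct ring { unsigned long size; };

	/* Stash a size where a pointer normally lives. Bit 0 is assumed
	 * free for tagging because real ring pointers are aligned; the
	 * driver's actual encoding may differ. */
	static struct ring *encode_size(unsigned long sz)
	{
		return (struct ring *)(sz | 1);
	}

	static int is_encoded_size(const struct ring *p)
	{
		return (uintptr_t)p & 1;
	}

	static unsigned long ring_size(const struct ring *p)
	{
		if (is_encoded_size(p))
			return (uintptr_t)p & ~1ul;	/* not yet allocated */
		return p->size;				/* real ring */
	}

	int main(void)
	{
		struct ring *pending = encode_size(16 * 4096);
		struct ring real = { 32 * 4096 };

		printf("%lu %lu\n", ring_size(pending), ring_size(&real));
		return 0;
	}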
diff --git a/drivers/gpu/drm/i915/gt/intel_context_param.h b/drivers/gpu/drm/i915/gt/intel_context_param.h
new file mode 100644
index 000000000000..f053d8633fe2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context_param.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_CONTEXT_PARAM_H
+#define INTEL_CONTEXT_PARAM_H
+
+struct intel_context;
+
+int intel_context_set_ring_size(struct intel_context *ce, long sz);
+long intel_context_get_ring_size(struct intel_context *ce);
+
+#endif /* INTEL_CONTEXT_PARAM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 11278343b9b5..07cb83a0d017 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -45,8 +45,8 @@ struct intel_context {
struct intel_engine_cs *engine;
struct intel_engine_cs *inflight;
-#define intel_context_inflight(ce) ptr_mask_bits((ce)->inflight, 2)
-#define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2)
+#define intel_context_inflight(ce) ptr_mask_bits(READ_ONCE((ce)->inflight), 2)
+#define intel_context_inflight_count(ce) ptr_unmask_bits(READ_ONCE((ce)->inflight), 2)
struct i915_address_space *vm;
struct i915_gem_context __rcu *gem_context;
@@ -62,10 +62,11 @@ struct intel_context {
#define CONTEXT_BARRIER_BIT 0
#define CONTEXT_ALLOC_BIT 1
#define CONTEXT_VALID_BIT 2
-#define CONTEXT_USE_SEMAPHORES 3
-#define CONTEXT_BANNED 4
-#define CONTEXT_FORCE_SINGLE_SUBMISSION 5
-#define CONTEXT_NOPREEMPT 6
+#define CONTEXT_CLOSED_BIT 3
+#define CONTEXT_USE_SEMAPHORES 4
+#define CONTEXT_BANNED 5
+#define CONTEXT_FORCE_SINGLE_SUBMISSION 6
+#define CONTEXT_NOPREEMPT 7
u32 *lrc_reg_state;
u64 lrc_desc;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 29c8c03c5caa..b469de0dd9b6 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -107,7 +107,20 @@ execlists_num_ports(const struct intel_engine_execlists * const execlists)
static inline struct i915_request *
execlists_active(const struct intel_engine_execlists *execlists)
{
- return *READ_ONCE(execlists->active);
+ struct i915_request * const *cur, * const *old, *active;
+
+ cur = READ_ONCE(execlists->active);
+ smp_rmb(); /* pairs with overwrite protection in process_csb() */
+ do {
+ old = cur;
+
+ active = READ_ONCE(*cur);
+ cur = READ_ONCE(execlists->active);
+
+ smp_rmb(); /* and complete the seqlock retry */
+ } while (unlikely(cur != old));
+
+ return active;
}
static inline void
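The rewritten execlists_active() is the read half of a seqlock-like protocol: process_csb() and cancel_port_requests() below republish execlists->active behind an smp_wmb(), so the reader re-samples the pointer around the dereference and retries until both samples agree. A userspace analogue using C11 acquire loads in place of READ_ONCE()/smp_rmb():

	#include <stdatomic.h>
	#include <stdio.h>

	struct req { int seqno; };

	static struct req r0 = { 1 };
	static struct req *slots[2] = { &r0, NULL };
	static _Atomic(struct req **) active_ptr = slots;

	static struct req *read_active(void)
	{
		struct req **cur, **old, *active;

		cur = atomic_load_explicit(&active_ptr, memory_order_acquire);
		do {
			old = cur;
			active = *cur;	/* READ_ONCE(*cur) in the kernel */
			cur = atomic_load_explicit(&active_ptr,
						   memory_order_acquire);
		} while (cur != old);	/* the writer republished: retry */

		return active;
	}

	int main(void)
	{
		printf("seqno %d\n", read_active()->seqno);
		return 0;
	}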
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index e46e55354e95..3aa8a652c16d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -275,6 +275,7 @@ static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
{
const struct engine_info *info = &intel_engines[id];
+ struct drm_i915_private *i915 = gt->i915;
struct intel_engine_cs *engine;
BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
@@ -301,11 +302,11 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->id = id;
engine->legacy_idx = INVALID_ENGINE;
engine->mask = BIT(id);
- engine->i915 = gt->i915;
+ engine->i915 = i915;
engine->gt = gt;
engine->uncore = gt->uncore;
engine->hw_id = engine->guc_id = info->hw_id;
- engine->mmio_base = __engine_mmio_base(gt->i915, info->mmio_bases);
+ engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
engine->class = info->class;
engine->instance = info->instance;
@@ -313,6 +314,8 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->props.heartbeat_interval_ms =
CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
+ engine->props.max_busywait_duration_ns =
+ CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT;
engine->props.preempt_timeout_ms =
CONFIG_DRM_I915_PREEMPT_TIMEOUT;
engine->props.stop_timeout_ms =
@@ -320,11 +323,15 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->props.timeslice_duration_ms =
CONFIG_DRM_I915_TIMESLICE_DURATION;
+ /* Override to uninterruptible for OpenCL workloads. */
+ if (INTEL_GEN(i915) == 12 && engine->class == RENDER_CLASS)
+ engine->props.preempt_timeout_ms = 0;
+
engine->context_size = intel_engine_context_size(gt, engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0;
if (engine->context_size)
- DRIVER_CAPS(gt->i915)->has_logical_contexts = true;
+ DRIVER_CAPS(i915)->has_logical_contexts = true;
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
@@ -340,7 +347,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
gt->engine_class[info->class][info->instance] = engine;
gt->engine[id] = engine;
- gt->i915->engine[id] = engine;
+ i915->engine[id] = engine;
return 0;
}
@@ -639,7 +646,7 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
{
struct intel_engine_cs *engine = ce->engine;
struct measure_breadcrumb *frame;
- int dw = -ENOMEM;
+ int dw;
GEM_BUG_ON(!engine->gt->scratch);
@@ -1379,24 +1386,24 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
char hdr[160];
int len;
- len = snprintf(hdr, sizeof(hdr),
- "\t\tActive[%d]: ",
- (int)(port - execlists->active));
+ len = scnprintf(hdr, sizeof(hdr),
+ "\t\tActive[%d]: ",
+ (int)(port - execlists->active));
if (!i915_request_signaled(rq)) {
struct intel_timeline *tl = get_timeline(rq);
- len += snprintf(hdr + len, sizeof(hdr) - len,
- "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
- i915_ggtt_offset(rq->ring->vma),
- tl ? tl->hwsp_offset : 0,
- hwsp_seqno(rq),
- DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
- 1000 * 1000));
+ len += scnprintf(hdr + len, sizeof(hdr) - len,
+ "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ tl ? tl->hwsp_offset : 0,
+ hwsp_seqno(rq),
+ DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
+ 1000 * 1000));
if (tl)
intel_timeline_put(tl);
}
- snprintf(hdr + len, sizeof(hdr) - len, "rq: ");
+ scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
print_request(m, rq, hdr);
}
for (port = execlists->pending; (rq = *port); port++) {
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index b23366a81048..80cdde712842 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -547,6 +547,7 @@ struct intel_engine_cs {
struct {
unsigned long heartbeat_interval_ms;
+ unsigned long max_busywait_duration_ns;
unsigned long preempt_timeout_ms;
unsigned long stop_timeout_ms;
unsigned long timeslice_duration_ms;
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 7dae91e0d002..aed498a0d032 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -8,6 +8,8 @@
#include <asm/set_memory.h>
#include <asm/smp.h>
+#include <drm/i915_drm.h>
+
#include "intel_gt.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
@@ -157,6 +159,13 @@ static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
intel_gtt_chipset_flush();
}
+static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ return addr | _PAGE_PRESENT;
+}
+
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
writeq(pte, addr);
@@ -172,7 +181,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
- gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
+ gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, 0));
ggtt->invalidate(ggtt);
}
@@ -185,7 +194,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct sgt_iter sgt_iter;
gen8_pte_t __iomem *gtt_entries;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
+ const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
dma_addr_t addr;
/*
@@ -427,7 +436,7 @@ static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
u64 size;
int ret;
- if (!USES_GUC(ggtt->vm.i915))
+ if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
return 0;
GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
@@ -754,7 +763,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
* readback check when writing GTT PTE entries.
*/
if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
- ggtt->gsm = ioremap_nocache(phys_addr, size);
+ ggtt->gsm = ioremap(phys_addr, size);
else
ggtt->gsm = ioremap_wc(phys_addr, size);
if (!ggtt->gsm) {
@@ -857,7 +866,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
ggtt->vm.vma_ops.clear_pages = clear_pages;
- ggtt->vm.pte_encode = gen8_pte_encode;
+ ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
setup_private_pat(ggtt->vm.gt->uncore);
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 51b8718513bc..f04214a54f75 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -292,10 +292,21 @@
#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
-#define PIPELINE_SELECT ((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16))
-#define GFX_OP_3DSTATE_VF_STATISTICS ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16))
-#define MEDIA_VFE_STATE ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16))
+#define STATE_BASE_ADDRESS \
+ ((0x3 << 29) | (0x0 << 27) | (0x1 << 24) | (0x1 << 16))
+#define BASE_ADDRESS_MODIFY REG_BIT(0)
+#define PIPELINE_SELECT \
+ ((0x3 << 29) | (0x1 << 27) | (0x1 << 24) | (0x4 << 16))
+#define PIPELINE_SELECT_MEDIA REG_BIT(0)
+#define GFX_OP_3DSTATE_VF_STATISTICS \
+ ((0x3 << 29) | (0x1 << 27) | (0x0 << 24) | (0xB << 16))
+#define MEDIA_VFE_STATE \
+ ((0x3 << 29) | (0x2 << 27) | (0x0 << 24) | (0x0 << 16))
#define MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18)
+#define MEDIA_INTERFACE_DESCRIPTOR_LOAD \
+ ((0x3 << 29) | (0x2 << 27) | (0x0 << 24) | (0x2 << 16))
+#define MEDIA_OBJECT \
+ ((0x3 << 29) | (0x2 << 27) | (0x1 << 24) | (0x0 << 16))
#define GPGPU_OBJECT ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16))
#define GPGPU_WALKER ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16))
#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index f1f1b306e0af..d09f7596cb98 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -592,7 +592,9 @@ int intel_gt_init(struct intel_gt *gt)
if (err)
goto err_engines;
- intel_uc_init(&gt->uc);
+ err = intel_uc_init(&gt->uc);
+ if (err)
+ goto err_engines;
err = intel_gt_resume(gt);
if (err)
@@ -642,6 +644,13 @@ void intel_gt_driver_remove(struct intel_gt *gt)
void intel_gt_driver_unregister(struct intel_gt *gt)
{
intel_rps_driver_unregister(&gt->rps);
+
+ /*
+ * Upon unregistering the device, and thereby preventing any new
+ * users, cancel all in-flight requests so that we can quickly
+ * unbind the active resources.
+ */
+ intel_gt_set_wedged(gt);
}
void intel_gt_driver_release(struct intel_gt *gt)
@@ -658,6 +667,9 @@ void intel_gt_driver_release(struct intel_gt *gt)
void intel_gt_driver_late_release(struct intel_gt *gt)
{
+ /* We need to wait for inflight RCU frees to release their grip */
+ rcu_barrier();
+
intel_uc_driver_late_release(&gt->uc);
intel_gt_fini_requests(gt);
intel_gt_fini_reset(gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 8a5054f21bf8..24c99d0838af 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -147,24 +147,32 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
fence = i915_active_fence_get(&tl->last_request);
if (fence) {
+ mutex_unlock(&tl->mutex);
+
timeout = dma_fence_wait_timeout(fence,
interruptible,
timeout);
dma_fence_put(fence);
+
+ /* Retirement is best effort */
+ if (!mutex_trylock(&tl->mutex)) {
+ active_count++;
+ goto out_active;
+ }
}
}
if (!retire_requests(tl) || flush_submission(gt))
active_count++;
+ mutex_unlock(&tl->mutex);
- spin_lock(&timelines->lock);
+out_active: spin_lock(&timelines->lock);
- /* Resume iteration after dropping lock */
+ /* Resume list iteration after reacquiring spinlock */
list_safe_reset_next(tl, tn, link);
if (atomic_dec_and_test(&tl->active_count))
list_del(&tl->link);
- mutex_unlock(&tl->mutex);
/* Defer the final release to after the spinlock */
if (refcount_dec_and_test(&tl->kref.refcount)) {
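The reworked retire loop no longer sleeps on the fence while holding tl->mutex: it drops the mutex for the wait and afterwards only trylocks it, counting the timeline as still active when the lock is contended. A pthread sketch of that drop-wait-trylock shape (names are illustrative):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t tl_mutex = PTHREAD_MUTEX_INITIALIZER;

	static void wait_for_fence(void)
	{
		/* stand-in for dma_fence_wait_timeout(); may sleep */
	}

	static bool retire_one(void)
	{
		pthread_mutex_lock(&tl_mutex);
		/* ... grab a reference to the last request under the lock ... */
		pthread_mutex_unlock(&tl_mutex);

		wait_for_fence();	/* never wait while holding tl_mutex */

		/* Retirement is best effort: if the lock is contended,
		 * record the timeline as still active and move on. */
		if (pthread_mutex_trylock(&tl_mutex) != 0)
			return false;

		/* ... retire_requests(tl) ... */
		pthread_mutex_unlock(&tl_mutex);
		return true;
	}

	int main(void)
	{
		printf("retired: %d\n", retire_one());
		return 0;
	}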
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index bb9a6e638175..2a72cce63fd9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -171,7 +171,9 @@ void __i915_vm_close(struct i915_address_space *vm)
{
struct i915_vma *vma, *vn;
- mutex_lock(&vm->mutex);
+ if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex))
+ return;
+
list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
@@ -186,6 +188,7 @@ void __i915_vm_close(struct i915_address_space *vm)
i915_gem_object_put(obj);
}
GEM_BUG_ON(!list_empty(&vm->bound_list));
+
mutex_unlock(&vm->mutex);
}
@@ -484,30 +487,6 @@ void gtt_write_workarounds(struct intel_gt *gt)
}
}
-u64 gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
-
- if (unlikely(flags & PTE_READ_ONLY))
- pte &= ~_PAGE_RW;
-
- switch (level) {
- case I915_CACHE_NONE:
- pte |= PPAT_UNCACHED;
- break;
- case I915_CACHE_WT:
- pte |= PPAT_DISPLAY_ELLC;
- break;
- default:
- pte |= PPAT_CACHED;
- break;
- }
-
- return pte;
-}
-
static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
/* TGL doesn't support LLC or AGE settings */
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 23004445806a..b3116fe8d180 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -429,8 +429,7 @@ static inline void
i915_vm_close(struct i915_address_space *vm)
{
GEM_BUG_ON(!atomic_read(&vm->open));
- if (atomic_dec_and_test(&vm->open))
- __i915_vm_close(vm);
+ __i915_vm_close(vm);
i915_vm_put(vm);
}
@@ -515,10 +514,6 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);
-u64 gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags);
-
int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
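The open-count decrement moves from i915_vm_close() into __i915_vm_close(), which uses atomic_dec_and_mutex_lock(): the mutex is taken, and nonzero returned, only when the count actually reaches zero. A minimal sketch of that helper's shape with C11 atomics and a pthread mutex:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static int dec_and_mutex_lock(atomic_int *cnt, pthread_mutex_t *lock)
	{
		int v = atomic_load(cnt);

		/* Fast path: decrement while clearly not the last closer. */
		while (v > 1) {
			if (atomic_compare_exchange_weak(cnt, &v, v - 1))
				return 0;
		}

		/* Possibly the last reference: decide under the lock so
		 * the teardown that follows is serialised. */
		pthread_mutex_lock(lock);
		if (atomic_fetch_sub(cnt, 1) == 1)
			return 1;	/* count hit zero; caller keeps the lock */

		pthread_mutex_unlock(lock);
		return 0;
	}

	int main(void)
	{
		static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
		atomic_int open = 1;

		if (dec_and_mutex_lock(&open, &lock)) {
			/* ... unbind the vm's bound list here ... */
			pthread_mutex_unlock(&lock);
			printf("last close\n");
		}
		return 0;
	}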
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index ba31cbe8c68e..683014e7bc51 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -245,7 +245,7 @@ static void mark_eio(struct i915_request *rq)
GEM_BUG_ON(i915_request_signaled(rq));
- dma_fence_set_error(&rq->fence, -EIO);
+ i915_request_set_error_once(rq, -EIO);
i915_request_mark_complete(rq);
}
@@ -293,7 +293,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
static inline int rq_prio(const struct i915_request *rq)
{
- return rq->sched.attr.priority;
+ return READ_ONCE(rq->sched.attr.priority);
}
static int effective_prio(const struct i915_request *rq)
@@ -1004,7 +1004,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
i915_request_cancel_breadcrumb(rq);
spin_unlock(&rq->lock);
}
- rq->engine = owner;
+ WRITE_ONCE(rq->engine, owner);
owner->submit_request(rq);
active = NULL;
}
@@ -1316,7 +1316,7 @@ __execlists_schedule_out(struct i915_request *rq,
* If we have just completed this context, the engine may now be
* idle and we want to re-enter powersaving.
*/
- if (list_is_last(&rq->link, &ce->timeline->requests) &&
+ if (list_is_last_rcu(&rq->link, &ce->timeline->requests) &&
i915_request_completed(rq))
intel_engine_add_retire(engine, ce->timeline);
@@ -1448,6 +1448,7 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
{
struct i915_request * const *port, *rq;
struct intel_context *ce = NULL;
+ bool sentinel = false;
trace_ports(execlists, msg, execlists->pending);
@@ -1481,6 +1482,26 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
}
ce = rq->context;
+ /*
+ * Sentinels are supposed to be lonely so they flush the
+ * current execution off the HW. Check that they are the
+ * only request in the pending submission.
+ */
+ if (sentinel) {
+ GEM_TRACE_ERR("context:%llx after sentinel in pending[%zd]\n",
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+
+ sentinel = i915_request_has_sentinel(rq);
+ if (sentinel && port != execlists->pending) {
+ GEM_TRACE_ERR("sentinel context:%llx not in prime position[%zd]\n",
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+
/* Hold tightly onto the lock to prevent concurrent retires! */
if (!spin_trylock_irqsave(&rq->lock, flags))
continue;
@@ -1576,6 +1597,11 @@ static bool can_merge_ctx(const struct intel_context *prev,
return true;
}
+static unsigned long i915_request_flags(const struct i915_request *rq)
+{
+ return READ_ONCE(rq->fence.flags);
+}
+
static bool can_merge_rq(const struct i915_request *prev,
const struct i915_request *next)
{
@@ -1593,7 +1619,7 @@ static bool can_merge_rq(const struct i915_request *prev,
if (i915_request_completed(next))
return true;
- if (unlikely((prev->fence.flags ^ next->fence.flags) &
+ if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
(BIT(I915_FENCE_FLAG_NOPREEMPT) |
BIT(I915_FENCE_FLAG_SENTINEL))))
return false;
@@ -1601,6 +1627,7 @@ static bool can_merge_rq(const struct i915_request *prev,
if (!can_merge_ctx(prev->context, next->context))
return false;
+ GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno));
return true;
}
@@ -1636,7 +1663,7 @@ static bool virtual_matches(const struct virtual_engine *ve,
}
static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
- struct intel_engine_cs *engine)
+ struct i915_request *rq)
{
struct intel_engine_cs *old = ve->siblings[0];
@@ -1644,22 +1671,21 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
spin_lock(&old->breadcrumbs.irq_lock);
if (!list_empty(&ve->context.signal_link)) {
- list_move_tail(&ve->context.signal_link,
- &engine->breadcrumbs.signalers);
- intel_engine_signal_breadcrumbs(engine);
- }
- spin_unlock(&old->breadcrumbs.irq_lock);
-}
+ list_del_init(&ve->context.signal_link);
-static struct i915_request *
-last_active(const struct intel_engine_execlists *execlists)
-{
- struct i915_request * const *last = READ_ONCE(execlists->active);
-
- while (*last && i915_request_completed(*last))
- last++;
+ /*
+ * We cannot acquire the new engine->breadcrumbs.irq_lock
+ * (as we are holding a breadcrumbs.irq_lock already),
+ * so attach this request to the signaler on submission.
+ * The queued irq_work will occur when we finally drop
+ * the engine->active.lock after dequeue.
+ */
+ set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags);
- return *last;
+ /* Also transfer the pending irq_work for the old breadcrumb. */
+ intel_engine_signal_breadcrumbs(rq->engine);
+ }
+ spin_unlock(&old->breadcrumbs.irq_lock);
}
#define for_each_waiter(p__, rq__) \
@@ -1668,9 +1694,9 @@ last_active(const struct intel_engine_execlists *execlists)
wait_link)
#define for_each_signaler(p__, rq__) \
- list_for_each_entry_lockless(p__, \
- &(rq__)->sched.signalers_list, \
- signal_link)
+ list_for_each_entry_rcu(p__, \
+ &(rq__)->sched.signalers_list, \
+ signal_link)
static void defer_request(struct i915_request *rq, struct list_head * const pl)
{
@@ -1735,11 +1761,9 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
if (!intel_engine_has_timeslices(engine))
return false;
- if (list_is_last(&rq->sched.link, &engine->active.requests))
- return false;
-
- hint = max(rq_prio(list_next_entry(rq, sched.link)),
- engine->execlists.queue_priority_hint);
+ hint = engine->execlists.queue_priority_hint;
+ if (!list_is_last(&rq->sched.link, &engine->active.requests))
+ hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
return hint >= effective_prio(rq);
}
@@ -1762,12 +1786,13 @@ timeslice(const struct intel_engine_cs *engine)
static unsigned long
active_timeslice(const struct intel_engine_cs *engine)
{
- const struct i915_request *rq = *engine->execlists.active;
+ const struct intel_engine_execlists *execlists = &engine->execlists;
+ const struct i915_request *rq = *execlists->active;
if (!rq || i915_request_completed(rq))
return 0;
- if (engine->execlists.switch_priority_hint < effective_prio(rq))
+ if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq))
return 0;
return timeslice(engine);
@@ -1781,16 +1806,29 @@ static void set_timeslice(struct intel_engine_cs *engine)
set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
}
+static void start_timeslice(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *execlists = &engine->execlists;
+ int prio = queue_prio(execlists);
+
+ WRITE_ONCE(execlists->switch_priority_hint, prio);
+ if (prio == INT_MIN)
+ return;
+
+ if (timer_pending(&execlists->timer))
+ return;
+
+ set_timer_ms(&execlists->timer, timeslice(engine));
+}
+
static void record_preemption(struct intel_engine_execlists *execlists)
{
(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
}
-static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
{
- struct i915_request *rq;
-
- rq = last_active(&engine->execlists);
if (!rq)
return 0;
@@ -1801,13 +1839,14 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
return READ_ONCE(engine->props.preempt_timeout_ms);
}
-static void set_preempt_timeout(struct intel_engine_cs *engine)
+static void set_preempt_timeout(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
{
if (!intel_engine_has_preempt_reset(engine))
return;
set_timer_ms(&engine->execlists.preempt,
- active_preempt_timeout(engine));
+ active_preempt_timeout(engine, rq));
}
static inline void clear_ports(struct i915_request **ports, int count)
@@ -1820,6 +1859,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request **port = execlists->pending;
struct i915_request ** const last_port = port + execlists->port_mask;
+ struct i915_request * const *active;
struct i915_request *last;
struct rb_node *rb;
bool submit = false;
@@ -1874,7 +1914,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* i.e. we will retrigger preemption following the ack in case
* of trouble.
*/
- last = last_active(execlists);
+ active = READ_ONCE(execlists->active);
+ while ((last = *active) && i915_request_completed(last))
+ active++;
+
if (last) {
if (need_preempt(engine, last, rb)) {
ENGINE_TRACE(engine,
@@ -1944,11 +1987,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* Even if ELSP[1] is occupied and not worthy
* of timeslices, our queue might be.
*/
- if (!execlists->timer.expires &&
- need_timeslice(engine, last))
- set_timer_ms(&execlists->timer,
- timeslice(engine));
-
+ start_timeslice(engine);
return;
}
}
@@ -1983,7 +2022,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (last && !can_merge_rq(last, rq)) {
spin_unlock(&ve->base.active.lock);
- return; /* leave this for another */
+ start_timeslice(engine);
+ return; /* leave this for another sibling */
}
ENGINE_TRACE(engine,
@@ -1995,13 +2035,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
"",
yesno(engine != ve->siblings[0]));
- ve->request = NULL;
- ve->base.execlists.queue_priority_hint = INT_MIN;
+ WRITE_ONCE(ve->request, NULL);
+ WRITE_ONCE(ve->base.execlists.queue_priority_hint,
+ INT_MIN);
rb_erase_cached(rb, &execlists->virtual);
RB_CLEAR_NODE(rb);
GEM_BUG_ON(!(rq->execution_mask & engine->mask));
- rq->engine = engine;
+ WRITE_ONCE(rq->engine, engine);
if (engine != ve->siblings[0]) {
u32 *regs = ve->context.lrc_reg_state;
@@ -2014,7 +2055,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
engine);
if (!list_empty(&ve->context.signals))
- virtual_xfer_breadcrumbs(ve, engine);
+ virtual_xfer_breadcrumbs(ve, rq);
/*
* Move the bound engine to the top of the list
@@ -2121,6 +2162,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(last &&
!can_merge_ctx(last->context,
rq->context));
+ GEM_BUG_ON(last &&
+ i915_seqno_passed(last->fence.seqno,
+ rq->fence.seqno));
submit = true;
last = rq;
@@ -2159,7 +2203,7 @@ done:
* Skip if we ended up with exactly the same set of requests,
* e.g. trying to timeslice a pair of ordered contexts
*/
- if (!memcmp(execlists->active, execlists->pending,
+ if (!memcmp(active, execlists->pending,
(port - execlists->pending + 1) * sizeof(*port))) {
do
execlists_schedule_out(fetch_and_zero(port));
@@ -2170,7 +2214,7 @@ done:
clear_ports(port + 1, last_port - port);
execlists_submit_ports(engine);
- set_preempt_timeout(engine);
+ set_preempt_timeout(engine, *active);
} else {
skip_submit:
ring_set_paused(engine, 0);
@@ -2191,6 +2235,7 @@ cancel_port_requests(struct intel_engine_execlists * const execlists)
execlists_schedule_out(*port);
clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
+ smp_wmb(); /* complete the seqlock for execlists_active() */
WRITE_ONCE(execlists->active, execlists->inflight);
}
@@ -2345,6 +2390,7 @@ static void process_csb(struct intel_engine_cs *engine)
/* Point active to the new ELSP; prevent overwriting */
WRITE_ONCE(execlists->active, execlists->pending);
+ smp_wmb(); /* notify execlists_active() */
/* cancel old inflight, prepare for switch */
trace_ports(execlists, "preempted", old);
@@ -2352,11 +2398,12 @@ static void process_csb(struct intel_engine_cs *engine)
execlists_schedule_out(*old++);
/* switch pending to inflight */
- WRITE_ONCE(execlists->active,
- memcpy(execlists->inflight,
- execlists->pending,
- execlists_num_ports(execlists) *
- sizeof(*execlists->pending)));
+ memcpy(execlists->inflight,
+ execlists->pending,
+ execlists_num_ports(execlists) *
+ sizeof(*execlists->pending));
+ smp_wmb(); /* complete the seqlock */
+ WRITE_ONCE(execlists->active, execlists->inflight);
WRITE_ONCE(execlists->pending[0], NULL);
} else {
@@ -2533,11 +2580,13 @@ unlock:
static bool hold_request(const struct i915_request *rq)
{
struct i915_dependency *p;
+ bool result = false;
/*
* If one of our ancestors is on hold, we must also be on hold,
* otherwise we will bypass it and execute before it.
*/
+ rcu_read_lock();
for_each_signaler(p, rq) {
const struct i915_request *s =
container_of(p->signaler, typeof(*s), sched);
@@ -2545,11 +2594,13 @@ static bool hold_request(const struct i915_request *rq)
if (s->engine != rq->engine)
continue;
- if (i915_request_on_hold(s))
- return true;
+ result = i915_request_on_hold(s);
+ if (result)
+ break;
}
+ rcu_read_unlock();
- return false;
+ return result;
}
static void __execlists_unhold(struct i915_request *rq)
@@ -2575,6 +2626,10 @@ static void __execlists_unhold(struct i915_request *rq)
struct i915_request *w =
container_of(p->waiter, typeof(*w), sched);
+ /* Propagate any change in error status */
+ if (rq->fence.error)
+ i915_request_set_error_once(w, rq->fence.error);
+
if (w->engine != rq->engine)
continue;
@@ -2962,6 +3017,7 @@ __execlists_update_reg_state(const struct intel_context *ce,
regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
regs[CTX_RING_HEAD] = head;
regs[CTX_RING_TAIL] = ring->tail;
+ regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
/* RPCS */
if (engine->class == RENDER_CLASS) {
@@ -3632,9 +3688,6 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
if (!rq)
goto unwind;
- /* We still have requests in-flight; the engine should be active */
- GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
-
ce = rq->context;
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
@@ -3644,8 +3697,12 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
goto out_replay;
}
+ /* We still have requests in-flight; the engine should be active */
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
/* Context has requests still in-flight; it should not be idle! */
GEM_BUG_ON(i915_active_is_idle(&ce->active));
+
rq = active_request(ce->timeline, rq);
head = intel_ring_wrap(ce->ring, rq->head);
GEM_BUG_ON(head == ce->ring->tail);
@@ -3719,7 +3776,10 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
static void nop_submission_tasklet(unsigned long data)
{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+
/* The driver is wedged; don't process any more events. */
+ WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
}
static void execlists_reset_cancel(struct intel_engine_cs *engine)
@@ -4115,26 +4175,6 @@ static int gen12_emit_flush_render(struct i915_request *request,
*cs++ = preparser_disable(false);
intel_ring_advance(request, cs);
-
- /*
- * Wa_1604544889:tgl
- */
- if (IS_TGL_REVID(request->i915, TGL_REVID_A0, TGL_REVID_A0)) {
- flags = 0;
- flags |= PIPE_CONTROL_CS_STALL;
- flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
-
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
- flags |= PIPE_CONTROL_QW_WRITE;
-
- cs = intel_ring_begin(request, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cs = gen8_emit_pipe_control(cs, flags,
- LRC_PPHWSP_SCRATCH_ADDR);
- intel_ring_advance(request, cs);
- }
}
return 0;
@@ -4873,7 +4913,7 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
mask = rq->execution_mask;
if (unlikely(!mask)) {
/* Invalid selection, submit to a random engine in error */
- i915_request_skip(rq, -ENODEV);
+ i915_request_set_error_once(rq, -ENODEV);
mask = ve->siblings[0]->mask;
}
@@ -4887,7 +4927,7 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
static void virtual_submission_tasklet(unsigned long data)
{
struct virtual_engine * const ve = (struct virtual_engine *)data;
- const int prio = ve->base.execlists.queue_priority_hint;
+ const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
intel_engine_mask_t mask;
unsigned int n;
@@ -5283,11 +5323,15 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
show_request(m, last, "\t\tE ");
}
- last = NULL;
- count = 0;
+ if (execlists->switch_priority_hint != INT_MIN)
+ drm_printf(m, "\t\tSwitch priority hint: %d\n",
+ READ_ONCE(execlists->switch_priority_hint));
if (execlists->queue_priority_hint != INT_MIN)
drm_printf(m, "\t\tQueue priority hint: %d\n",
- execlists->queue_priority_hint);
+ READ_ONCE(execlists->queue_priority_hint));
+
+ last = NULL;
+ count = 0;
for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
int i;
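execlists_dequeue() above open-codes the removed last_active() helper: it snapshots execlists->active once and steps past already-completed requests, leaving 'last' as the oldest request still on the GPU, or NULL once the ports have drained. In miniature:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct req { bool completed; int id; };

	static struct req *find_last_active(struct req *const *active)
	{
		struct req *last;

		/* Skip requests that have already completed; stop at the
		 * first still-executing request or the NULL terminator. */
		while ((last = *active) && last->completed)
			active++;

		return last;
	}

	int main(void)
	{
		struct req a = { true, 1 }, b = { false, 2 };
		struct req *ports[] = { &a, &b, NULL };

		printf("last active: %d\n", find_last_active(ports)->id);
		return 0;
	}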
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index bef132709854..3847ee44b181 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -7,6 +7,7 @@
#include <linux/pm_runtime.h>
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
@@ -319,10 +320,10 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
return PTR_ERR(pctx);
}
- GEM_BUG_ON(range_overflows_t(u64,
- i915->dsm.start,
- pctx->stolen->start,
- U32_MAX));
+ GEM_BUG_ON(range_overflows_end_t(u64,
+ i915->dsm.start,
+ pctx->stolen->start,
+ U32_MAX));
pctx_paddr = i915->dsm.start + pctx->stolen->start;
intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
@@ -602,6 +603,7 @@ void intel_rc6_unpark(struct intel_rc6 *rc6)
void intel_rc6_park(struct intel_rc6 *rc6)
{
struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ unsigned int target;
if (!rc6->enabled)
return;
@@ -616,7 +618,14 @@ void intel_rc6_park(struct intel_rc6 *rc6)
/* Turn off the HW timers and go directly to rc6 */
set(uncore, GEN6_RC_CONTROL, GEN6_RC_CTL_RC6_ENABLE);
- set(uncore, GEN6_RC_STATE, 0x4 << RC_SW_TARGET_STATE_SHIFT);
+
+ if (HAS_RC6pp(rc6_to_i915(rc6)))
+ target = 0x6; /* deepest rc6 */
+ else if (HAS_RC6p(rc6_to_i915(rc6)))
+ target = 0x5; /* deep rc6 */
+ else
+ target = 0x4; /* normal rc6 */
+ set(uncore, GEN6_RC_STATE, target << RC_SW_TARGET_STATE_SHIFT);
}
void intel_rc6_disable(struct intel_rc6 *rc6)
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index aef6ab58d7d9..80db3c9d785e 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -48,8 +48,10 @@ static void engine_skip_context(struct i915_request *rq)
lockdep_assert_held(&engine->active.lock);
list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
- if (rq->context == hung_ctx)
- i915_request_skip(rq, -EIO);
+ if (rq->context == hung_ctx) {
+ i915_request_set_error_once(rq, -EIO);
+ __i915_request_skip(rq);
+ }
}
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
@@ -86,19 +88,18 @@ static bool mark_guilty(struct i915_request *rq)
bool banned;
int i;
+ if (intel_context_is_closed(rq->context)) {
+ intel_context_set_banned(rq->context);
+ return true;
+ }
+
rcu_read_lock();
ctx = rcu_dereference(rq->context->gem_context);
if (ctx && !kref_get_unless_zero(&ctx->ref))
ctx = NULL;
rcu_read_unlock();
if (!ctx)
- return false;
-
- if (i915_gem_context_is_closed(ctx)) {
- intel_context_set_banned(rq->context);
- banned = true;
- goto out;
- }
+ return intel_context_is_banned(rq->context);
atomic_inc(&ctx->guilty_count);
@@ -154,11 +155,12 @@ void __i915_request_reset(struct i915_request *rq, bool guilty)
rcu_read_lock(); /* protect the GEM context */
if (guilty) {
- i915_request_skip(rq, -EIO);
+ i915_request_set_error_once(rq, -EIO);
+ __i915_request_skip(rq);
if (mark_guilty(rq))
engine_skip_context(rq);
} else {
- dma_fence_set_error(&rq->fence, -EAGAIN);
+ i915_request_set_error_once(rq, -EAGAIN);
mark_innocent(rq);
}
rcu_read_unlock();
@@ -785,7 +787,7 @@ static void nop_submit_request(struct i915_request *request)
unsigned long flags;
RQ_TRACE(request, "-EIO\n");
- dma_fence_set_error(&request->fence, -EIO);
+ i915_request_set_error_once(request, -EIO);
spin_lock_irqsave(&engine->active.lock, flags);
__i915_request_submit(request);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index f70b903a98bc..fdc3f10e12aa 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -29,11 +29,10 @@
#include <linux/log2.h>
-#include <drm/i915_drm.h>
-
#include "gem/i915_gem_context.h"
#include "gen6_ppgtt.h"
+#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_context.h"
@@ -897,9 +896,7 @@ static void reset_cancel(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->active.requests, sched.link) {
- if (!i915_request_signaled(request))
- dma_fence_set_error(&request->fence, -EIO);
-
+ i915_request_set_error_once(request, -EIO);
i915_request_mark_complete(request);
}
@@ -1360,7 +1357,9 @@ static int load_pd_dir(struct i915_request *rq,
return rq->engine->emit_flush(rq, EMIT_FLUSH);
}
-static inline int mi_set_context(struct i915_request *rq, u32 flags)
+static inline int mi_set_context(struct i915_request *rq,
+ struct intel_context *ce,
+ u32 flags)
{
struct drm_i915_private *i915 = rq->i915;
struct intel_engine_cs *engine = rq->engine;
@@ -1435,7 +1434,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
*cs++ = MI_NOOP;
*cs++ = MI_SET_CONTEXT;
- *cs++ = i915_ggtt_offset(rq->context->state) | flags;
+ *cs++ = i915_ggtt_offset(ce->state) | flags;
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
@@ -1550,13 +1549,56 @@ static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
}
+static int clear_residuals(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ int ret;
+
+ ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
+ if (ret)
+ return ret;
+
+ if (engine->kernel_context->state) {
+ ret = mi_set_context(rq,
+ engine->kernel_context,
+ MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
+ if (ret)
+ return ret;
+ }
+
+ ret = engine->emit_bb_start(rq,
+ engine->wa_ctx.vma->node.start, 0,
+ 0);
+ if (ret)
+ return ret;
+
+ ret = engine->emit_flush(rq, EMIT_FLUSH);
+ if (ret)
+ return ret;
+
+ /* Always invalidate before the next switch_mm() */
+ return engine->emit_flush(rq, EMIT_INVALIDATE);
+}
+
static int switch_context(struct i915_request *rq)
{
+ struct intel_engine_cs *engine = rq->engine;
struct intel_context *ce = rq->context;
+ void **residuals = NULL;
int ret;
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
+ if (engine->wa_ctx.vma && ce != engine->kernel_context) {
+ if (engine->wa_ctx.vma->private != ce) {
+ ret = clear_residuals(rq);
+ if (ret)
+ return ret;
+
+ residuals = &engine->wa_ctx.vma->private;
+ }
+ }
+
ret = switch_mm(rq, vm_alias(ce->vm));
if (ret)
return ret;
@@ -1564,7 +1606,7 @@ static int switch_context(struct i915_request *rq)
if (ce->state) {
u32 flags;
- GEM_BUG_ON(rq->engine->id != RCS0);
+ GEM_BUG_ON(engine->id != RCS0);
/* For resource streamer on HSW+ and power context elsewhere */
BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
@@ -1576,7 +1618,7 @@ static int switch_context(struct i915_request *rq)
else
flags |= MI_RESTORE_INHIBIT;
- ret = mi_set_context(rq, flags);
+ ret = mi_set_context(rq, ce, flags);
if (ret)
return ret;
}
@@ -1585,6 +1627,20 @@ static int switch_context(struct i915_request *rq)
if (ret)
return ret;
+ /*
+ * Now past the point of no return, this request _will_ be emitted.
+ *
+ * Or at least this preamble will be emitted; the request may be
+ * interrupted prior to submitting the user payload. If so, we
+ * still submit the "empty" request in order to preserve global
+ * state tracking such as this, our tracking of the current
+ * dirty context.
+ */
+ if (residuals) {
+ intel_context_put(*residuals);
+ *residuals = intel_context_get(ce);
+ }
+
return 0;
}
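
The residuals bookkeeping above boils down to a swap-with-refcount pattern: the slot behind engine->wa_ctx.vma->private always pins the last context whose state may still be resident, and is exchanged only once the request is past the point of no return. A minimal userspace sketch of that pattern, using hypothetical names rather than the i915 API:

/* Sketch of the "track last owner via refcounted slot" pattern used
 * above for engine->wa_ctx.vma->private. All names are hypothetical. */
#include <stdlib.h>

struct ctx { int refcount; };

static struct ctx *ctx_get(struct ctx *c) { c->refcount++; return c; }
static void ctx_put(struct ctx *c) { if (c && !--c->refcount) free(c); }

/* Called once the request is past the point of no return: drop the
 * reference to the previous owner and record the new one. */
static void track_residuals(struct ctx **slot, struct ctx *ce)
{
	ctx_put(*slot);		/* release previous owner, may free it */
	*slot = ctx_get(ce);	/* new owner now pins the residual state */
}
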
@@ -1769,6 +1825,11 @@ static void ring_release(struct intel_engine_cs *engine)
intel_engine_cleanup_common(engine);
+ if (engine->wa_ctx.vma) {
+ intel_context_put(engine->wa_ctx.vma->private);
+ i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
+ }
+
intel_ring_unpin(engine->legacy.ring);
intel_ring_put(engine->legacy.ring);
@@ -1916,6 +1977,64 @@ static void setup_vecs(struct intel_engine_cs *engine)
engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
}
+static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
+ struct i915_vma * const vma)
+{
+ return gen7_setup_clear_gpr_bb(engine, vma);
+}
+
+static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int size;
+ int err;
+
+ size = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
+ if (size <= 0)
+ return size;
+
+ size = ALIGN(size, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(engine->i915, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, engine->gt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vma->private = intel_context_create(engine); /* dummy residuals */
+ if (IS_ERR(vma->private)) {
+ err = PTR_ERR(vma->private);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ if (err)
+ goto err_private;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_unpin;
+
+ err = gen7_ctx_switch_bb_setup(engine, vma);
+ if (err)
+ goto err_unpin;
+
+ engine->wa_ctx.vma = vma;
+ return 0;
+
+err_unpin:
+ i915_vma_unpin(vma);
+err_private:
+ intel_context_put(vma->private);
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
struct intel_timeline *timeline;
@@ -1969,11 +2088,19 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
+ if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
+ err = gen7_ctx_switch_bb_init(engine);
+ if (err)
+ goto err_ring_unpin;
+ }
+
/* Finally, take ownership and responsibility for cleanup! */
engine->release = ring_release;
return 0;
+err_ring_unpin:
+ intel_ring_unpin(ring);
err_ring:
intel_ring_put(ring);
err_timeline_unpin:
@@ -1984,3 +2111,7 @@ err:
intel_engine_cleanup_common(engine);
return err;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_ring_submission.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 30ae29b30f11..cfaf141bac4d 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -4,6 +4,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include <drm/i915_drm.h>
+
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
@@ -55,7 +57,7 @@ static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
if (val < rps->max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
- mask &= rps->pm_events;
+ mask &= READ_ONCE(rps->pm_events);
return rps_pm_sanitize_mask(rps, ~mask);
}
@@ -68,17 +70,19 @@ static void rps_reset_ei(struct intel_rps *rps)
static void rps_enable_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
+ u32 events;
rps_reset_ei(rps);
if (IS_VALLEYVIEW(gt->i915))
/* WaGsvRC0ResidencyMethod:vlv */
- rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
+ events = GEN6_PM_RP_UP_EI_EXPIRED;
else
- rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
- GEN6_PM_RP_DOWN_THRESHOLD |
- GEN6_PM_RP_DOWN_TIMEOUT);
+ events = (GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+ WRITE_ONCE(rps->pm_events, events);
spin_lock_irq(&gt->irq_lock);
gen6_gt_pm_enable_irq(gt, rps->pm_events);
spin_unlock_irq(&gt->irq_lock);
@@ -115,8 +119,7 @@ static void rps_disable_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
- rps->pm_events = 0;
-
+ WRITE_ONCE(rps->pm_events, 0);
set(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
spin_lock_irq(&gt->irq_lock);
@@ -642,7 +645,7 @@ void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
mutex_lock(&rps->power.mutex);
if (interactive) {
- if (!rps->power.interactive++ && rps->active)
+ if (!rps->power.interactive++ && READ_ONCE(rps->active))
rps_set_power(rps, HIGH_POWER);
} else {
GEM_BUG_ON(!rps->power.interactive);
@@ -719,11 +722,15 @@ void intel_rps_unpark(struct intel_rps *rps)
* performance, jump directly to RPe as our starting frequency.
*/
mutex_lock(&rps->lock);
- rps->active = true;
+
+ WRITE_ONCE(rps->active, true);
+
freq = max(rps->cur_freq, rps->efficient_freq);
freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit);
intel_rps_set(rps, freq);
+
rps->last_adj = 0;
+
mutex_unlock(&rps->lock);
if (INTEL_GEN(rps_to_i915(rps)) >= 6)
@@ -743,7 +750,7 @@ void intel_rps_park(struct intel_rps *rps)
if (INTEL_GEN(i915) >= 6)
rps_disable_interrupts(rps);
- rps->active = false;
+ WRITE_ONCE(rps->active, false);
if (rps->last_freq <= rps->idle_freq)
return;
@@ -763,14 +770,27 @@ void intel_rps_park(struct intel_rps *rps)
intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
rps_set(rps, rps->idle_freq, false);
intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+
+ /*
+ * Since we will try and restart from the previously requested
+ * frequency on unparking, treat this idle point as a downclock
+ * interrupt and reduce the frequency for resume. If we park/unpark
+ * more frequently than the rps worker can run, we will not respond
+ * to any EI and never see a change in frequency.
+ *
+ * (Note we accommodate Cherryview's limitation of only using an
+ * even bin by applying it to all.)
+ */
+ rps->cur_freq =
+ max_t(int, round_down(rps->cur_freq - 1, 2), rps->min_freq);
}
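
The new parking expression steps cur_freq down by at least one bin while staying on an even value (and never dropping below min_freq). A standalone check of that arithmetic; round_down and max_t are re-declared here for userspace, and the frequencies are made-up bin numbers:

#include <stdio.h>

#define round_down(x, y) ((x) & ~((y) - 1))	/* y must be a power of 2 */
#define max_t(t, a, b) ((t)(a) > (t)(b) ? (t)(a) : (t)(b))

int main(void)
{
	int min_freq = 3;	/* made-up minimum, in "bin" units */

	/* e.g. 11 -> 10 (next even bin down), 4 -> 3 (clamped to min),
	 * 3 -> 3 (already at the minimum). */
	for (int cur = 3; cur <= 11; cur++)
		printf("%d -> %d\n", cur,
		       max_t(int, round_down(cur - 1, 2), min_freq));
	return 0;
}
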
void intel_rps_boost(struct i915_request *rq)
{
- struct intel_rps *rps = &rq->engine->gt->rps;
+ struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
unsigned long flags;
- if (i915_request_signaled(rq) || !rps->active)
+ if (i915_request_signaled(rq) || !READ_ONCE(rps->active))
return;
/* Serializes with i915_request_retire() */
@@ -1453,12 +1473,12 @@ static void rps_work(struct work_struct *work)
u32 pm_iir = 0;
spin_lock_irq(&gt->irq_lock);
- pm_iir = fetch_and_zero(&rps->pm_iir);
+ pm_iir = fetch_and_zero(&rps->pm_iir) & READ_ONCE(rps->pm_events);
client_boost = atomic_read(&rps->num_waiters);
spin_unlock_irq(&gt->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
- if ((pm_iir & rps->pm_events) == 0 && !client_boost)
+ if (!pm_iir && !client_boost)
goto out;
mutex_lock(&rps->lock);
@@ -1554,11 +1574,15 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
struct intel_gt *gt = rps_to_gt(rps);
+ u32 events;
- if (pm_iir & rps->pm_events) {
+ events = pm_iir & READ_ONCE(rps->pm_events);
+ if (events) {
spin_lock(&gt->irq_lock);
- gen6_gt_pm_mask_irq(gt, pm_iir & rps->pm_events);
- rps->pm_iir |= pm_iir & rps->pm_events;
+
+ gen6_gt_pm_mask_irq(gt, events);
+ rps->pm_iir |= events;
+
schedule_work(&rps->work);
spin_unlock(&gt->irq_lock);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 54e1e55f3c81..91debbc97c9a 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -192,11 +192,15 @@ static void cacheline_release(struct intel_timeline_cacheline *cl)
static void cacheline_free(struct intel_timeline_cacheline *cl)
{
+ if (!i915_active_acquire_if_busy(&cl->active)) {
+ __idle_cacheline_free(cl);
+ return;
+ }
+
GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
- if (i915_active_is_idle(&cl->active))
- __idle_cacheline_free(cl);
+ i915_active_release(&cl->active);
}
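
The rewritten cacheline_free() chooses between an immediate free and a deferred one based on whether it can still acquire a busy reference. A compact sketch of that decision, with stand-in helpers in place of the i915_active API:

#include <stdbool.h>
#include <stdlib.h>

/* Stand-ins for i915_active: "busy" elides the real request tracking. */
struct obj {
	int busy;		/* active references still outstanding */
	bool defer_free;	/* set when freed while still busy */
};

static bool acquire_if_busy(struct obj *o)
{
	if (!o->busy)
		return false;
	o->busy++;
	return true;
}

static void release(struct obj *o)
{
	if (!--o->busy && o->defer_free)
		free(o);	/* last user performs the deferred free */
}

static void obj_free(struct obj *o)
{
	if (!acquire_if_busy(o)) {	/* idle: free immediately */
		free(o);
		return;
	}
	o->defer_free = true;		/* hand the free to release() */
	release(o);
}
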
int intel_timeline_init(struct intel_timeline *timeline,
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 887e0dc701f7..5176ad1a3976 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -575,29 +575,46 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
/* allow headerless messages for preemptible GPGPU context */
WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
+
+ /* Wa_1604278689:icl,ehl */
+ wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
+ wa_write_masked_or(wal, IVB_FBC_RT_BASE_UPPER,
+ 0, /* write-only register; skip validation */
+ 0xFFFFFFFF);
+
+ /* Wa_1406306137:icl,ehl */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
}
static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- u32 val;
-
- /* Wa_1409142259:tgl */
+ /*
+ * Wa_1409142259:tgl
+ * Wa_1409347922:tgl
+ * Wa_1409252684:tgl
+ * Wa_1409217633:tgl
+ * Wa_1409207793:tgl
+ * Wa_1409178076:tgl
+ * Wa_1408979724:tgl
+ */
WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
- /* Wa_1604555607:tgl */
- val = intel_uncore_read(engine->uncore, FF_MODE2);
- val &= ~FF_MODE2_TDS_TIMER_MASK;
- val |= FF_MODE2_TDS_TIMER_128;
/*
- * FIXME: FF_MODE2 register is not readable till TGL B0. We can
- * enable verification of WA from the later steppings, which enables
- * the read of FF_MODE2.
+ * Wa_1604555607:gen12 and Wa_1608008084:gen12
+ * The FF_MODE2 register will return the wrong value when read. The
+ * default value for this register is zero for all fields and there are
+ * no bit masks. So instead of doing an RMW we should just write the
+ * TDS timer value for Wa_1604555607.
*/
- wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK, val,
- IS_TGL_REVID(engine->i915, TGL_REVID_A0, TGL_REVID_A0) ? 0 :
- FF_MODE2_TDS_TIMER_MASK);
+ wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
+ FF_MODE2_TDS_TIMER_128, 0);
+
+ /* WaDisableGPGPUMidThreadPreemption:tgl */
+ WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+ GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
}
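
The zero passed as the final wa_add() argument above suppresses read-back verification, which matters because FF_MODE2 reads back garbage. A rough sketch of how such an entry could be checked, assuming a verifier that compares only the bits covered by a read mask (the structure and helper are illustrative, not the i915 definitions):

#include <stdbool.h>
#include <stdint.h>

struct wa {			/* illustrative, not the i915 struct */
	uint32_t mask;		/* bits cleared before applying val */
	uint32_t val;		/* value written (here: the TDS timer) */
	uint32_t read;		/* bits checked on read-back; 0 = skip */
};

static bool wa_verify(const struct wa *wa, uint32_t cur)
{
	/* With .read == 0 every read-back passes, so an unreadable
	 * register such as FF_MODE2 never trips a false failure. */
	return ((cur ^ wa->val) & wa->read) == 0;
}
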
static void
@@ -903,11 +920,6 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
SLICE_UNIT_LEVEL_CLKGATE,
MSCUNIT_CLKGATE_DIS);
- /* Wa_1406680159:icl */
- wa_write_or(wal,
- SUBSLICE_UNIT_LEVEL_CLKGATE,
- GWUNIT_CLKGATE_DIS);
-
/* Wa_1406838659:icl (pre-prod) */
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
wa_write_or(wal,
@@ -936,7 +948,7 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
SUBSLICE_UNIT_LEVEL_CLKGATE2,
CPSSUNIT_CLKGATE_DIS);
- /* Wa_1409180338:tgl */
+ /* Wa_1607087056:tgl also known as BUG:1409180338 */
if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
@@ -1251,6 +1263,7 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
case RENDER_CLASS:
/*
* WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
+ * Wa_1408556865:tgl
*
* This covers 4 registers which are next to one another :
* - PS_INVOCATION_COUNT
@@ -1264,6 +1277,9 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
/* Wa_1808121037:tgl */
whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
+
+ /* Wa_1806527549:tgl */
+ whitelist_reg(w, HIZ_CHICKEN);
break;
default:
break;
@@ -1330,19 +1346,21 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
struct drm_i915_private *i915 = engine->i915;
if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
- /* Wa_1606700617:tgl */
- wa_masked_en(wal,
- GEN9_CS_DEBUG_MODE1,
- FF_DOP_CLOCK_GATE_DISABLE);
-
- /* Wa_1607138336:tgl */
+ /*
+ * Wa_1607138336:tgl
+ * Wa_1607063988:tgl
+ */
wa_write_or(wal,
GEN9_CTX_PREEMPT_REG,
GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
- /* Wa_1607030317:tgl */
- /* Wa_1607186500:tgl */
- /* Wa_1607297627:tgl */
+ /*
+ * Wa_1607030317:tgl
+ * Wa_1607186500:tgl
+ * Wa_1607297627:tgl there are three entries for this WA on BSpec;
+ * two of them say it is fixed on B0, the other one says it is
+ * permanent
+ */
wa_masked_en(wal,
GEN6_RC_SLEEP_PSMI_CONTROL,
GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
@@ -1361,10 +1379,29 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
+ /*
+ * Wa_1409085225:tgl
+ * Wa_14010229206:tgl
+ */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
+
+ /* Wa_1408615072:tgl */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ VSUNIT_CLKGATE_DIS_TGL);
+ }
+
+ if (IS_TIGERLAKE(i915)) {
/* Wa_1606931601:tgl */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
+
+ /* Wa_1409804808:tgl */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ GEN12_PUSH_CONST_DEREF_HOLD_DIS);
+
+ /* Wa_1606700617:tgl */
wa_masked_en(wal,
- GEN7_ROW_CHICKEN2,
- GEN12_DISABLE_EARLY_READ);
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
}
if (IS_GEN(i915, 11)) {
@@ -1430,10 +1467,38 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN11_SCRATCH2,
GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
0);
+
+ /* WaEnable32PlaneMode:icl */
+ wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
+ GEN11_ENABLE_32_PLANE_MODE);
+
+ /*
+ * Wa_1408615072:icl,ehl (vsunit)
+ * Wa_1407596294:icl,ehl (hsunit)
+ */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+ VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
+
+ /* Wa_1407352427:icl,ehl */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ PSDUNIT_CLKGATE_DIS);
+
+ /* Wa_1406680159:icl,ehl */
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE,
+ GWUNIT_CLKGATE_DIS);
+
+ /*
+ * Wa_1408767742:icl[a2..forever],ehl[all]
+ * Wa_1605460711:icl[a0..c0]
+ */
+ wa_write_or(wal,
+ GEN7_FF_THREAD_MODE,
+ GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
}
- if (IS_GEN_RANGE(i915, 9, 11)) {
- /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */
+ if (IS_GEN_RANGE(i915, 9, 12)) {
+ /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */
wa_masked_en(wal,
GEN7_FF_SLICE_CS_CHICKEN1,
GEN9_FFSC_PERCTX_PREEMPT_CTRL);
@@ -1600,15 +1665,34 @@ err_obj:
return ERR_PTR(err);
}
+static const struct {
+ u32 start;
+ u32 end;
+} mcr_ranges_gen8[] = {
+ { .start = 0x5500, .end = 0x55ff },
+ { .start = 0x7000, .end = 0x7fff },
+ { .start = 0x9400, .end = 0x97ff },
+ { .start = 0xb000, .end = 0xb3ff },
+ { .start = 0xe000, .end = 0xe7ff },
+ {},
+};
+
static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
+ int i;
+
+ if (INTEL_GEN(i915) < 8)
+ return false;
+
/*
- * Registers in this range are affected by the MCR selector
+ * Registers in these ranges are affected by the MCR selector
* which only controls CPU initiated MMIO. Routing does not
* work for CS access so we cannot verify them on this path.
*/
- if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff))
- return true;
+ for (i = 0; mcr_ranges_gen8[i].start; i++)
+ if (offset >= mcr_ranges_gen8[i].start &&
+ offset <= mcr_ranges_gen8[i].end)
+ return true;
return false;
}
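
mcr_range() is now table-driven, with the sentinel-terminated loop stopping at the first entry whose .start is zero. The same lookup in isolation, with a couple of hypothetical probe offsets:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static const struct { uint32_t start, end; } mcr_ranges_gen8[] = {
	{ 0x5500, 0x55ff }, { 0x7000, 0x7fff }, { 0x9400, 0x97ff },
	{ 0xb000, 0xb3ff }, { 0xe000, 0xe7ff }, {},
};

static bool mcr_range(uint32_t offset)
{
	for (int i = 0; mcr_ranges_gen8[i].start; i++)
		if (offset >= mcr_ranges_gen8[i].start &&
		    offset <= mcr_ranges_gen8[i].end)
			return true;
	return false;
}

int main(void)
{
	/* 0xb100 falls in 0xb000-0xb3ff, 0xb500 no longer matches
	 * now that the old 0xb000-0xb4ff range has been tightened. */
	printf("%d %d\n", mcr_range(0xb100), mcr_range(0xb500)); /* 1 0 */
	return 0;
}
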
diff --git a/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c b/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c
new file mode 100644
index 000000000000..610ca7687735
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Generated by: IGT Gpu Tools on Fri 21 Feb 2020 05:29:32 AM UTC
+ */
+
+static const u32 ivb_clear_kernel[] = {
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000040, 0x20280c21, 0x00000028, 0x00000001,
+ 0x01000010, 0x20000c20, 0x0000002c, 0x00000000,
+ 0x00010220, 0x34001c00, 0x00001400, 0x0000002c,
+ 0x00600001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00000008, 0x20601c85, 0x00000e00, 0x0000000c,
+ 0x00000005, 0x20601ca5, 0x00000060, 0x00000001,
+ 0x00000008, 0x20641c85, 0x00000e00, 0x0000000d,
+ 0x00000005, 0x20641ca5, 0x00000064, 0x00000003,
+ 0x00000041, 0x207424a5, 0x00000064, 0x00000034,
+ 0x00000040, 0x206014a5, 0x00000060, 0x00000074,
+ 0x00000008, 0x20681c85, 0x00000e00, 0x00000008,
+ 0x00000005, 0x20681ca5, 0x00000068, 0x0000000f,
+ 0x00000041, 0x20701ca5, 0x00000060, 0x00000010,
+ 0x00000040, 0x206814a5, 0x00000068, 0x00000070,
+ 0x00600001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00000005, 0x206c1c85, 0x00000e00, 0x00000007,
+ 0x00000041, 0x206c1ca5, 0x0000006c, 0x00000004,
+ 0x00600001, 0x20800021, 0x008d0000, 0x00000000,
+ 0x00000001, 0x20800021, 0x0000006c, 0x00000000,
+ 0x00000001, 0x20840021, 0x00000068, 0x00000000,
+ 0x00000001, 0x20880061, 0x00000000, 0x00000003,
+ 0x00000005, 0x208c0d21, 0x00000086, 0xffffffff,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x02190001,
+ 0x00000040, 0x20a01ca5, 0x000000a0, 0x00000001,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x040a8001,
+ 0x02000040, 0x20281c21, 0x00000028, 0xffffffff,
+ 0x00010220, 0x34001c00, 0x00001400, 0xfffffffc,
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000001, 0x220010e4, 0x00000000, 0x00000000,
+ 0x00000001, 0x220831ec, 0x00000000, 0x007f007f,
+ 0x00600001, 0x20400021, 0x008d0000, 0x00000000,
+ 0x00600001, 0x2fe00021, 0x008d0000, 0x00000000,
+ 0x00200001, 0x20400121, 0x00450020, 0x00000000,
+ 0x00000001, 0x20480061, 0x00000000, 0x000f000f,
+ 0x00000005, 0x204c0d21, 0x00000046, 0xffffffef,
+ 0x00800001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20800061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20c00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20e00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21000061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21200061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21400061, 0x00000000, 0x00000000,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x00000040, 0x20402d21, 0x00000020, 0x00100010,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x02000040, 0x22083d8c, 0x00000208, 0xffffffff,
+ 0x00800001, 0xa0000109, 0x00000602, 0x00000000,
+ 0x00000040, 0x22001c84, 0x00000200, 0x00000020,
+ 0x00010220, 0x34001c00, 0x00001400, 0xfffffff8,
+ 0x07600032, 0x20000fa0, 0x008d0fe0, 0x82000010,
+};
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 5633515c12e9..4a53ded7c2dd 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -244,9 +244,7 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->active.requests, sched.link) {
- if (!i915_request_signaled(request))
- dma_fence_set_error(&request->fence, -EIO);
-
+ i915_request_set_error_once(request, -EIO);
i915_request_mark_complete(request);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 43d4d589749f..697114dd1f47 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -142,6 +142,24 @@ out:
return err;
}
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+ unsigned long *saved)
+{
+ *saved = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+
+ intel_engine_pm_get(engine);
+ intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+ unsigned long saved)
+{
+ intel_engine_pm_put(engine);
+
+ engine->props.heartbeat_interval_ms = saved;
+}
+
static int live_idle_flush(void *arg)
{
struct intel_gt *gt = arg;
@@ -152,9 +170,11 @@ static int live_idle_flush(void *arg)
/* Check that we can flush the idle barriers */
for_each_engine(engine, gt, id) {
- intel_engine_pm_get(engine);
+ unsigned long heartbeat;
+
+ engine_heartbeat_disable(engine, &heartbeat);
err = __live_idle_pulse(engine, intel_engine_flush_barriers);
- intel_engine_pm_put(engine);
+ engine_heartbeat_enable(engine, heartbeat);
if (err)
break;
}
@@ -172,9 +192,11 @@ static int live_idle_pulse(void *arg)
/* Check that heartbeat pulses flush the idle barriers */
for_each_engine(engine, gt, id) {
- intel_engine_pm_get(engine);
+ unsigned long heartbeat;
+
+ engine_heartbeat_disable(engine, &heartbeat);
err = __live_idle_pulse(engine, intel_engine_pulse);
- intel_engine_pm_put(engine);
+ engine_heartbeat_enable(engine, heartbeat);
if (err && err != -ENODEV)
break;
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 3e5e6c86e843..2b2efff6e19d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -268,7 +268,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
cancel_rq:
if (err) {
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
}
unpin_hws:
@@ -1640,7 +1640,7 @@ static int igt_reset_engines_atomic(void *arg)
if (!intel_has_reset_engine(gt))
return 0;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
igt_global_reset_lock(gt);
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 49b93cda04ca..6f06ba750a0a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -90,6 +90,49 @@ static int wait_for_submit(struct intel_engine_cs *engine,
return -ETIME;
}
+static int wait_for_reset(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
+{
+ timeout += jiffies;
+
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+
+ if (READ_ONCE(engine->execlists.pending[0]))
+ continue;
+
+ if (i915_request_completed(rq))
+ break;
+
+ if (READ_ONCE(rq->fence.error))
+ break;
+ } while (time_before(jiffies, timeout));
+
+ flush_scheduled_work();
+
+ if (rq->fence.error != -EIO) {
+ pr_err("%s: hanging request %llx:%lld not reset\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ return -EINVAL;
+ }
+
+ /* Give the request a jiffie to complete after flushing the worker */
+ if (i915_request_wait(rq, 0,
+ max(0l, (long)(timeout - jiffies)) + 1) < 0) {
+ pr_err("%s: hanging request %llx:%lld did not complete\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
static int live_sanitycheck(void *arg)
{
struct intel_gt *gt = arg;
@@ -1805,14 +1848,9 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
if (err)
goto out;
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- err = -EIO;
- goto out;
- }
-
- if (rq->fence.error != -EIO) {
- pr_err("Cancelled inflight0 request did not report -EIO\n");
- err = -EINVAL;
+ err = wait_for_reset(arg->engine, rq, HZ / 2);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
goto out;
}
@@ -1870,10 +1908,9 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
goto out;
igt_spinner_end(&arg->a.spin);
- if (i915_request_wait(rq[1], 0, HZ / 5) < 0) {
- err = -EIO;
+ err = wait_for_reset(arg->engine, rq[1], HZ / 2);
+ if (err)
goto out;
- }
if (rq[0]->fence.error != 0) {
pr_err("Normal inflight0 request did not complete\n");
@@ -1953,10 +1990,9 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
if (err)
goto out;
- if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
- err = -EIO;
+ err = wait_for_reset(arg->engine, rq[2], HZ / 2);
+ if (err)
goto out;
- }
if (rq[0]->fence.error != -EIO) {
pr_err("Cancelled inflight0 request did not report -EIO\n");
@@ -2014,14 +2050,9 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
if (err)
goto out;
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- err = -EIO;
- goto out;
- }
-
- if (rq->fence.error != -EIO) {
- pr_err("Cancelled inflight0 request did not report -EIO\n");
- err = -EINVAL;
+ err = wait_for_reset(arg->engine, rq, HZ / 2);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
goto out;
}
@@ -2109,7 +2140,7 @@ static int live_suppress_self_preempt(void *arg)
if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0; /* presume black box */
if (intel_vgpu_active(gt->i915))
@@ -3224,7 +3255,7 @@ static int live_virtual_engine(void *arg)
unsigned int class, inst;
int err;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
for_each_engine(engine, gt, id) {
@@ -3357,7 +3388,7 @@ static int live_virtual_mask(void *arg)
unsigned int class, inst;
int err;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
@@ -3499,7 +3530,7 @@ static int live_virtual_preserved(void *arg)
* are preserved.
*/
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
/* As we use CS_GPR we cannot run before they existed on all engines. */
@@ -3729,7 +3760,7 @@ static int live_virtual_bond(void *arg)
unsigned int class, inst;
int err;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
@@ -3890,7 +3921,7 @@ static int live_virtual_reset(void *arg)
* forgotten.
*/
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
if (!intel_has_reset_engine(gt))
@@ -4015,6 +4046,32 @@ static int emit_semaphore_signal(struct intel_context *ce, void *slot)
return 0;
}
+static int context_flush(struct intel_context *ce, long timeout)
+{
+ struct i915_request *rq;
+ struct dma_fence *fence;
+ int err = 0;
+
+ rq = intel_engine_create_kernel_request(ce->engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ fence = i915_active_fence_get(&ce->timeline->last_request);
+ if (fence) {
+ i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ }
+
+ rq = i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, timeout) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ rmb(); /* We know the request is written, make sure all state is too! */
+ return err;
+}
+
static int live_lrc_layout(void *arg)
{
struct intel_gt *gt = arg;
@@ -4638,18 +4695,10 @@ static int __lrc_timestamp(const struct lrc_timestamp *arg, bool preempt)
wmb();
}
- if (i915_request_wait(rq, 0, HZ / 2) < 0) {
- err = -ETIME;
- goto err;
- }
-
- /* and wait for switch to kernel */
- if (igt_flush_test(arg->engine->i915)) {
- err = -EIO;
+ /* And wait for switch to kernel (to save our context to memory) */
+ err = context_flush(arg->ce[0], HZ / 2);
+ if (err)
goto err;
- }
-
- rmb();
if (!timestamp_advanced(arg->poison, slot[1])) {
pr_err("%s(%s): invalid timestamp on restore, context:%x, request:%x\n",
@@ -4674,9 +4723,9 @@ err:
static int live_lrc_timestamp(void *arg)
{
+ struct lrc_timestamp data = {};
struct intel_gt *gt = arg;
enum intel_engine_id id;
- struct lrc_timestamp data;
const u32 poison[] = {
0,
S32_MAX,
@@ -4748,6 +4797,677 @@ err:
return 0;
}
+static struct i915_vma *
+create_user_vma(struct i915_address_space *vm, unsigned long size)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(vm->i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static struct i915_vma *
+store_context(struct intel_context *ce, struct i915_vma *scratch)
+{
+ struct i915_vma *batch;
+ u32 dw, x, *cs, *hw;
+
+ batch = create_user_vma(ce->vm, SZ_64K);
+ if (IS_ERR(batch))
+ return batch;
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_vma_put(batch);
+ return ERR_CAST(cs);
+ }
+
+ x = 0;
+ dw = 0;
+ hw = ce->engine->pinned_default_state;
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ do {
+ u32 len = hw[dw] & 0x7f;
+
+ if (hw[dw] == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ dw += len + 2;
+ continue;
+ }
+
+ dw++;
+ len = (len + 1) / 2;
+ while (len--) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8;
+ *cs++ = hw[dw];
+ *cs++ = lower_32_bits(scratch->node.start + x);
+ *cs++ = upper_32_bits(scratch->node.start + x);
+
+ dw += 2;
+ x += 4;
+ }
+ } while (dw < PAGE_SIZE / sizeof(u32) &&
+ (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_object_unpin_map(batch->obj);
+
+ return batch;
+}
+
+static int move_to_active(struct i915_request *rq,
+ struct i915_vma *vma,
+ unsigned int flags)
+{
+ int err;
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, flags);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, flags);
+ i915_vma_unlock(vma);
+
+ return err;
+}
+
+static struct i915_request *
+record_registers(struct intel_context *ce,
+ struct i915_vma *before,
+ struct i915_vma *after,
+ u32 *sema)
+{
+ struct i915_vma *b_before, *b_after;
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+
+ b_before = store_context(ce, before);
+ if (IS_ERR(b_before))
+ return ERR_CAST(b_before);
+
+ b_after = store_context(ce, after);
+ if (IS_ERR(b_after)) {
+ rq = ERR_CAST(b_after);
+ goto err_before;
+ }
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ goto err_after;
+
+ err = move_to_active(rq, before, EXEC_OBJECT_WRITE);
+ if (err)
+ goto err_rq;
+
+ err = move_to_active(rq, b_before, 0);
+ if (err)
+ goto err_rq;
+
+ err = move_to_active(rq, after, EXEC_OBJECT_WRITE);
+ if (err)
+ goto err_rq;
+
+ err = move_to_active(rq, b_after, 0);
+ if (err)
+ goto err_rq;
+
+ cs = intel_ring_begin(rq, 14);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+ *cs++ = lower_32_bits(b_before->node.start);
+ *cs++ = upper_32_bits(b_before->node.start);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(sema);
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+ *cs++ = lower_32_bits(b_after->node.start);
+ *cs++ = upper_32_bits(b_after->node.start);
+
+ intel_ring_advance(rq, cs);
+
+ WRITE_ONCE(*sema, 0);
+ i915_request_get(rq);
+ i915_request_add(rq);
+err_after:
+ i915_vma_put(b_after);
+err_before:
+ i915_vma_put(b_before);
+ return rq;
+
+err_rq:
+ i915_request_add(rq);
+ rq = ERR_PTR(err);
+ goto err_after;
+}
+
+static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
+{
+ struct i915_vma *batch;
+ u32 dw, *cs, *hw;
+
+ batch = create_user_vma(ce->vm, SZ_64K);
+ if (IS_ERR(batch))
+ return batch;
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_vma_put(batch);
+ return ERR_CAST(cs);
+ }
+
+ dw = 0;
+ hw = ce->engine->pinned_default_state;
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ do {
+ u32 len = hw[dw] & 0x7f;
+
+ if (hw[dw] == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ dw += len + 2;
+ continue;
+ }
+
+ dw++;
+ len = (len + 1) / 2;
+ *cs++ = MI_LOAD_REGISTER_IMM(len);
+ while (len--) {
+ *cs++ = hw[dw];
+ *cs++ = poison;
+ dw += 2;
+ }
+ } while (dw < PAGE_SIZE / sizeof(u32) &&
+ (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_object_unpin_map(batch->obj);
+
+ return batch;
+}
+
+static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema)
+{
+ struct i915_request *rq;
+ struct i915_vma *batch;
+ u32 *cs;
+ int err;
+
+ batch = load_context(ce, poison);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_batch;
+ }
+
+ err = move_to_active(rq, batch, 0);
+ if (err)
+ goto err_rq;
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+ *cs++ = lower_32_bits(batch->node.start);
+ *cs++ = upper_32_bits(batch->node.start);
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(sema);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+err_rq:
+ i915_request_add(rq);
+err_batch:
+ i915_vma_put(batch);
+ return err;
+}
+
+static bool is_moving(u32 a, u32 b)
+{
+ return a != b;
+}
+
+static int compare_isolation(struct intel_engine_cs *engine,
+ struct i915_vma *ref[2],
+ struct i915_vma *result[2],
+ struct intel_context *ce,
+ u32 poison)
+{
+ u32 x, dw, *hw, *lrc;
+ u32 *A[2], *B[2];
+ int err = 0;
+
+ A[0] = i915_gem_object_pin_map(ref[0]->obj, I915_MAP_WC);
+ if (IS_ERR(A[0]))
+ return PTR_ERR(A[0]);
+
+ A[1] = i915_gem_object_pin_map(ref[1]->obj, I915_MAP_WC);
+ if (IS_ERR(A[1])) {
+ err = PTR_ERR(A[1]);
+ goto err_A0;
+ }
+
+ B[0] = i915_gem_object_pin_map(result[0]->obj, I915_MAP_WC);
+ if (IS_ERR(B[0])) {
+ err = PTR_ERR(B[0]);
+ goto err_A1;
+ }
+
+ B[1] = i915_gem_object_pin_map(result[1]->obj, I915_MAP_WC);
+ if (IS_ERR(B[1])) {
+ err = PTR_ERR(B[1]);
+ goto err_B0;
+ }
+
+ lrc = i915_gem_object_pin_map(ce->state->obj,
+ i915_coherent_map_type(engine->i915));
+ if (IS_ERR(lrc)) {
+ err = PTR_ERR(lrc);
+ goto err_B1;
+ }
+ lrc += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+
+ x = 0;
+ dw = 0;
+ hw = engine->pinned_default_state;
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ do {
+ u32 len = hw[dw] & 0x7f;
+
+ if (hw[dw] == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ dw += len + 2;
+ continue;
+ }
+
+ dw++;
+ len = (len + 1) / 2;
+ while (len--) {
+ if (!is_moving(A[0][x], A[1][x]) &&
+ (A[0][x] != B[0][x] || A[1][x] != B[1][x])) {
+ switch (hw[dw] & 4095) {
+ case 0x30: /* RING_HEAD */
+ case 0x34: /* RING_TAIL */
+ break;
+
+ default:
+ pr_err("%s[%d]: Mismatch for register %4x, default %08x, reference %08x, result (%08x, %08x), poison %08x, context %08x\n",
+ engine->name, dw,
+ hw[dw], hw[dw + 1],
+ A[0][x], B[0][x], B[1][x],
+ poison, lrc[dw + 1]);
+ err = -EINVAL;
+ break;
+ }
+ }
+ dw += 2;
+ x++;
+ }
+ } while (dw < PAGE_SIZE / sizeof(u32) &&
+ (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ i915_gem_object_unpin_map(ce->state->obj);
+err_B1:
+ i915_gem_object_unpin_map(result[1]->obj);
+err_B0:
+ i915_gem_object_unpin_map(result[0]->obj);
+err_A1:
+ i915_gem_object_unpin_map(ref[1]->obj);
+err_A0:
+ i915_gem_object_unpin_map(ref[0]->obj);
+ return err;
+}
+
+static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
+{
+ u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1);
+ struct i915_vma *ref[2], *result[2];
+ struct intel_context *A, *B;
+ struct i915_request *rq;
+ int err;
+
+ A = intel_context_create(engine);
+ if (IS_ERR(A))
+ return PTR_ERR(A);
+
+ B = intel_context_create(engine);
+ if (IS_ERR(B)) {
+ err = PTR_ERR(B);
+ goto err_A;
+ }
+
+ ref[0] = create_user_vma(A->vm, SZ_64K);
+ if (IS_ERR(ref[0])) {
+ err = PTR_ERR(ref[0]);
+ goto err_B;
+ }
+
+ ref[1] = create_user_vma(A->vm, SZ_64K);
+ if (IS_ERR(ref[1])) {
+ err = PTR_ERR(ref[1]);
+ goto err_ref0;
+ }
+
+ rq = record_registers(A, ref[0], ref[1], sema);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ref1;
+ }
+
+ WRITE_ONCE(*sema, 1);
+ wmb();
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ref1;
+ }
+ i915_request_put(rq);
+
+ result[0] = create_user_vma(A->vm, SZ_64K);
+ if (IS_ERR(result[0])) {
+ err = PTR_ERR(result[0]);
+ goto err_ref1;
+ }
+
+ result[1] = create_user_vma(A->vm, SZ_64K);
+ if (IS_ERR(result[1])) {
+ err = PTR_ERR(result[1]);
+ goto err_result0;
+ }
+
+ rq = record_registers(A, result[0], result[1], sema);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_result1;
+ }
+
+ err = poison_registers(B, poison, sema);
+ if (err) {
+ WRITE_ONCE(*sema, -1);
+ i915_request_put(rq);
+ goto err_result1;
+ }
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_result1;
+ }
+ i915_request_put(rq);
+
+ err = compare_isolation(engine, ref, result, A, poison);
+
+err_result1:
+ i915_vma_put(result[1]);
+err_result0:
+ i915_vma_put(result[0]);
+err_ref1:
+ i915_vma_put(ref[1]);
+err_ref0:
+ i915_vma_put(ref[0]);
+err_B:
+ intel_context_put(B);
+err_A:
+ intel_context_put(A);
+ return err;
+}
+
+static bool skip_isolation(const struct intel_engine_cs *engine)
+{
+ if (engine->class == COPY_ENGINE_CLASS && INTEL_GEN(engine->i915) == 9)
+ return true;
+
+ if (engine->class == RENDER_CLASS && INTEL_GEN(engine->i915) == 11)
+ return true;
+
+ return false;
+}
+
+static int live_lrc_isolation(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ const u32 poison[] = {
+ STACK_MAGIC,
+ 0x3a3a3a3a,
+ 0x5c5c5c5c,
+ 0xffffffff,
+ 0xffff0000,
+ };
+
+ /*
+ * Our goal is to try and verify that per-context state cannot be
+ * tampered with by another non-privileged client.
+ *
+ * We take the list of context registers from the LRI in the default
+ * context image and attempt to modify that list from a remote context.
+ */
+
+ for_each_engine(engine, gt, id) {
+ int err = 0;
+ int i;
+
+ /* Just don't even ask */
+ if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN) &&
+ skip_isolation(engine))
+ continue;
+
+ intel_engine_pm_get(engine);
+ if (engine->pinned_default_state) {
+ for (i = 0; i < ARRAY_SIZE(poison); i++) {
+ err = __lrc_isolation(engine, poison[i]);
+ if (err)
+ break;
+
+ err = __lrc_isolation(engine, ~poison[i]);
+ if (err)
+ break;
+ }
+ }
+ intel_engine_pm_put(engine);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void garbage_reset(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ const unsigned int bit = I915_RESET_ENGINE + engine->id;
+ unsigned long *lock = &engine->gt->reset.flags;
+
+ if (test_and_set_bit(bit, lock))
+ return;
+
+ tasklet_disable(&engine->execlists.tasklet);
+
+ if (!rq->fence.error)
+ intel_engine_reset(engine, NULL);
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(bit, lock);
+}
+
+static struct i915_request *garbage(struct intel_context *ce,
+ struct rnd_state *prng)
+{
+ struct i915_request *rq;
+ int err;
+
+ err = intel_context_pin(ce);
+ if (err)
+ return ERR_PTR(err);
+
+ prandom_bytes_state(prng,
+ ce->lrc_reg_state,
+ ce->engine->context_size -
+ LRC_STATE_PN * PAGE_SIZE);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ return rq;
+
+err_unpin:
+ intel_context_unpin(ce);
+ return ERR_PTR(err);
+}
+
+static int __lrc_garbage(struct intel_engine_cs *engine, struct rnd_state *prng)
+{
+ struct intel_context *ce;
+ struct i915_request *hang;
+ int err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ hang = garbage(ce, prng);
+ if (IS_ERR(hang)) {
+ err = PTR_ERR(hang);
+ goto err_ce;
+ }
+
+ if (wait_for_submit(engine, hang, HZ / 2)) {
+ i915_request_put(hang);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+ intel_context_set_banned(ce);
+ garbage_reset(engine, hang);
+
+ intel_engine_flush_submission(engine);
+ if (!hang->fence.error) {
+ i915_request_put(hang);
+ pr_err("%s: corrupted context was not reset\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_ce;
+ }
+
+ if (i915_request_wait(hang, 0, HZ / 2) < 0) {
+ pr_err("%s: corrupted context did not recover\n",
+ engine->name);
+ i915_request_put(hang);
+ err = -EIO;
+ goto err_ce;
+ }
+ i915_request_put(hang);
+
+err_ce:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_lrc_garbage(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Verify that we can recover if one context state is completely
+ * corrupted.
+ */
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ I915_RND_STATE(prng);
+ int err = 0, i;
+
+ if (!intel_has_reset_engine(engine->gt))
+ continue;
+
+ intel_engine_pm_get(engine);
+ for (i = 0; i < 3; i++) {
+ err = __lrc_garbage(engine, &prng);
+ if (err)
+ break;
+ }
+ intel_engine_pm_put(engine);
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int __live_pphwsp_runtime(struct intel_engine_cs *engine)
{
struct intel_context *ce;
@@ -4845,7 +5565,9 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_lrc_fixed),
SUBTEST(live_lrc_state),
SUBTEST(live_lrc_gpr),
+ SUBTEST(live_lrc_isolation),
SUBTEST(live_lrc_timestamp),
+ SUBTEST(live_lrc_garbage),
SUBTEST(live_pphwsp_runtime),
};
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 5f7e2dcf5686..95b165faeba7 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -12,6 +12,21 @@
#include "selftests/i915_random.h"
+static u64 rc6_residency(struct intel_rc6 *rc6)
+{
+ u64 result;
+
+ /* XXX VLV_GT_MEDIA_RC6? */
+
+ result = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ if (HAS_RC6p(rc6_to_i915(rc6)))
+ result += intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6p);
+ if (HAS_RC6pp(rc6_to_i915(rc6)))
+ result += intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6pp);
+
+ return result;
+}
+
int live_rc6_manual(void *arg)
{
struct intel_gt *gt = arg;
@@ -38,9 +53,9 @@ int live_rc6_manual(void *arg)
__intel_rc6_disable(rc6);
msleep(1); /* wakeup is not immediate, takes about 100us on icl */
- res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ res[0] = rc6_residency(rc6);
msleep(250);
- res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ res[1] = rc6_residency(rc6);
if ((res[1] - res[0]) >> 10) {
pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n",
(res[1] - res[0]) >> 10);
@@ -51,9 +66,9 @@ int live_rc6_manual(void *arg)
/* Manually enter RC6 */
intel_rc6_park(rc6);
- res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ res[0] = rc6_residency(rc6);
msleep(100);
- res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ res[1] = rc6_residency(rc6);
if (res[1] == res[0]) {
pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x, residency=%lld\n",
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 6ad6aca315f6..35406ecdf0b2 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -115,7 +115,7 @@ static int igt_atomic_engine_reset(void *arg)
if (!intel_has_reset_engine(gt))
return 0;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
intel_gt_pm_get(gt);
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
new file mode 100644
index 000000000000..9995faadd7e8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "intel_engine_pm.h"
+#include "selftests/igt_flush_test.h"
+
+static struct i915_vma *create_wally(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, engine->gt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ err = i915_vma_sync(vma);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_gem_object_put(obj);
+ return ERR_CAST(cs);
+ }
+
+ if (INTEL_GEN(engine->i915) >= 6) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4;
+ *cs++ = 0;
+ } else if (INTEL_GEN(engine->i915) >= 4) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = 0;
+ } else {
+ *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ }
+ *cs++ = vma->node.start + 4000;
+ *cs++ = STACK_MAGIC;
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(obj);
+
+ vma->private = intel_context_create(engine); /* dummy residuals */
+ if (IS_ERR(vma->private)) {
+ vma = ERR_CAST(vma->private);
+ i915_gem_object_put(obj);
+ }
+
+ return vma;
+}
+
+static int context_sync(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ int err = 0;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int new_context_sync(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = context_sync(ce);
+ intel_context_put(ce);
+
+ return err;
+}
+
+static int mixed_contexts_sync(struct intel_engine_cs *engine, u32 *result)
+{
+ int pass;
+ int err;
+
+ for (pass = 0; pass < 2; pass++) {
+ WRITE_ONCE(*result, 0);
+ err = context_sync(engine->kernel_context);
+ if (err || READ_ONCE(*result)) {
+ if (!err) {
+ pr_err("pass[%d] wa_bb emitted for the kernel context\n",
+ pass);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ WRITE_ONCE(*result, 0);
+ err = new_context_sync(engine);
+ if (READ_ONCE(*result) != STACK_MAGIC) {
+ if (!err) {
+ pr_err("pass[%d] wa_bb *NOT* emitted after the kernel context\n",
+ pass);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ WRITE_ONCE(*result, 0);
+ err = new_context_sync(engine);
+ if (READ_ONCE(*result) != STACK_MAGIC) {
+ if (!err) {
+ pr_err("pass[%d] wa_bb *NOT* emitted for the user context switch\n",
+ pass);
+ err = -EINVAL;
+ }
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int double_context_sync_00(struct intel_engine_cs *engine, u32 *result)
+{
+ struct intel_context *ce;
+ int err, i;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ for (i = 0; i < 2; i++) {
+ WRITE_ONCE(*result, 0);
+ err = context_sync(ce);
+ if (err)
+ break;
+ }
+ intel_context_put(ce);
+ if (err)
+ return err;
+
+ if (READ_ONCE(*result)) {
+ pr_err("wa_bb emitted between the same user context\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int kernel_context_sync_00(struct intel_engine_cs *engine, u32 *result)
+{
+ struct intel_context *ce;
+ int err, i;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ for (i = 0; i < 2; i++) {
+ WRITE_ONCE(*result, 0);
+ err = context_sync(ce);
+ if (err)
+ break;
+
+ err = context_sync(engine->kernel_context);
+ if (err)
+ break;
+ }
+ intel_context_put(ce);
+ if (err)
+ return err;
+
+ if (READ_ONCE(*result)) {
+ pr_err("wa_bb emitted between the same user context [with intervening kernel]\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __live_ctx_switch_wa(struct intel_engine_cs *engine)
+{
+ struct i915_vma *bb;
+ u32 *result;
+ int err;
+
+ bb = create_wally(engine);
+ if (IS_ERR(bb))
+ return PTR_ERR(bb);
+
+ result = i915_gem_object_pin_map(bb->obj, I915_MAP_WC);
+ if (IS_ERR(result)) {
+ intel_context_put(bb->private);
+ i915_vma_unpin_and_release(&bb, 0);
+ return PTR_ERR(result);
+ }
+ result += 1000;
+
+ engine->wa_ctx.vma = bb;
+
+ err = mixed_contexts_sync(engine, result);
+ if (err)
+ goto out;
+
+ err = double_context_sync_00(engine, result);
+ if (err)
+ goto out;
+
+ err = kernel_context_sync_00(engine, result);
+ if (err)
+ goto out;
+
+out:
+ intel_context_put(engine->wa_ctx.vma->private);
+ i915_vma_unpin_and_release(&engine->wa_ctx.vma, I915_VMA_RELEASE_MAP);
+ return err;
+}
+
+static int live_ctx_switch_wa(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Exercise the inter-context wa batch.
+ *
+ * Between each user context we run a wa batch, and since it may
+ * have implications for user visible state, we have to check that
+ * we do actually execute it.
+ *
+ * The trick we use is to replace the normal wa batch with a custom
+ * one that writes to a marker within it, and we can then look for
+ * that marker to confirm if the batch was run when we expect it,
+ * and, equally important, it wasn't run when we don't!
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct i915_vma *saved_wa;
+ int err;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (IS_GEN_RANGE(gt->i915, 4, 5))
+ continue; /* MI_STORE_DWORD is privileged! */
+
+ saved_wa = fetch_and_zero(&engine->wa_ctx.vma);
+
+ intel_engine_pm_get(engine);
+ err = __live_ctx_switch_wa(engine);
+ intel_engine_pm_put(engine);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ engine->wa_ctx.vma = saved_wa;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int intel_ring_submission_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_ctx_switch_wa),
+ };
+
+ if (HAS_EXECLISTS(i915))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
new file mode 100644
index 000000000000..8f9b2f33dbaf
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "intel_engine.h"
+#include "intel_engine_heartbeat.h"
+#include "sysfs_engines.h"
+
+struct kobj_engine {
+ struct kobject base;
+ struct intel_engine_cs *engine;
+};
+
+static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
+{
+ return container_of(kobj, struct kobj_engine, base)->engine;
+}
+
+static ssize_t
+name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
+}
+
+static struct kobj_attribute name_attr =
+__ATTR(name, 0444, name_show, NULL);
+
+static ssize_t
+class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
+}
+
+static struct kobj_attribute class_attr =
+__ATTR(class, 0444, class_show, NULL);
+
+static ssize_t
+inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
+}
+
+static struct kobj_attribute inst_attr =
+__ATTR(instance, 0444, inst_show, NULL);
+
+static ssize_t
+mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
+}
+
+static struct kobj_attribute mmio_attr =
+__ATTR(mmio_base, 0444, mmio_show, NULL);
+
+static const char * const vcs_caps[] = {
+ [ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
+ [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
+};
+
+static const char * const vecs_caps[] = {
+ [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
+};
+
+static ssize_t repr_trim(char *buf, ssize_t len)
+{
+ /* Trim off the trailing space and replace with a newline */
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+ if (len > 0)
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t
+__caps_show(struct intel_engine_cs *engine,
+ u32 caps, char *buf, bool show_unknown)
+{
+ const char * const *repr;
+ int count, n;
+ ssize_t len;
+
+ BUILD_BUG_ON(!typecheck(typeof(caps), engine->uabi_capabilities));
+
+ switch (engine->class) {
+ case VIDEO_DECODE_CLASS:
+ repr = vcs_caps;
+ count = ARRAY_SIZE(vcs_caps);
+ break;
+
+ case VIDEO_ENHANCEMENT_CLASS:
+ repr = vecs_caps;
+ count = ARRAY_SIZE(vecs_caps);
+ break;
+
+ default:
+ repr = NULL;
+ count = 0;
+ break;
+ }
+ GEM_BUG_ON(count > BITS_PER_TYPE(typeof(caps)));
+
+ len = 0;
+ for_each_set_bit(n,
+ (unsigned long *)&caps,
+ show_unknown ? BITS_PER_TYPE(typeof(caps)) : count) {
+ if (n >= count || !repr[n]) {
+ if (GEM_WARN_ON(show_unknown))
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "[%x] ", n);
+ } else {
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%s ", repr[n]);
+ }
+ if (GEM_WARN_ON(len >= PAGE_SIZE))
+ break;
+ }
+ return repr_trim(buf, len);
+}
+
+static ssize_t
+caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return __caps_show(engine, engine->uabi_capabilities, buf, true);
+}
+
+static struct kobj_attribute caps_attr =
+__ATTR(capabilities, 0444, caps_show, NULL);
+
+static ssize_t
+all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return __caps_show(kobj_to_engine(kobj), -1, buf, false);
+}
+
+static struct kobj_attribute all_caps_attr =
+__ATTR(known_capabilities, 0444, all_caps_show, NULL);
+
+static ssize_t
+max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long duration;
+ int err;
+
+ /*
+ * When waiting for a request, if it is currently being executed
+ * on the GPU, we busywait for a short while before sleeping. The
+ * premise is that most requests are short, and if it is already
+ * executing then there is a good chance that it will complete
+ * before we can setup the interrupt handler and go to sleep.
+ * We try to offset the cost of going to sleep, by first spinning
+ * on the request -- if it completed in less time than it would take
+ * to go to sleep, process the interrupt and return to the client,
+ * then we have saved the client some latency, albeit at the cost
+ * of spinning on an expensive CPU core.
+ *
+ * While we try to avoid waiting at all for a request that is unlikely
+ * to complete, deciding how long it is worth spinning for is an
+ * arbitrary decision: trading off power vs latency.
+ */
+
+ err = kstrtoull(buf, 0, &duration);
+ if (err)
+ return err;
+
+ if (duration > jiffies_to_nsecs(2))
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);
+
+ return count;
+}
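
max_busywait_duration_ns bounds the opportunistic spin described in the comment above. A userspace sketch of the resulting spin-then-sleep shape; all helpers here are stand-ins, not the i915 wait code:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static bool request_completed(const volatile int *rq) { return *rq; }

static void sleep_until_signaled(const volatile int *rq)
{
	while (!*rq)	/* stands in for interrupt-driven sleeping */
		nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
}

static void wait_for_request(const volatile int *rq, uint64_t busywait_ns)
{
	uint64_t deadline = now_ns() + busywait_ns;

	/* Spin first: a short request may complete before we could even
	 * install an interrupt handler and go to sleep. */
	while (now_ns() < deadline)
		if (request_completed(rq))
			return;

	sleep_until_signaled(rq);	/* trade latency for CPU time */
}
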
+
+static ssize_t
+max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
+}
+
+static struct kobj_attribute max_spin_attr =
+__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
+
+static ssize_t
+timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long duration;
+ int err;
+
+ /*
+ * Execlists uses a scheduling quantum (a timeslice) to alternate
+ * execution between ready-to-run contexts of equal priority. This
+ * ensures that all users (though only if they are of equal importance)
+ * have the opportunity to run and prevents livelocks where contexts
+ * may have implicit ordering due to userspace semaphores.
+ */
+
+ err = kstrtoull(buf, 0, &duration);
+ if (err)
+ return err;
+
+ if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.timeslice_duration_ms, duration);
+
+ if (execlists_active(&engine->execlists))
+ set_timer_ms(&engine->execlists.timer, duration);
+
+ return count;
+}
+
+static ssize_t
+timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
+}
+
+static struct kobj_attribute timeslice_duration_attr =
+__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
+
+static ssize_t
+stop_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long duration;
+ int err;
+
+ /*
+ * Allowing ourselves to sleep before a GPU reset after disabling
+ * submission, even for a few milliseconds, gives an innocent context
+ * the opportunity to clear the GPU before the reset occurs. However,
+ * how long to sleep depends on the typical non-preemptible duration
+ * (a similar problem to determining the ideal preempt-reset timeout
+ * or even the heartbeat interval).
+ */
+
+ err = kstrtoull(buf, 0, &duration);
+ if (err)
+ return err;
+
+ if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.stop_timeout_ms, duration);
+ return count;
+}
+
+static ssize_t
+stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
+}
+
+static struct kobj_attribute stop_timeout_attr =
+__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
+
+static ssize_t
+preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long timeout;
+ int err;
+
+ /*
+ * After initialising a preemption request, we give the current
+ * resident a small amount of time to vacate the GPU. The preemption
+ * request is for a higher priority context and should be immediate to
+ * maintain high quality of service (and avoid priority inversion).
+ * However, the preemption granularity of the GPU can be quite coarse
+ * and so we need a compromise.
+ */
+
+ err = kstrtoull(buf, 0, &timeout);
+ if (err)
+ return err;
+
+ if (timeout > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);
+
+ if (READ_ONCE(engine->execlists.pending[0]))
+ set_timer_ms(&engine->execlists.preempt, timeout);
+
+ return count;
+}
+
+static ssize_t
+preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
+}
+
+static struct kobj_attribute preempt_timeout_attr =
+__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
+
+static ssize_t
+heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long delay;
+ int err;
+
+ /*
+ * We monitor the health of the system via periodic heartbeat pulses.
+ * The pulses also provide the opportunity to perform garbage
+ * collection. However, we interpret an incomplete pulse (a missed
+ * heartbeat) as an indication that the system is no longer responsive,
+ * i.e. hung, and perform an engine or full GPU reset. Given that the
+ * preemption granularity can be very coarse on a system, the optimal
+ * value for any workload is unknowable!
+ */
+
+ err = kstrtoull(buf, 0, &delay);
+ if (err)
+ return err;
+
+ if (delay >= jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+ return -EINVAL;
+
+ err = intel_engine_set_heartbeat(engine, delay);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static ssize_t
+heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
+}
+
+static struct kobj_attribute heartbeat_interval_attr =
+__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
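Within the kernel, the same interval can be adjusted through intel_engine_set_heartbeat(), which the store handler above wraps. A hedged usage sketch; that 0 fully disables the heartbeat is an assumption based on the CONFIG_DRM_I915_HEARTBEAT_INTERVAL=0 convention:

/* In-kernel sketch: engine is an intel_engine_cs already in scope. */
int err;

err = intel_engine_set_heartbeat(engine, 0);	/* assumed: 0 disables */
if (!err)
	err = intel_engine_set_heartbeat(engine, 2500);	/* 2.5s interval */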
+
+static void kobj_engine_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static struct kobj_type kobj_engine_type = {
+ .release = kobj_engine_release,
+ .sysfs_ops = &kobj_sysfs_ops
+};
+
+static struct kobject *
+kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
+{
+ struct kobj_engine *ke;
+
+ ke = kzalloc(sizeof(*ke), GFP_KERNEL);
+ if (!ke)
+ return NULL;
+
+ kobject_init(&ke->base, &kobj_engine_type);
+ ke->engine = engine;
+
+ if (kobject_add(&ke->base, dir, "%s", engine->name)) {
+ kobject_put(&ke->base);
+ return NULL;
+ }
+
+ /* xfer ownership to sysfs tree */
+ return &ke->base;
+}
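struct kobj_engine and kobj_to_engine() are defined earlier in this file; they follow the standard embedded-kobject container pattern, roughly:

struct kobj_engine {
	struct kobject base;
	struct intel_engine_cs *engine;
};

static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
{
	/* Recover the container from its embedded kobject. */
	return container_of(kobj, struct kobj_engine, base)->engine;
}

Because the kobject is embedded in the container, kobj_engine_release() above frees the whole allocation once sysfs drops the last reference.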
+
+void intel_engines_add_sysfs(struct drm_i915_private *i915)
+{
+ static const struct attribute *files[] = {
+ &name_attr.attr,
+ &class_attr.attr,
+ &inst_attr.attr,
+ &mmio_attr.attr,
+ &caps_attr.attr,
+ &all_caps_attr.attr,
+ &max_spin_attr.attr,
+ &stop_timeout_attr.attr,
+#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
+ &heartbeat_interval_attr.attr,
+#endif
+ NULL
+ };
+
+ struct device *kdev = i915->drm.primary->kdev;
+ struct intel_engine_cs *engine;
+ struct kobject *dir;
+
+ dir = kobject_create_and_add("engine", &kdev->kobj);
+ if (!dir)
+ return;
+
+ for_each_uabi_engine(engine, i915) {
+ struct kobject *kobj;
+
+ kobj = kobj_engine(dir, engine);
+ if (!kobj)
+ goto err_engine;
+
+ if (sysfs_create_files(kobj, files))
+ goto err_object;
+
+ if (intel_engine_has_timeslices(engine) &&
+ sysfs_create_file(kobj, &timeslice_duration_attr.attr))
+ goto err_engine;
+
+ if (intel_engine_has_preempt_reset(engine) &&
+ sysfs_create_file(kobj, &preempt_timeout_attr.attr))
+ goto err_engine;
+
+ if (0) {
+err_object:
+ kobject_put(kobj);
+err_engine:
+ dev_err(kdev, "Failed to add sysfs engine '%s'\n",
+ engine->name);
+ break;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.h b/drivers/gpu/drm/i915/gt/sysfs_engines.h
new file mode 100644
index 000000000000..9546fffe03a7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_SYSFS_H
+#define INTEL_ENGINE_SYSFS_H
+
+struct drm_i915_private;
+
+void intel_engines_add_sysfs(struct drm_i915_private *i915);
+
+#endif /* INTEL_ENGINE_SYSFS_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index c4c1523da7a6..819f09ef51fc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -207,7 +207,7 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
u32 flags = 0;
- if (!intel_guc_is_submission_supported(guc))
+ if (!intel_guc_submission_is_used(guc))
flags |= GUC_CTL_DISABLE_SCHEDULER;
return flags;
@@ -217,7 +217,7 @@ static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
u32 flags = 0;
- if (intel_guc_is_submission_supported(guc)) {
+ if (intel_guc_submission_is_used(guc)) {
u32 ctxnum, base;
base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
@@ -333,7 +333,7 @@ int intel_guc_init(struct intel_guc *guc)
ret = intel_uc_fw_init(&guc->fw);
if (ret)
- goto err_fetch;
+ goto out;
ret = intel_guc_log_create(&guc->log);
if (ret)
@@ -348,7 +348,7 @@ int intel_guc_init(struct intel_guc *guc)
if (ret)
goto err_ads;
- if (intel_guc_is_submission_supported(guc)) {
+ if (intel_guc_submission_is_used(guc)) {
/*
* This is stuff we need to have available at fw load time
* if we are planning to enable submission later
@@ -364,6 +364,8 @@ int intel_guc_init(struct intel_guc *guc)
/* We need to notify the guc whenever we change the GGTT */
i915_ggtt_enable_guc(gt->ggtt);
+ intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);
+
return 0;
err_ct:
@@ -374,9 +376,8 @@ err_log:
intel_guc_log_destroy(&guc->log);
err_fw:
intel_uc_fw_fini(&guc->fw);
-err_fetch:
- intel_uc_fw_cleanup_fetch(&guc->fw);
- DRM_DEV_DEBUG_DRIVER(gt->i915->drm.dev, "failed with %d\n", ret);
+out:
+ i915_probe_error(gt->i915, "failed with %d\n", ret);
return ret;
}
@@ -384,12 +385,12 @@ void intel_guc_fini(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- if (!intel_uc_fw_is_available(&guc->fw))
+ if (!intel_uc_fw_is_loadable(&guc->fw))
return;
i915_ggtt_disable_guc(gt->ggtt);
- if (intel_guc_is_submission_supported(guc))
+ if (intel_guc_submission_is_used(guc))
intel_guc_submission_fini(guc);
intel_guc_ct_fini(&guc->ct);
@@ -397,9 +398,6 @@ void intel_guc_fini(struct intel_guc *guc)
intel_guc_ads_destroy(guc);
intel_guc_log_destroy(&guc->log);
intel_uc_fw_fini(&guc->fw);
- intel_uc_fw_cleanup_fetch(&guc->fw);
-
- intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_DISABLED);
}
/*
@@ -544,7 +542,7 @@ int intel_guc_suspend(struct intel_guc *guc)
* If GuC communication is enabled but submission is not supported,
* we do not need to suspend the GuC.
*/
- if (!intel_guc_submission_is_enabled(guc))
+ if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
return 0;
/*
@@ -609,7 +607,7 @@ int intel_guc_resume(struct intel_guc *guc)
* we do not need to resume the GuC but we do need to enable the
* GuC communication on resume (above).
*/
- if (!intel_guc_submission_is_enabled(guc))
+ if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
return 0;
return intel_guc_send(guc, action, ARRAY_SIZE(action));
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 668b067b71e2..4594ccbeaa34 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -39,7 +39,7 @@ struct intel_guc {
void (*disable)(struct intel_guc *guc);
} interrupts;
- bool submission_supported;
+ bool submission_selected;
struct i915_vma *ads_vma;
struct __guc_ads_blob *ads_blob;
@@ -143,11 +143,17 @@ static inline bool intel_guc_is_supported(struct intel_guc *guc)
return intel_uc_fw_is_supported(&guc->fw);
}
-static inline bool intel_guc_is_enabled(struct intel_guc *guc)
+static inline bool intel_guc_is_wanted(struct intel_guc *guc)
{
return intel_uc_fw_is_enabled(&guc->fw);
}
+static inline bool intel_guc_is_used(struct intel_guc *guc)
+{
+ GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED);
+ return intel_uc_fw_is_available(&guc->fw);
+}
+
static inline bool intel_guc_is_fw_running(struct intel_guc *guc)
{
return intel_uc_fw_is_running(&guc->fw);
@@ -167,11 +173,6 @@ static inline int intel_guc_sanitize(struct intel_guc *guc)
return 0;
}
-static inline bool intel_guc_is_submission_supported(struct intel_guc *guc)
-{
- return guc->submission_supported;
-}
-
static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
spin_lock_irq(&guc->irq_lock);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 9e42324fdecd..fe7778c28d2d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -456,9 +456,7 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link) {
- if (!i915_request_signaled(rq))
- dma_fence_set_error(&rq->fence, -EIO);
-
+ i915_request_set_error_once(rq, -EIO);
i915_request_mark_complete(rq);
}
@@ -660,12 +658,9 @@ void intel_guc_submission_disable(struct intel_guc *guc)
guc_proc_desc_fini(guc);
}
-static bool __guc_submission_support(struct intel_guc *guc)
+static bool __guc_submission_selected(struct intel_guc *guc)
{
- /* XXX: GuC submission is unavailable for now */
- return false;
-
- if (!intel_guc_is_supported(guc))
+ if (!intel_guc_submission_is_supported(guc))
return false;
return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
@@ -673,7 +668,7 @@ static bool __guc_submission_support(struct intel_guc *guc)
void intel_guc_submission_init_early(struct intel_guc *guc)
{
- guc->submission_supported = __guc_submission_support(guc);
+ guc->submission_selected = __guc_submission_selected(guc);
}
bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index e402a2932592..4cf9d3e50263 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -8,7 +8,8 @@
#include <linux/types.h>
-struct intel_guc;
+#include "intel_guc.h"
+
struct intel_engine_cs;
void intel_guc_submission_init_early(struct intel_guc *guc);
@@ -20,4 +21,20 @@ int intel_guc_preempt_work_create(struct intel_guc *guc);
void intel_guc_preempt_work_destroy(struct intel_guc *guc);
bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine);
+static inline bool intel_guc_submission_is_supported(struct intel_guc *guc)
+{
+ /* XXX: GuC submission is unavailable for now */
+ return false;
+}
+
+static inline bool intel_guc_submission_is_wanted(struct intel_guc *guc)
+{
+ return guc->submission_selected;
+}
+
+static inline bool intel_guc_submission_is_used(struct intel_guc *guc)
+{
+ return intel_guc_is_used(guc) && intel_guc_submission_is_wanted(guc);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 32a069841c14..a74b65694512 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -121,19 +121,20 @@ int intel_huc_init(struct intel_huc *huc)
if (err)
goto out_fini;
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOADABLE);
+
return 0;
out_fini:
intel_uc_fw_fini(&huc->fw);
out:
- intel_uc_fw_cleanup_fetch(&huc->fw);
- DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "failed with %d\n", err);
+ i915_probe_error(i915, "failed with %d\n", err);
return err;
}
void intel_huc_fini(struct intel_huc *huc)
{
- if (!intel_uc_fw_is_available(&huc->fw))
+ if (!intel_uc_fw_is_loadable(&huc->fw))
return;
intel_huc_rsa_data_destroy(huc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index 644c059fe01d..a40b9cfc6c22 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -41,11 +41,17 @@ static inline bool intel_huc_is_supported(struct intel_huc *huc)
return intel_uc_fw_is_supported(&huc->fw);
}
-static inline bool intel_huc_is_enabled(struct intel_huc *huc)
+static inline bool intel_huc_is_wanted(struct intel_huc *huc)
{
return intel_uc_fw_is_enabled(&huc->fw);
}
+static inline bool intel_huc_is_used(struct intel_huc *huc)
+{
+ GEM_BUG_ON(__intel_uc_fw_status(&huc->fw) == INTEL_UC_FIRMWARE_SELECTED);
+ return intel_uc_fw_is_available(&huc->fw);
+}
+
static inline bool intel_huc_is_authenticated(struct intel_huc *huc)
{
return intel_uc_fw_is_running(&huc->fw);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index eee193bf2cc4..9cdf4cbe691c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -20,7 +20,7 @@ void intel_huc_fw_init_early(struct intel_huc *huc)
struct drm_i915_private *i915 = gt->i915;
intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC,
- intel_uc_uses_guc(uc),
+ intel_uc_wants_guc(uc),
INTEL_INFO(i915)->platform, INTEL_REVID(i915));
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index affc4d6f9ead..a4cbe06e06bd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -48,17 +48,17 @@ static void __confirm_options(struct intel_uc *uc)
DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
"enable_guc=%d (guc:%s submission:%s huc:%s)\n",
i915_modparams.enable_guc,
- yesno(intel_uc_uses_guc(uc)),
- yesno(intel_uc_uses_guc_submission(uc)),
- yesno(intel_uc_uses_huc(uc)));
+ yesno(intel_uc_wants_guc(uc)),
+ yesno(intel_uc_wants_guc_submission(uc)),
+ yesno(intel_uc_wants_huc(uc)));
if (i915_modparams.enable_guc == -1)
return;
if (i915_modparams.enable_guc == 0) {
- GEM_BUG_ON(intel_uc_uses_guc(uc));
- GEM_BUG_ON(intel_uc_uses_guc_submission(uc));
- GEM_BUG_ON(intel_uc_uses_huc(uc));
+ GEM_BUG_ON(intel_uc_wants_guc(uc));
+ GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
+ GEM_BUG_ON(intel_uc_wants_huc(uc));
return;
}
@@ -93,7 +93,7 @@ void intel_uc_init_early(struct intel_uc *uc)
__confirm_options(uc);
- if (intel_uc_uses_guc(uc))
+ if (intel_uc_wants_guc(uc))
uc->ops = &uc_ops_on;
else
uc->ops = &uc_ops_off;
@@ -257,13 +257,13 @@ static void __uc_fetch_firmwares(struct intel_uc *uc)
{
int err;
- GEM_BUG_ON(!intel_uc_uses_guc(uc));
+ GEM_BUG_ON(!intel_uc_wants_guc(uc));
err = intel_uc_fw_fetch(&uc->guc.fw);
if (err)
return;
- if (intel_uc_uses_huc(uc))
+ if (intel_uc_wants_huc(uc))
intel_uc_fw_fetch(&uc->huc.fw);
}
@@ -273,25 +273,38 @@ static void __uc_cleanup_firmwares(struct intel_uc *uc)
intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}
-static void __uc_init(struct intel_uc *uc)
+static int __uc_init(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
struct intel_huc *huc = &uc->huc;
int ret;
- GEM_BUG_ON(!intel_uc_uses_guc(uc));
+ GEM_BUG_ON(!intel_uc_wants_guc(uc));
+
+ if (!intel_uc_uses_guc(uc))
+ return 0;
+
+ if (i915_inject_probe_failure(uc_to_gt(uc)->i915))
+ return -ENOMEM;
/* XXX: GuC submission is unavailable for now */
- GEM_BUG_ON(intel_uc_supports_guc_submission(uc));
+ GEM_BUG_ON(intel_uc_uses_guc_submission(uc));
ret = intel_guc_init(guc);
- if (ret) {
- intel_uc_fw_cleanup_fetch(&huc->fw);
- return;
+ if (ret)
+ return ret;
+
+ if (intel_uc_uses_huc(uc)) {
+ ret = intel_huc_init(huc);
+ if (ret)
+ goto out_guc;
}
- if (intel_uc_uses_huc(uc))
- intel_huc_init(huc);
+ return 0;
+
+out_guc:
+ intel_guc_fini(guc);
+ return ret;
}
static void __uc_fini(struct intel_uc *uc)
@@ -402,12 +415,12 @@ static int __uc_init_hw(struct intel_uc *uc)
int ret, attempts;
GEM_BUG_ON(!intel_uc_supports_guc(uc));
- GEM_BUG_ON(!intel_uc_uses_guc(uc));
+ GEM_BUG_ON(!intel_uc_wants_guc(uc));
- if (!intel_uc_fw_is_available(&guc->fw)) {
+ if (!intel_uc_fw_is_loadable(&guc->fw)) {
ret = __uc_check_hw(uc) ||
intel_uc_fw_is_overridden(&guc->fw) ||
- intel_uc_supports_guc_submission(uc) ?
+ intel_uc_wants_guc_submission(uc) ?
intel_uc_fw_status_to_error(guc->fw.status) : 0;
goto err_out;
}
@@ -459,14 +472,14 @@ static int __uc_init_hw(struct intel_uc *uc)
if (ret)
goto err_communication;
- if (intel_uc_supports_guc_submission(uc))
+ if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_enable(guc);
dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
guc->fw.major_ver_found, guc->fw.minor_ver_found,
"submission",
- enableddisabled(intel_uc_supports_guc_submission(uc)));
+ enableddisabled(intel_uc_uses_guc_submission(uc)));
if (intel_uc_uses_huc(uc)) {
dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
@@ -508,7 +521,7 @@ static void __uc_fini_hw(struct intel_uc *uc)
if (!intel_guc_is_fw_running(guc))
return;
- if (intel_uc_supports_guc_submission(uc))
+ if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_disable(guc);
if (guc_communication_enabled(guc))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
index 49c913524686..5ae7b50b7dc1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -7,6 +7,7 @@
#define _INTEL_UC_H_
#include "intel_guc.h"
+#include "intel_guc_submission.h"
#include "intel_huc.h"
#include "i915_params.h"
@@ -16,7 +17,7 @@ struct intel_uc_ops {
int (*sanitize)(struct intel_uc *uc);
void (*init_fw)(struct intel_uc *uc);
void (*fini_fw)(struct intel_uc *uc);
- void (*init)(struct intel_uc *uc);
+ int (*init)(struct intel_uc *uc);
void (*fini)(struct intel_uc *uc);
int (*init_hw)(struct intel_uc *uc);
void (*fini_hw)(struct intel_uc *uc);
@@ -40,35 +41,44 @@ void intel_uc_runtime_suspend(struct intel_uc *uc);
int intel_uc_resume(struct intel_uc *uc);
int intel_uc_runtime_resume(struct intel_uc *uc);
-static inline bool intel_uc_supports_guc(struct intel_uc *uc)
-{
- return intel_guc_is_supported(&uc->guc);
-}
-
-static inline bool intel_uc_uses_guc(struct intel_uc *uc)
-{
- return intel_guc_is_enabled(&uc->guc);
-}
+/*
+ * We need to know as early as possible if we're going to use GuC or not to
+ * take the correct setup paths. Additionally, once we've started loading the
+ * GuC, it is unsafe to keep executing without it because some parts of the HW,
+ * a subset of which is not cleaned on GT reset, will start expecting the GuC FW
+ * to be running.
+ * To solve both these requirements, we commit to using the microcontrollers if
+ * the relevant modparam is set and the blobs are found on the system. At this
+ * stage, the only thing that can stop us from attempting to load the blobs on
+ * the HW and use them is a fundamental issue (e.g. no memory for our
+ * structures); if we hit such a problem during driver load we're broken even
+ * without GuC, so there is no point in trying to fall back.
+ *
+ * Given the above, we can be in one of 4 states, with the last one implying
+ * we're committed to using the microcontroller:
+ * - Not supported: not available in HW and/or firmware not defined.
+ * - Supported: available in HW and firmware defined.
+ * - Wanted: supported + enabled in modparam.
+ * - In use: wanted + firmware found on the system and successfully fetched.
+ */
-static inline bool intel_uc_supports_guc_submission(struct intel_uc *uc)
-{
- return intel_guc_is_submission_supported(&uc->guc);
+#define __uc_state_checker(x, func, state, required) \
+static inline bool intel_uc_##state##_##func(struct intel_uc *uc) \
+{ \
+ return intel_##func##_is_##required(&uc->x); \
}
-static inline bool intel_uc_uses_guc_submission(struct intel_uc *uc)
-{
- return intel_guc_is_submission_supported(&uc->guc);
-}
+#define uc_state_checkers(x, func) \
+__uc_state_checker(x, func, supports, supported) \
+__uc_state_checker(x, func, wants, wanted) \
+__uc_state_checker(x, func, uses, used)
-static inline bool intel_uc_supports_huc(struct intel_uc *uc)
-{
- return intel_uc_supports_guc(uc);
-}
+uc_state_checkers(guc, guc);
+uc_state_checkers(huc, huc);
+uc_state_checkers(guc, guc_submission);
-static inline bool intel_uc_uses_huc(struct intel_uc *uc)
-{
- return intel_huc_is_enabled(&uc->huc);
-}
+#undef uc_state_checkers
+#undef __uc_state_checker
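Spelled out, uc_state_checkers(guc, guc) above generates three one-line helpers; the expansion is equivalent to:

static inline bool intel_uc_supports_guc(struct intel_uc *uc)
{
	return intel_guc_is_supported(&uc->guc);
}

static inline bool intel_uc_wants_guc(struct intel_uc *uc)
{
	return intel_guc_is_wanted(&uc->guc);
}

static inline bool intel_uc_uses_guc(struct intel_uc *uc)
{
	return intel_guc_is_used(&uc->guc);
}

The huc and guc_submission variants expand the same way against the intel_huc_* and intel_guc_submission_* helpers respectively.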
#define intel_uc_ops_function(_NAME, _OPS, _TYPE, _RET) \
static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \
@@ -80,7 +90,7 @@ static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \
intel_uc_ops_function(sanitize, sanitize, int, 0);
intel_uc_ops_function(fetch_firmwares, init_fw, void, );
intel_uc_ops_function(cleanup_firmwares, fini_fw, void, );
-intel_uc_ops_function(init, init, void, );
+intel_uc_ops_function(init, init, int, 0);
intel_uc_ops_function(fini, fini, void, );
intel_uc_ops_function(init_hw, init_hw, int, 0);
intel_uc_ops_function(fini_hw, fini_hw, void, );
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 8ee0a0c7f447..18c755203688 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -43,7 +43,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
* features.
*/
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
- fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 3)) \
+ fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 12)) \
fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \
fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \
fw_def(COFFEELAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \
@@ -279,7 +279,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
err = i915_inject_probe_error(i915, -ENXIO);
if (err)
- return err;
+ goto fail;
__force_fw_fetch_failures(uc_fw, -EINVAL);
__force_fw_fetch_failures(uc_fw, -ESTALE);
@@ -501,7 +501,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
if (err)
return err;
- if (!intel_uc_fw_is_available(uc_fw))
+ if (!intel_uc_fw_is_loadable(uc_fw))
return -ENOEXEC;
/* Call custom loader */
@@ -544,7 +544,10 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
- intel_uc_fw_cleanup_fetch(uc_fw);
+ if (i915_gem_object_has_pinned_pages(uc_fw->obj))
+ i915_gem_object_unpin_pages(uc_fw->obj);
+
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
}
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 1f30543d0d2d..888ff0de0244 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -29,8 +29,11 @@ struct intel_gt;
* | | SELECTED |
* +------------+- / | \ -+
* | | MISSING <--/ | \--> ERROR |
- * | fetch | | |
- * | | /------> AVAILABLE <---<-----------\ |
+ * | fetch | V |
+ * | | AVAILABLE |
+ * +------------+- | -+
+ * | init | V |
+ * | | /------> LOADABLE <----<-----------\ |
* +------------+- \ / \ \ \ -+
* | | FAIL <--< \--> TRANSFERRED \ |
* | upload | \ / \ / |
@@ -46,6 +49,7 @@ enum intel_uc_fw_status {
INTEL_UC_FIRMWARE_MISSING, /* blob not found on the system */
INTEL_UC_FIRMWARE_ERROR, /* invalid format or version */
INTEL_UC_FIRMWARE_AVAILABLE, /* blob found and copied in mem */
+ INTEL_UC_FIRMWARE_LOADABLE, /* all fw-required objects are ready */
INTEL_UC_FIRMWARE_FAIL, /* failed to xfer or init/auth the fw */
INTEL_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */
INTEL_UC_FIRMWARE_RUNNING /* init/auth done */
@@ -115,6 +119,8 @@ const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
return "ERROR";
case INTEL_UC_FIRMWARE_AVAILABLE:
return "AVAILABLE";
+ case INTEL_UC_FIRMWARE_LOADABLE:
+ return "LOADABLE";
case INTEL_UC_FIRMWARE_FAIL:
return "FAIL";
case INTEL_UC_FIRMWARE_TRANSFERRED:
@@ -143,6 +149,7 @@ static inline int intel_uc_fw_status_to_error(enum intel_uc_fw_status status)
case INTEL_UC_FIRMWARE_SELECTED:
return -ESTALE;
case INTEL_UC_FIRMWARE_AVAILABLE:
+ case INTEL_UC_FIRMWARE_LOADABLE:
case INTEL_UC_FIRMWARE_TRANSFERRED:
case INTEL_UC_FIRMWARE_RUNNING:
return 0;
@@ -184,6 +191,11 @@ static inline bool intel_uc_fw_is_available(struct intel_uc_fw *uc_fw)
return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_AVAILABLE;
}
+static inline bool intel_uc_fw_is_loadable(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_LOADABLE;
+}
+
static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw)
{
return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_TRANSFERRED;
@@ -202,7 +214,7 @@ static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw)
static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
{
if (intel_uc_fw_is_loaded(uc_fw))
- intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOADABLE);
}
static inline u32 __intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
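These helpers work because enum intel_uc_fw_status is ordered from least to most initialised, so a single >= comparison asks "has the firmware reached at least this stage?". A hedged sketch of how a caller might distinguish the new LOADABLE stage (do_upload is a hypothetical placeholder):

/* Fetched into memory, but fw-required objects not yet created. */
if (intel_uc_fw_is_available(uc_fw) && !intel_uc_fw_is_loadable(uc_fw))
	return -EAGAIN;	/* init must run before upload */

/* All fw-required objects ready: safe to transfer to the HW. */
if (intel_uc_fw_is_loadable(uc_fw))
	return do_upload(uc_fw);	/* hypothetical */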
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 4ff60c793a21..37fc460414a8 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -151,12 +151,12 @@ static void dmabuf_gem_object_free(struct kref *kref)
dmabuf_obj = container_of(pos,
struct intel_vgpu_dmabuf_obj, list);
if (dmabuf_obj == obj) {
+ list_del(pos);
intel_gvt_hypervisor_put_vfio_device(vgpu);
idr_remove(&vgpu->object_idr,
dmabuf_obj->dmabuf_id);
kfree(dmabuf_obj->info);
kfree(dmabuf_obj);
- list_del(pos);
break;
}
}
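Unlinking the object before releasing it matters: once the references are dropped and the memory freed, a stale list entry must not remain reachable to other walkers. The safe teardown ordering, as a generic sketch (drop_refs is a hypothetical placeholder):

list_del(pos);		/* unlink first: no lookup can reach it now */
drop_refs(dmabuf_obj);	/* hypothetical: release held references */
kfree(dmabuf_obj);	/* finally free the memory */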
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 7aaae9c562f8..990a181094e3 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -144,7 +144,7 @@ void intel_gvt_free_firmware(struct intel_gvt *gvt)
clean_firmware_sysfs(gvt);
kfree(gvt->firmware.cfg_space);
- kfree(gvt->firmware.mmio);
+ vfree(gvt->firmware.mmio);
}
static int verify_firmware(struct intel_gvt *gvt,
@@ -225,7 +225,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
firmware->cfg_space = mem;
- mem = kmalloc(info->mmio_size, GFP_KERNEL);
+ mem = vmalloc(info->mmio_size);
if (!mem) {
kfree(path);
kfree(firmware->cfg_space);
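The mmio snapshot can be large enough that a physically contiguous kmalloc may fail under memory fragmentation; vmalloc needs only virtually contiguous pages. Allocation and free must stay paired, as in this sketch (kvmalloc/kvfree, which choose the strategy automatically, would be an alternative):

u8 *mmio = vmalloc(info->mmio_size);	/* virtually contiguous */
if (!mmio)
	return -ENOMEM;
/* ... snapshot registers into mmio ... */
vfree(mmio);	/* pairs with vmalloc; kfree here would be a bug */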
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index f6f2ab2683f7..2a4b23f8aa74 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1962,7 +1962,11 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
if (mm->type == INTEL_GVT_MM_PPGTT) {
list_del(&mm->ppgtt_mm.list);
+
+ mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
list_del(&mm->ppgtt_mm.lru_list);
+ mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
+
invalidate_ppgtt_mm(mm);
} else {
vfree(mm->ggtt_mm.virtual_ggtt);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index a6c4fcefa83b..9e1787867894 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -35,6 +35,7 @@
#include <linux/kthread.h>
#include "i915_drv.h"
+#include "intel_gvt.h"
#include "gvt.h"
#include <linux/vfio.h>
#include <linux/mdev.h>
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 9d67f33f37a0..cb11c3184085 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1228,7 +1228,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
ce->vm = i915_vm_get(&ppgtt->vm);
intel_context_set_single_submission(ce);
- if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
+ /* Max ring buffer size */
+ if (!intel_uc_wants_guc_submission(&engine->gt->uc)) {
const unsigned int ring_size = 512 * SZ_4K;
ce->ring = __intel_context_ring_size(ring_size);
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index abcde8ce1a9a..78f14f04d2ea 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -560,9 +560,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
intel_vgpu_reset_mmio(vgpu, dmlr);
populate_pvinfo_page(vgpu);
- intel_vgpu_reset_display(vgpu);
if (dmlr) {
+ intel_vgpu_reset_display(vgpu);
intel_vgpu_reset_cfg_space(vgpu);
/* only reset the failsafe mode when dmlr reset */
vgpu->failsafe = false;
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 9ccb931a733e..c4048628188a 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -7,6 +7,7 @@
#include <linux/debugobjects.h>
#include "gt/intel_context.h"
+#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"
@@ -452,6 +453,9 @@ static void enable_signaling(struct i915_active_fence *active)
{
struct dma_fence *fence;
+ if (unlikely(is_barrier(active)))
+ return;
+
fence = i915_active_fence_get(active);
if (!fence)
return;
@@ -460,26 +464,49 @@ static void enable_signaling(struct i915_active_fence *active)
dma_fence_put(fence);
}
-int i915_active_wait(struct i915_active *ref)
+static int flush_barrier(struct active_node *it)
{
- struct active_node *it, *n;
- int err = 0;
+ struct intel_engine_cs *engine;
- might_sleep();
+ if (likely(!is_barrier(&it->base)))
+ return 0;
- if (!i915_active_acquire_if_busy(ref))
+ engine = __barrier_to_engine(it);
+ smp_rmb(); /* serialise with add_active_barriers */
+ if (!is_barrier(&it->base))
return 0;
- /* Flush lazy signals */
+ return intel_engine_flush_barriers(engine);
+}
+
+static int flush_lazy_signals(struct i915_active *ref)
+{
+ struct active_node *it, *n;
+ int err = 0;
+
enable_signaling(&ref->excl);
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- if (is_barrier(&it->base)) /* unconnected idle barrier */
- continue;
+ err = flush_barrier(it); /* unconnected idle barrier? */
+ if (err)
+ break;
enable_signaling(&it->base);
}
- /* Any fence added after the wait begins will not be auto-signaled */
+ return err;
+}
+
+int i915_active_wait(struct i915_active *ref)
+{
+ int err;
+
+ might_sleep();
+
+ if (!i915_active_acquire_if_busy(ref))
+ return 0;
+
+ /* Any fence added after the wait begins will not be auto-signaled */
+ err = flush_lazy_signals(ref);
i915_active_release(ref);
if (err)
return err;
@@ -491,25 +518,81 @@ int i915_active_wait(struct i915_active *ref)
return 0;
}
-int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
+static int __await_active(struct i915_active_fence *active,
+ int (*fn)(void *arg, struct dma_fence *fence),
+ void *arg)
+{
+ struct dma_fence *fence;
+
+ if (is_barrier(active)) /* XXX flush the barrier? */
+ return 0;
+
+ fence = i915_active_fence_get(active);
+ if (fence) {
+ int err;
+
+ err = fn(arg, fence);
+ dma_fence_put(fence);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int await_active(struct i915_active *ref,
+ unsigned int flags,
+ int (*fn)(void *arg, struct dma_fence *fence),
+ void *arg)
{
int err = 0;
+ /* We must always wait for the exclusive fence! */
if (rcu_access_pointer(ref->excl.fence)) {
- struct dma_fence *fence;
-
- rcu_read_lock();
- fence = dma_fence_get_rcu_safe(&ref->excl.fence);
- rcu_read_unlock();
- if (fence) {
- err = i915_request_await_dma_fence(rq, fence);
- dma_fence_put(fence);
+ err = __await_active(&ref->excl, fn, arg);
+ if (err)
+ return err;
+ }
+
+ if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) {
+ struct active_node *it, *n;
+
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+ err = __await_active(&it->base, fn, arg);
+ if (err)
+ break;
}
+ i915_active_release(ref);
+ if (err)
+ return err;
}
- /* In the future we may choose to await on all fences */
+ return 0;
+}
+
+static int rq_await_fence(void *arg, struct dma_fence *fence)
+{
+ return i915_request_await_dma_fence(arg, fence);
+}
- return err;
+int i915_request_await_active(struct i915_request *rq,
+ struct i915_active *ref,
+ unsigned int flags)
+{
+ return await_active(ref, flags, rq_await_fence, rq);
+}
+
+static int sw_await_fence(void *arg, struct dma_fence *fence)
+{
+ return i915_sw_fence_await_dma_fence(arg, fence, 0,
+ GFP_NOWAIT | __GFP_NOWARN);
+}
+
+int i915_sw_fence_await_active(struct i915_sw_fence *fence,
+ struct i915_active *ref,
+ unsigned int flags)
+{
+ return await_active(ref, flags, sw_await_fence, fence);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
@@ -823,7 +906,6 @@ __i915_active_fence_set(struct i915_active_fence *active,
__list_del_entry(&active->cb.node);
spin_unlock(prev->lock); /* serialise with prev->cb_list */
}
- GEM_BUG_ON(rcu_access_pointer(active->fence) != fence);
list_add_tail(&active->cb.node, &fence->cb_list);
spin_unlock_irqrestore(fence->lock, flags);
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 973ff0447c6c..b3282ae7913c 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -183,7 +183,13 @@ static inline bool i915_active_has_exclusive(struct i915_active *ref)
int i915_active_wait(struct i915_active *ref);
-int i915_request_await_active(struct i915_request *rq, struct i915_active *ref);
+int i915_sw_fence_await_active(struct i915_sw_fence *fence,
+ struct i915_active *ref,
+ unsigned int flags);
+int i915_request_await_active(struct i915_request *rq,
+ struct i915_active *ref,
+ unsigned int flags);
+#define I915_ACTIVE_AWAIT_ALL BIT(0)
int i915_active_acquire(struct i915_active *ref);
bool i915_active_acquire_if_busy(struct i915_active *ref);
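Existing callers keep the old behaviour by passing flags == 0, which awaits only the exclusive fence; I915_ACTIVE_AWAIT_ALL additionally awaits every fence tracked in the rbtree. A usage sketch (rq and ref assumed in scope):

/* Old behaviour: wait only on the exclusive fence. */
err = i915_request_await_active(rq, ref, 0);

/* Also wait on all tracked fences. */
if (!err)
	err = i915_request_await_active(rq, ref, I915_ACTIVE_AWAIT_ALL);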
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
index 66883af64ca1..20babbdb297d 100644
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -312,7 +312,8 @@ i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
return block;
out_free:
- __i915_buddy_free(mm, block);
+ if (i != order)
+ __i915_buddy_free(mm, block);
return ERR_PTR(err);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e5eea915bd0d..6ca797128aa1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -996,220 +996,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
return ret;
}
-static int ilk_drpc_info(struct seq_file *m)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_uncore *uncore = &i915->uncore;
- u32 rgvmodectl, rstdbyctl;
- u16 crstandvid;
-
- rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
- rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
- crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
-
- seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
- seq_printf(m, "Boost freq: %d\n",
- (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
- MEMMODE_BOOST_FREQ_SHIFT);
- seq_printf(m, "HW control enabled: %s\n",
- yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
- seq_printf(m, "SW control enabled: %s\n",
- yesno(rgvmodectl & MEMMODE_SWMODE_EN));
- seq_printf(m, "Gated voltage change: %s\n",
- yesno(rgvmodectl & MEMMODE_RCLK_GATE));
- seq_printf(m, "Starting frequency: P%d\n",
- (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
- seq_printf(m, "Max P-state: P%d\n",
- (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
- seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
- seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
- seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
- seq_printf(m, "Render standby enabled: %s\n",
- yesno(!(rstdbyctl & RCX_SW_EXIT)));
- seq_puts(m, "Current RS state: ");
- switch (rstdbyctl & RSX_STATUS_MASK) {
- case RSX_STATUS_ON:
- seq_puts(m, "on\n");
- break;
- case RSX_STATUS_RC1:
- seq_puts(m, "RC1\n");
- break;
- case RSX_STATUS_RC1E:
- seq_puts(m, "RC1E\n");
- break;
- case RSX_STATUS_RS1:
- seq_puts(m, "RS1\n");
- break;
- case RSX_STATUS_RS2:
- seq_puts(m, "RS2 (RC6)\n");
- break;
- case RSX_STATUS_RS3:
- seq_puts(m, "RC3 (RC6+)\n");
- break;
- default:
- seq_puts(m, "unknown\n");
- break;
- }
-
- return 0;
-}
-
-static int i915_forcewake_domains(struct seq_file *m, void *data)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_uncore *uncore = &i915->uncore;
- struct intel_uncore_forcewake_domain *fw_domain;
- unsigned int tmp;
-
- seq_printf(m, "user.bypass_count = %u\n",
- uncore->user_forcewake_count);
-
- for_each_fw_domain(fw_domain, uncore, tmp)
- seq_printf(m, "%s.wake_count = %u\n",
- intel_uncore_forcewake_domain_to_str(fw_domain->id),
- READ_ONCE(fw_domain->wake_count));
-
- return 0;
-}
-
-static void print_rc6_res(struct seq_file *m,
- const char *title,
- const i915_reg_t reg)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- seq_printf(m, "%s %u (%llu us)\n", title,
- intel_uncore_read(&i915->uncore, reg),
- intel_rc6_residency_us(&i915->gt.rc6, reg));
-}
-
-static int vlv_drpc_info(struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- u32 rcctl1, pw_status;
-
- pw_status = I915_READ(VLV_GTLC_PW_STATUS);
- rcctl1 = I915_READ(GEN6_RC_CONTROL);
-
- seq_printf(m, "RC6 Enabled: %s\n",
- yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
- GEN6_RC_CTL_EI_MODE(1))));
- seq_printf(m, "Render Power Well: %s\n",
- (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
- seq_printf(m, "Media Power Well: %s\n",
- (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
-
- print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
- print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
-
- return i915_forcewake_domains(m, NULL);
-}
-
-static int gen6_drpc_info(struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- u32 gt_core_status, rcctl1, rc6vids = 0;
- u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
-
- gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
- trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
-
- rcctl1 = I915_READ(GEN6_RC_CONTROL);
- if (INTEL_GEN(dev_priv) >= 9) {
- gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
- gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
- }
-
- if (INTEL_GEN(dev_priv) <= 7)
- sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
- &rc6vids, NULL);
-
- seq_printf(m, "RC1e Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
- seq_printf(m, "RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
- if (INTEL_GEN(dev_priv) >= 9) {
- seq_printf(m, "Render Well Gating Enabled: %s\n",
- yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
- seq_printf(m, "Media Well Gating Enabled: %s\n",
- yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
- }
- seq_printf(m, "Deep RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
- seq_printf(m, "Deepest RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
- seq_puts(m, "Current RC state: ");
- switch (gt_core_status & GEN6_RCn_MASK) {
- case GEN6_RC0:
- if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
- seq_puts(m, "Core Power Down\n");
- else
- seq_puts(m, "on\n");
- break;
- case GEN6_RC3:
- seq_puts(m, "RC3\n");
- break;
- case GEN6_RC6:
- seq_puts(m, "RC6\n");
- break;
- case GEN6_RC7:
- seq_puts(m, "RC7\n");
- break;
- default:
- seq_puts(m, "Unknown\n");
- break;
- }
-
- seq_printf(m, "Core Power Down: %s\n",
- yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
- if (INTEL_GEN(dev_priv) >= 9) {
- seq_printf(m, "Render Power Well: %s\n",
- (gen9_powergate_status &
- GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
- seq_printf(m, "Media Power Well: %s\n",
- (gen9_powergate_status &
- GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
- }
-
- /* Not exactly sure what this is */
- print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
- GEN6_GT_GFX_RC6_LOCKED);
- print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
- print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
- print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
-
- if (INTEL_GEN(dev_priv) <= 7) {
- seq_printf(m, "RC6 voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
- seq_printf(m, "RC6+ voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
- seq_printf(m, "RC6++ voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
- }
-
- return i915_forcewake_domains(m, NULL);
-}
-
-static int i915_drpc_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
- int err = -ENODEV;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- err = vlv_drpc_info(m);
- else if (INTEL_GEN(dev_priv) >= 6)
- err = gen6_drpc_info(m);
- else
- err = ilk_drpc_info(m);
- }
-
- return err;
-}
-
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1533,10 +1319,8 @@ stringify_guc_log_type(enum guc_log_buffer_type type)
return "";
}
-static void i915_guc_log_info(struct seq_file *m,
- struct drm_i915_private *dev_priv)
+static void i915_guc_log_info(struct seq_file *m, struct intel_guc_log *log)
{
- struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
enum guc_log_buffer_type type;
if (!intel_guc_log_relay_created(log)) {
@@ -1560,11 +1344,12 @@ static void i915_guc_log_info(struct seq_file *m,
static int i915_guc_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_uc *uc = &dev_priv->gt.uc;
- if (!USES_GUC(dev_priv))
+ if (!intel_uc_uses_guc(uc))
return -ENODEV;
- i915_guc_log_info(m, dev_priv);
+ i915_guc_log_info(m, &uc->guc.log);
/* Add more as required ... */
@@ -1574,11 +1359,11 @@ static int i915_guc_info(struct seq_file *m, void *data)
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_guc *guc = &dev_priv->gt.uc.guc;
- struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
+ struct intel_uc *uc = &dev_priv->gt.uc;
+ struct guc_stage_desc *desc = uc->guc.stage_desc_pool_vaddr;
int index;
- if (!USES_GUC_SUBMISSION(dev_priv))
+ if (!intel_uc_uses_guc_submission(uc))
return -ENODEV;
for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
@@ -1665,11 +1450,12 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
static int i915_guc_log_level_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
+ struct intel_uc *uc = &dev_priv->gt.uc;
- if (!USES_GUC(dev_priv))
+ if (!intel_uc_uses_guc(uc))
return -ENODEV;
- *val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log);
+ *val = intel_guc_log_get_level(&uc->guc.log);
return 0;
}
@@ -1677,11 +1463,12 @@ static int i915_guc_log_level_get(void *data, u64 *val)
static int i915_guc_log_level_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
+ struct intel_uc *uc = &dev_priv->gt.uc;
- if (!USES_GUC(dev_priv))
+ if (!intel_uc_uses_guc(uc))
return -ENODEV;
- return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val);
+ return intel_guc_log_set_level(&uc->guc.log, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
@@ -2359,10 +2146,8 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
{"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
- {"i915_drpc_info", i915_drpc_info, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
{"i915_context_status", i915_context_status, 0},
- {"i915_forcewake_domains", i915_forcewake_domains, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_llc", i915_llc, 0},
{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 4dd8294b68e1..82d9df15b22b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -44,7 +44,6 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
-#include <drm/i915_drm.h>
#include "display/intel_acpi.h"
#include "display/intel_audio.h"
@@ -58,6 +57,7 @@
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
+#include "display/intel_psr.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"
@@ -70,6 +70,7 @@
#include "i915_debugfs.h"
#include "i915_drv.h"
+#include "i915_ioc32.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "i915_perf.h"
@@ -79,6 +80,8 @@
#include "i915_sysfs.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
+#include "intel_dram.h"
+#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pm.h"
#include "vlv_suspend.h"
@@ -212,7 +215,8 @@ intel_teardown_mchbar(struct drm_i915_private *dev_priv)
release_resource(&dev_priv->mch_res);
}
-static int i915_driver_modeset_probe(struct drm_i915_private *i915)
+/* part #1: call before irq install */
+static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915)
{
int ret;
@@ -236,15 +240,28 @@ static int i915_driver_modeset_probe(struct drm_i915_private *i915)
intel_csr_ucode_init(i915);
- ret = intel_irq_install(i915);
+ ret = intel_modeset_init_noirq(i915);
if (ret)
- goto cleanup_csr;
+ goto cleanup_vga_client;
+
+ return 0;
+
+cleanup_vga_client:
+ intel_vga_unregister(i915);
+out:
+ return ret;
+}
+
+/* part #2: call after irq install */
+static int i915_driver_modeset_probe(struct drm_i915_private *i915)
+{
+ int ret;
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
ret = intel_modeset_init(i915);
if (ret)
- goto cleanup_irq;
+ goto out;
ret = i915_gem_init(i915);
if (ret)
@@ -264,6 +281,8 @@ static int i915_driver_modeset_probe(struct drm_i915_private *i915)
intel_init_ipc(i915);
+ intel_psr_set_force_mode_changed(i915->psr.dp);
+
return 0;
cleanup_gem:
@@ -271,16 +290,10 @@ cleanup_gem:
i915_gem_driver_remove(i915);
i915_gem_driver_release(i915);
cleanup_modeset:
+ /* FIXME */
intel_modeset_driver_remove(i915);
intel_irq_uninstall(i915);
intel_modeset_driver_remove_noirq(i915);
- goto cleanup_csr;
-cleanup_irq:
- intel_irq_uninstall(i915);
-cleanup_csr:
- intel_csr_ucode_fini(i915);
- intel_power_domains_driver_remove(i915);
- intel_vga_unregister(i915);
out:
return ret;
}
@@ -376,6 +389,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
+ pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2);
if (pre) {
drm_err(&dev_priv->drm, "This is a pre-production stepping. "
@@ -454,7 +468,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
intel_init_audio_hooks(dev_priv);
- intel_display_crc_init(dev_priv);
intel_detect_preproduction_hw(dev_priv);
@@ -553,494 +566,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
intel_gvt_sanitize_options(dev_priv);
}
-#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
-
-static const char *intel_dram_type_str(enum intel_dram_type type)
-{
- static const char * const str[] = {
- DRAM_TYPE_STR(UNKNOWN),
- DRAM_TYPE_STR(DDR3),
- DRAM_TYPE_STR(DDR4),
- DRAM_TYPE_STR(LPDDR3),
- DRAM_TYPE_STR(LPDDR4),
- };
-
- if (type >= ARRAY_SIZE(str))
- type = INTEL_DRAM_UNKNOWN;
-
- return str[type];
-}
-
-#undef DRAM_TYPE_STR
-
-static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
-{
- return dimm->ranks * 64 / (dimm->width ?: 1);
-}
-
-/* Returns total GB for the whole DIMM */
-static int skl_get_dimm_size(u16 val)
-{
- return val & SKL_DRAM_SIZE_MASK;
-}
-
-static int skl_get_dimm_width(u16 val)
-{
- if (skl_get_dimm_size(val) == 0)
- return 0;
-
- switch (val & SKL_DRAM_WIDTH_MASK) {
- case SKL_DRAM_WIDTH_X8:
- case SKL_DRAM_WIDTH_X16:
- case SKL_DRAM_WIDTH_X32:
- val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
- return 8 << val;
- default:
- MISSING_CASE(val);
- return 0;
- }
-}
-
-static int skl_get_dimm_ranks(u16 val)
-{
- if (skl_get_dimm_size(val) == 0)
- return 0;
-
- val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
-
- return val + 1;
-}
-
-/* Returns total GB for the whole DIMM */
-static int cnl_get_dimm_size(u16 val)
-{
- return (val & CNL_DRAM_SIZE_MASK) / 2;
-}
-
-static int cnl_get_dimm_width(u16 val)
-{
- if (cnl_get_dimm_size(val) == 0)
- return 0;
-
- switch (val & CNL_DRAM_WIDTH_MASK) {
- case CNL_DRAM_WIDTH_X8:
- case CNL_DRAM_WIDTH_X16:
- case CNL_DRAM_WIDTH_X32:
- val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
- return 8 << val;
- default:
- MISSING_CASE(val);
- return 0;
- }
-}
-
-static int cnl_get_dimm_ranks(u16 val)
-{
- if (cnl_get_dimm_size(val) == 0)
- return 0;
-
- val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
-
- return val + 1;
-}
-
-static bool
-skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
-{
- /* Convert total GB to Gb per DRAM device */
- return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
-}
-
-static void
-skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
- struct dram_dimm_info *dimm,
- int channel, char dimm_name, u16 val)
-{
- if (INTEL_GEN(dev_priv) >= 10) {
- dimm->size = cnl_get_dimm_size(val);
- dimm->width = cnl_get_dimm_width(val);
- dimm->ranks = cnl_get_dimm_ranks(val);
- } else {
- dimm->size = skl_get_dimm_size(val);
- dimm->width = skl_get_dimm_width(val);
- dimm->ranks = skl_get_dimm_ranks(val);
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
- channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
- yesno(skl_is_16gb_dimm(dimm)));
-}
-
-static int
-skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
- struct dram_channel_info *ch,
- int channel, u32 val)
-{
- skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
- channel, 'L', val & 0xffff);
- skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
- channel, 'S', val >> 16);
-
- if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
- drm_dbg_kms(&dev_priv->drm, "CH%u not populated\n", channel);
- return -EINVAL;
- }
-
- if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
- ch->ranks = 2;
- else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
- ch->ranks = 2;
- else
- ch->ranks = 1;
-
- ch->is_16gb_dimm =
- skl_is_16gb_dimm(&ch->dimm_l) ||
- skl_is_16gb_dimm(&ch->dimm_s);
-
- drm_dbg_kms(&dev_priv->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n",
- channel, ch->ranks, yesno(ch->is_16gb_dimm));
-
- return 0;
-}
-
-static bool
-intel_is_dram_symmetric(const struct dram_channel_info *ch0,
- const struct dram_channel_info *ch1)
-{
- return !memcmp(ch0, ch1, sizeof(*ch0)) &&
- (ch0->dimm_s.size == 0 ||
- !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
-}
-
-static int
-skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
-{
- struct dram_info *dram_info = &dev_priv->dram_info;
- struct dram_channel_info ch0 = {}, ch1 = {};
- u32 val;
- int ret;
-
- val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
- ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
- if (ret == 0)
- dram_info->num_channels++;
-
- val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
- ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
- if (ret == 0)
- dram_info->num_channels++;
-
- if (dram_info->num_channels == 0) {
- drm_info(&dev_priv->drm,
- "Number of memory channels is zero\n");
- return -EINVAL;
- }
-
- /*
- * If any of the channel is single rank channel, worst case output
- * will be same as if single rank memory, so consider single rank
- * memory.
- */
- if (ch0.ranks == 1 || ch1.ranks == 1)
- dram_info->ranks = 1;
- else
- dram_info->ranks = max(ch0.ranks, ch1.ranks);
-
- if (dram_info->ranks == 0) {
- drm_info(&dev_priv->drm,
- "couldn't get memory rank information\n");
- return -EINVAL;
- }
-
- dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
-
- dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
-
- drm_dbg_kms(&dev_priv->drm, "Memory configuration is symmetric? %s\n",
- yesno(dram_info->symmetric_memory));
- return 0;
-}
-
-static enum intel_dram_type
-skl_get_dram_type(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
-
- switch (val & SKL_DRAM_DDR_TYPE_MASK) {
- case SKL_DRAM_DDR_TYPE_DDR3:
- return INTEL_DRAM_DDR3;
- case SKL_DRAM_DDR_TYPE_DDR4:
- return INTEL_DRAM_DDR4;
- case SKL_DRAM_DDR_TYPE_LPDDR3:
- return INTEL_DRAM_LPDDR3;
- case SKL_DRAM_DDR_TYPE_LPDDR4:
- return INTEL_DRAM_LPDDR4;
- default:
- MISSING_CASE(val);
- return INTEL_DRAM_UNKNOWN;
- }
-}
-
-static int
-skl_get_dram_info(struct drm_i915_private *dev_priv)
-{
- struct dram_info *dram_info = &dev_priv->dram_info;
- u32 mem_freq_khz, val;
- int ret;
-
- dram_info->type = skl_get_dram_type(dev_priv);
- drm_dbg_kms(&dev_priv->drm, "DRAM type: %s\n",
- intel_dram_type_str(dram_info->type));
-
- ret = skl_dram_get_channels_info(dev_priv);
- if (ret)
- return ret;
-
- val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
- mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
- SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
-
- dram_info->bandwidth_kbps = dram_info->num_channels *
- mem_freq_khz * 8;
-
- if (dram_info->bandwidth_kbps == 0) {
- drm_info(&dev_priv->drm,
- "Couldn't get system memory bandwidth\n");
- return -EINVAL;
- }
-
- dram_info->valid = true;
- return 0;
-}
-
-/* Returns Gb per DRAM device */
-static int bxt_get_dimm_size(u32 val)
-{
- switch (val & BXT_DRAM_SIZE_MASK) {
- case BXT_DRAM_SIZE_4GBIT:
- return 4;
- case BXT_DRAM_SIZE_6GBIT:
- return 6;
- case BXT_DRAM_SIZE_8GBIT:
- return 8;
- case BXT_DRAM_SIZE_12GBIT:
- return 12;
- case BXT_DRAM_SIZE_16GBIT:
- return 16;
- default:
- MISSING_CASE(val);
- return 0;
- }
-}
-
-static int bxt_get_dimm_width(u32 val)
-{
- if (!bxt_get_dimm_size(val))
- return 0;
-
- val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
-
- return 8 << val;
-}
-
-static int bxt_get_dimm_ranks(u32 val)
-{
- if (!bxt_get_dimm_size(val))
- return 0;
-
- switch (val & BXT_DRAM_RANK_MASK) {
- case BXT_DRAM_RANK_SINGLE:
- return 1;
- case BXT_DRAM_RANK_DUAL:
- return 2;
- default:
- MISSING_CASE(val);
- return 0;
- }
-}
-
-static enum intel_dram_type bxt_get_dimm_type(u32 val)
-{
- if (!bxt_get_dimm_size(val))
- return INTEL_DRAM_UNKNOWN;
-
- switch (val & BXT_DRAM_TYPE_MASK) {
- case BXT_DRAM_TYPE_DDR3:
- return INTEL_DRAM_DDR3;
- case BXT_DRAM_TYPE_LPDDR3:
- return INTEL_DRAM_LPDDR3;
- case BXT_DRAM_TYPE_DDR4:
- return INTEL_DRAM_DDR4;
- case BXT_DRAM_TYPE_LPDDR4:
- return INTEL_DRAM_LPDDR4;
- default:
- MISSING_CASE(val);
- return INTEL_DRAM_UNKNOWN;
- }
-}
-
-static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
- u32 val)
-{
- dimm->width = bxt_get_dimm_width(val);
- dimm->ranks = bxt_get_dimm_ranks(val);
-
- /*
- * Size in register is Gb per DRAM device. Convert to total
- * GB to match the way we report this for non-LP platforms.
- */
- dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
-}
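/*
 * [Illustrative aside, not part of the patch] Example of the Gb-per-device
 * to total-GB conversion above: a 64-bit channel built from x8 devices
 * needs 64 / 8 = 8 devices, so with 8 Gbit per device
 *
 *   size = 8 Gbit * 8 devices / 8 bits-per-byte = 8 GB
 *
 * (intel_dimm_num_devices() is assumed to derive the device count from
 * the DIMM width in this way.)
 */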
-
-static int
-bxt_get_dram_info(struct drm_i915_private *dev_priv)
-{
- struct dram_info *dram_info = &dev_priv->dram_info;
- u32 dram_channels;
- u32 mem_freq_khz, val;
- u8 num_active_channels;
- int i;
-
- val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
- mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
- BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
-
- dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
- num_active_channels = hweight32(dram_channels);
-
- /* Each active bit represents a 4-byte channel */
- dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
-
- if (dram_info->bandwidth_kbps == 0) {
- drm_info(&dev_priv->drm,
- "Couldn't get system memory bandwidth\n");
- return -EINVAL;
- }
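/*
 * [Illustrative aside, not part of the patch] Example of the bandwidth
 * math above: with a request value giving mem_freq_khz = 2133334 and two
 * bits set in BXT_DRAM_CHANNEL_ACTIVE_MASK (hweight32 == 2),
 *
 *   bandwidth_kbps = 2133334 * 2 * 4 = 17066672 kBps  (~17 GB/s)
 *
 * since each active bit is a 4-byte (32-bit) channel, half the width of
 * the 8-byte channels in the SKL path above.
 */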
-
- /*
- * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
- */
- for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
- struct dram_dimm_info dimm;
- enum intel_dram_type type;
-
- val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
- if (val == 0xFFFFFFFF)
- continue;
-
- dram_info->num_channels++;
-
- bxt_get_dimm_info(&dimm, val);
- type = bxt_get_dimm_type(val);
-
- drm_WARN_ON(&dev_priv->drm, type != INTEL_DRAM_UNKNOWN &&
- dram_info->type != INTEL_DRAM_UNKNOWN &&
- dram_info->type != type);
-
- drm_dbg_kms(&dev_priv->drm,
- "CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
- i - BXT_D_CR_DRP0_DUNIT_START,
- dimm.size, dimm.width, dimm.ranks,
- intel_dram_type_str(type));
-
- /*
- * If any of the channels is a single rank channel, the
- * worst case output will be the same as for single rank
- * memory, so consider it single rank memory.
- */
- if (dram_info->ranks == 0)
- dram_info->ranks = dimm.ranks;
- else if (dimm.ranks == 1)
- dram_info->ranks = 1;
-
- if (type != INTEL_DRAM_UNKNOWN)
- dram_info->type = type;
- }
-
- if (dram_info->type == INTEL_DRAM_UNKNOWN ||
- dram_info->ranks == 0) {
- drm_info(&dev_priv->drm, "couldn't get memory information\n");
- return -EINVAL;
- }
-
- dram_info->valid = true;
- return 0;
-}
-
-static void
-intel_get_dram_info(struct drm_i915_private *dev_priv)
-{
- struct dram_info *dram_info = &dev_priv->dram_info;
- int ret;
-
- /*
- * Assume 16Gb DIMMs are present until proven otherwise.
- * This is only used for the level 0 watermark latency
- * w/a which does not apply to bxt/glk.
- */
- dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
-
- if (INTEL_GEN(dev_priv) < 9 || !HAS_DISPLAY(dev_priv))
- return;
-
- if (IS_GEN9_LP(dev_priv))
- ret = bxt_get_dram_info(dev_priv);
- else
- ret = skl_get_dram_info(dev_priv);
- if (ret)
- return;
-
- drm_dbg_kms(&dev_priv->drm, "DRAM bandwidth: %u kBps, channels: %u\n",
- dram_info->bandwidth_kbps,
- dram_info->num_channels);
-
- drm_dbg_kms(&dev_priv->drm, "DRAM ranks: %u, 16Gb DIMMs: %s\n",
- dram_info->ranks, yesno(dram_info->is_16gb_dimm));
-}
-
-static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
-{
- static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
- static const u8 sets[4] = { 1, 1, 2, 2 };
-
- return EDRAM_NUM_BANKS(cap) *
- ways[EDRAM_WAYS_IDX(cap)] *
- sets[EDRAM_SETS_IDX(cap)];
-}
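/*
 * [Illustrative aside, not part of the patch] The size is the product of
 * the three decoded fields, in MB. For example, a capability word that
 * decodes to 4 banks, ways index 1 (8 ways) and sets index 2 (2 sets)
 * yields 4 * 8 * 2 = 64MB of eDRAM.
 */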
-
-static void edram_detect(struct drm_i915_private *dev_priv)
-{
- u32 edram_cap = 0;
-
- if (!(IS_HASWELL(dev_priv) ||
- IS_BROADWELL(dev_priv) ||
- INTEL_GEN(dev_priv) >= 9))
- return;
-
- edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);
-
- /* NB: We can't write IDICR yet because we don't have gt funcs set up */
-
- if (!(edram_cap & EDRAM_ENABLED))
- return;
-
- /*
- * The capability bits needed for the size calculation are not present
- * before gen9, so always return 128MB.
- */
- if (INTEL_GEN(dev_priv) < 9)
- dev_priv->edram_size_mb = 128;
- else
- dev_priv->edram_size_mb =
- gen9_edram_size_mb(dev_priv, edram_cap);
-
- dev_info(dev_priv->drm.dev,
- "Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
-}
-
/**
* i915_driver_hw_probe - setup state requiring device access
* @dev_priv: device private
@@ -1084,7 +609,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
intel_sanitize_options(dev_priv);
/* needs to be done before ggtt probe */
- edram_detect(dev_priv);
+ intel_dram_edram_detect(dev_priv);
i915_perf_init(dev_priv);
@@ -1186,7 +711,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
* Fill the dram structure to get the system raw bandwidth and
* dram info. This will be used for memory latency calculation.
*/
- intel_get_dram_info(dev_priv);
+ intel_dram_detect(dev_priv);
intel_bw_init_hw(dev_priv);
@@ -1235,12 +760,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
i915_gem_driver_register(dev_priv);
i915_pmu_register(dev_priv);
- /*
- * Notify a valid surface after modesetting,
- * when running inside a VM.
- */
- if (intel_vgpu_active(dev_priv))
- I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
+ intel_vgpu_register(dev_priv);
/* Reveal our presence to userspace */
if (drm_dev_register(dev, 0) == 0) {
@@ -1370,8 +890,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
return ERR_PTR(err);
}
- i915->drm.dev_private = i915;
-
i915->drm.pdev = pdev;
pci_set_drvdata(pdev, i915);
@@ -1449,7 +967,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
disable_rpm_wakeref_asserts(&i915->runtime_pm);
- i915_detect_vgpu(i915);
+ intel_vgpu_detect(i915);
ret = i915_driver_mmio_probe(i915);
if (ret < 0)
@@ -1459,10 +977,18 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto out_cleanup_mmio;
- ret = i915_driver_modeset_probe(i915);
+ ret = i915_driver_modeset_probe_noirq(i915);
if (ret < 0)
goto out_cleanup_hw;
+ ret = intel_irq_install(i915);
+ if (ret)
+ goto out_cleanup_modeset;
+
+ ret = i915_driver_modeset_probe(i915);
+ if (ret < 0)
+ goto out_cleanup_irq;
+
i915_driver_register(i915);
enable_rpm_wakeref_asserts(&i915->runtime_pm);
@@ -1471,6 +997,10 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+out_cleanup_irq:
+ intel_irq_uninstall(i915);
+out_cleanup_modeset:
+ /* FIXME */
out_cleanup_hw:
i915_driver_hw_remove(i915);
intel_memory_regions_driver_release(i915);
@@ -1494,13 +1024,6 @@ void i915_driver_remove(struct drm_i915_private *i915)
i915_driver_unregister(i915);
- /*
- * After unregistering the device to prevent any new users, cancel
- * all in-flight requests so that we can quickly unbind the active
- * resources.
- */
- intel_gt_set_wedged(&i915->gt);
-
/* Flush any external code that still may be under the RCU lock */
synchronize_rcu();
@@ -2230,12 +1753,12 @@ const struct dev_pm_ops i915_pm_ops = {
static const struct file_operations i915_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
- .release = drm_release,
+ .release = drm_release_noglobal,
.unlocked_ioctl = drm_ioctl,
.mmap = i915_gem_mmap,
.poll = drm_poll,
.read = drm_read,
- .compat_ioctl = i915_compat_ioctl,
+ .compat_ioctl = i915_ioc32_compat_ioctl,
.llseek = noop_llseek,
};
@@ -2327,9 +1850,6 @@ static struct drm_driver driver = {
.gem_prime_export = i915_gem_prime_export,
.gem_prime_import = i915_gem_prime_import,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
- .get_scanout_position = i915_get_crtc_scanoutpos,
-
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9928d00ea0b1..1f5b9a584f71 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -59,7 +59,6 @@
#include <drm/drm_connector.h>
#include <drm/i915_mei_hdcp_interface.h>
-#include "i915_fixed.h"
#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"
@@ -105,18 +104,23 @@
#include "intel_region_lmem.h"
-#include "intel_gvt.h"
-
/* General customization:
*/
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20200114"
-#define DRIVER_TIMESTAMP 1579001978
+#define DRIVER_DATE "20200313"
+#define DRIVER_TIMESTAMP 1584144591
struct drm_i915_gem_object;
+/*
+ * The code assumes that the hpd_pins below have consecutive values,
+ * starting with HPD_PORT_A. In most cases the HPD pin associated with
+ * any port can be retrieved by adding the corresponding port (or phy)
+ * enum value to HPD_PORT_A. For example:
+ * HPD_PORT_C = HPD_PORT_A + PHY_C - PHY_A
+ */
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
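/*
 * [Illustrative aside, not part of the patch] A minimal sketch of the
 * arithmetic the new comment describes; the helper name is hypothetical:
 *
 *	static enum hpd_pin phy_to_hpd_pin(enum phy phy)
 *	{
 *		return HPD_PORT_A + phy - PHY_A;
 *	}
 *
 * so PHY_C maps to HPD_PORT_A + (PHY_C - PHY_A) = HPD_PORT_C.
 */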
@@ -505,7 +509,7 @@ struct i915_psr {
bool dc3co_enabled;
u32 dc3co_exit_delay;
struct delayed_work dc3co_work;
- bool initially_probed;
+ bool force_mode_changed;
};
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
@@ -732,14 +736,6 @@ enum intel_ddb_partitioning {
INTEL_DDB_PART_5_6, /* IVB+ */
};
-struct intel_wm_level {
- bool enable;
- u32 pri_val;
- u32 spr_val;
- u32 cur_val;
- u32 fbc_val;
-};
-
struct ilk_wm_values {
u32 wm_pipe[3];
u32 wm_lp[3];
@@ -798,56 +794,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
return false;
}
-struct skl_wm_level {
- u16 min_ddb_alloc;
- u16 plane_res_b;
- u8 plane_res_l;
- bool plane_en;
- bool ignore_lines;
-};
-
-/* Stores plane specific WM parameters */
-struct skl_wm_params {
- bool x_tiled, y_tiled;
- bool rc_surface;
- bool is_planar;
- u32 width;
- u8 cpp;
- u32 plane_pixel_rate;
- u32 y_min_scanlines;
- u32 plane_bytes_per_line;
- uint_fixed_16_16_t plane_blocks_per_line;
- uint_fixed_16_16_t y_tile_minimum;
- u32 linetime_us;
- u32 dbuf_block_size;
-};
-
-enum intel_pipe_crc_source {
- INTEL_PIPE_CRC_SOURCE_NONE,
- INTEL_PIPE_CRC_SOURCE_PLANE1,
- INTEL_PIPE_CRC_SOURCE_PLANE2,
- INTEL_PIPE_CRC_SOURCE_PLANE3,
- INTEL_PIPE_CRC_SOURCE_PLANE4,
- INTEL_PIPE_CRC_SOURCE_PLANE5,
- INTEL_PIPE_CRC_SOURCE_PLANE6,
- INTEL_PIPE_CRC_SOURCE_PLANE7,
- INTEL_PIPE_CRC_SOURCE_PIPE,
- /* TV/DP on pre-gen5/vlv can't use the pipe source. */
- INTEL_PIPE_CRC_SOURCE_TV,
- INTEL_PIPE_CRC_SOURCE_DP_B,
- INTEL_PIPE_CRC_SOURCE_DP_C,
- INTEL_PIPE_CRC_SOURCE_DP_D,
- INTEL_PIPE_CRC_SOURCE_AUTO,
- INTEL_PIPE_CRC_SOURCE_MAX,
-};
-
-#define INTEL_PIPE_CRC_ENTRIES_NR 128
-struct intel_pipe_crc {
- spinlock_t lock;
- int skipped;
- enum intel_pipe_crc_source source;
-};
-
struct i915_frontbuffer_tracking {
spinlock_t lock;
@@ -865,13 +811,6 @@ struct i915_virtual_gpu {
u32 caps;
};
-/* used in computing the new watermarks state */
-struct intel_wm_config {
- unsigned int num_pipes_active;
- bool sprites_enabled;
- bool sprites_scaled;
-};
-
struct intel_cdclk_config {
unsigned int cdclk, vco, ref, bypass;
u8 voltage_level;
@@ -1043,21 +982,24 @@ struct drm_i915_private {
struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
-#ifdef CONFIG_DEBUG_FS
- struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
-#endif
+ /**
+ * dpll and cdclk state is protected by connection_mutex.
+ * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
+ * Must be global rather than per dpll, because on some platforms
+ * plls share registers.
+ */
+ struct {
+ struct mutex lock;
- /* dpll and cdclk state is protected by connection_mutex */
- int num_shared_dpll;
- struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
- const struct intel_dpll_mgr *dpll_mgr;
+ int num_shared_dpll;
+ struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+ const struct intel_dpll_mgr *mgr;
- /*
- * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
- * Must be global rather than per dpll, because on some platforms
- * plls share registers.
- */
- struct mutex dpll_lock;
+ struct {
+ int nssc;
+ int ssc;
+ } ref_clks;
+ } dpll;
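/*
 * [Illustrative aside, not part of the patch] With this change, users of
 * the old dev_priv->dpll_lock / dpll_mgr fields move to the new
 * sub-struct, e.g. (sketch):
 *
 *	mutex_lock(&dev_priv->dpll.lock);
 *	... program shared DPLL registers ...
 *	mutex_unlock(&dev_priv->dpll.lock);
 *
 * and dev_priv->dpll_mgr becomes dev_priv->dpll.mgr.
 */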
struct list_head global_obj_list;
@@ -1078,8 +1020,6 @@ struct drm_i915_private {
struct work_struct free_work;
} atomic_helper;
- u16 orig_clock;
-
bool mchbar_need_disable;
struct intel_l3_parity l3_parity;
@@ -1274,16 +1214,6 @@ struct drm_i915_private {
*/
};
-struct dram_dimm_info {
- u8 size, width, ranks;
-};
-
-struct dram_channel_info {
- struct dram_dimm_info dimm_l, dimm_s;
- u8 ranks;
- bool is_16gb_dimm;
-};
-
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
return container_of(dev, struct drm_i915_private, drm);
@@ -1554,6 +1484,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define GLK_REVID_A0 0x0
#define GLK_REVID_A1 0x1
+#define GLK_REVID_A2 0x2
+#define GLK_REVID_B0 0x3
#define IS_GLK_REVID(dev_priv, since, until) \
(IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
@@ -1692,10 +1624,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
-/* Having GuC is not the same as using GuC */
-#define USES_GUC(dev_priv) intel_uc_uses_guc(&(dev_priv)->gt.uc)
-#define USES_GUC_SUBMISSION(dev_priv) intel_uc_uses_guc_submission(&(dev_priv)->gt.uc)
-
#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
@@ -1741,11 +1669,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
}
/* i915_drv.c */
-#ifdef CONFIG_COMPAT
-long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-#else
-#define i915_compat_ioctl NULL
-#endif
extern const struct dev_pm_ops i915_pm_ops;
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -1754,16 +1677,6 @@ void i915_driver_remove(struct drm_i915_private *i915);
int i915_resume_switcheroo(struct drm_i915_private *i915);
int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
-static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
-{
- return dev_priv->gvt;
-}
-
-static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
-{
- return dev_priv->vgpu.active;
-}
-
int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1828,12 +1741,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
-static inline int __must_check
-i915_mutex_lock_interruptible(struct drm_device *dev)
-{
- return mutex_lock_interruptible(&dev->struct_mutex);
-}
-
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -2004,10 +1911,4 @@ i915_coherent_map_type(struct drm_i915_private *i915)
return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}
-static inline bool intel_guc_submission_is_enabled(struct intel_guc *guc)
-{
- return intel_guc_is_submission_supported(guc) &&
- intel_guc_is_ready(guc);
-}
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d92cf966fa3f..ca5420012a22 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -26,7 +26,6 @@
*/
#include <drm/drm_vma_manager.h>
-#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 0697bedebeef..4518b9b35c3d 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -26,8 +26,6 @@
*
*/
-#include <drm/i915_drm.h>
-
#include "gem/i915_gem_context.h"
#include "gt/intel_gt_requests.h"
@@ -292,7 +290,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
GEM_BUG_ON(!drm_mm_node_allocated(node));
vma = container_of(node, typeof(*vma), node);
- /* If we are using coloring to insert guard pages between
+ /*
+ * If we are using coloring to insert guard pages between
* different cache domains within the address space, we have
* to check whether the objects on either side of our range
* abutt and conflict. If they are in conflict, then we evict
@@ -309,22 +308,18 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
}
}
- if (flags & PIN_NONBLOCK &&
- (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
+ if (i915_vma_is_pinned(vma)) {
ret = -ENOSPC;
break;
}
- /* Overlap of objects in the same batch? */
- if (i915_vma_is_pinned(vma)) {
+ if (flags & PIN_NONBLOCK && i915_vma_is_active(vma)) {
ret = -ENOSPC;
- if (vma->exec_flags &&
- *vma->exec_flags & EXEC_OBJECT_PINNED)
- ret = -EINVAL;
break;
}
- /* Never show fear in the face of dragons!
+ /*
+ * Never show fear in the face of dragons!
*
* We cannot directly remove this node from within this
* iterator and as with i915_gem_evict_something() we employ
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 049cd3785347..d152b648c73c 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -21,10 +21,9 @@
* IN THE SOFTWARE.
*/
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "i915_scatterlist.h"
+#include "i915_pvinfo.h"
#include "i915_vgpu.h"
/**
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e7834fa1e0ac..cb43381b0d37 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -15,8 +15,6 @@
#include <asm/set_memory.h>
#include <asm/smp.h>
-#include <drm/i915_drm.h>
-
#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index c1007245f46d..8e45ca3d2ede 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -28,9 +28,10 @@
*/
#include <linux/compat.h>
-#include <drm/i915_drm.h>
#include <drm/drm_ioctl.h>
+
#include "i915_drv.h"
+#include "i915_ioc32.h"
struct drm_i915_getparam32 {
s32 param;
@@ -67,7 +68,7 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
};
/**
- * i915_compat_ioctl - handle the mistakes of the past
+ * i915_ioc32_compat_ioctl - handle the mistakes of the past
* @filp: the file pointer
* @cmd: the ioctl command (and encoded flags)
* @arg: the ioctl argument (from userspace)
@@ -75,7 +76,7 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
*/
-long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+long i915_ioc32_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
drm_ioctl_compat_t *fn = NULL;
diff --git a/drivers/gpu/drm/i915/i915_ioc32.h b/drivers/gpu/drm/i915/i915_ioc32.h
new file mode 100644
index 000000000000..40dcd55ca213
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ioc32.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __I915_IOC32_H__
+#define __I915_IOC32_H__
+
+#ifdef CONFIG_COMPAT
+struct file;
+long i915_ioc32_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+#else
+#define i915_ioc32_compat_ioctl NULL
+#endif
+
+#endif /* __I915_IOC32_H__ */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3d0cd0960bd2..9f0653cf0510 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -34,7 +34,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_irq.h>
-#include <drm/i915_drm.h>
#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
@@ -169,6 +168,14 @@ static const u32 hpd_tgp[HPD_NUM_PINS] = {
[HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};
+static void
+intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+ drm_crtc_handle_vblank(&crtc->base);
+}
+
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
i915_reg_t iir, i915_reg_t ier)
{
@@ -772,13 +779,15 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
return (position + crtc->scanline_offset) % vtotal;
}
-bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
+static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
{
+ struct drm_device *dev = _crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(drm_crtc_from_index(dev, index));
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
enum pipe pipe = crtc->pipe;
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
@@ -890,6 +899,14 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
return true;
}
+bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
+ ktime_t *vblank_time, bool in_vblank_irq)
+{
+ return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
+ crtc, max_error, vblank_time, in_vblank_irq,
+ i915_get_crtc_scanoutpos);
+}
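/*
 * [Illustrative aside, not part of the patch] With the drm_driver-level
 * .get_vblank_timestamp / .get_scanout_position hooks removed earlier in
 * this patch, this helper is presumably wired up per CRTC instead, e.g.
 * (sketch):
 *
 *	static const struct drm_crtc_funcs intel_crtc_funcs = {
 *		...
 *		.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
 *	};
 */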
+
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1200,8 +1217,8 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
u32 crc2, u32 crc3,
u32 crc4)
{
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
trace_intel_pipe_crc(crtc, crcs);
@@ -1364,7 +1381,7 @@ static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -1382,7 +1399,7 @@ static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
@@ -1406,7 +1423,7 @@ static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
@@ -1432,7 +1449,7 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -1739,11 +1756,12 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (pch_iir & SDE_POISON)
drm_err(&dev_priv->drm, "PCH poison interrupt\n");
- if (pch_iir & SDE_FDI_MASK)
+ if (pch_iir & SDE_FDI_MASK) {
for_each_pipe(dev_priv, pipe)
drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
I915_READ(FDI_RX_IIR(pipe)));
+ }
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
@@ -1823,11 +1841,12 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
- if (pch_iir & SDE_FDI_MASK_CPT)
+ if (pch_iir & SDE_FDI_MASK_CPT) {
for_each_pipe(dev_priv, pipe)
drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
I915_READ(FDI_RX_IIR(pipe)));
+ }
if (pch_iir & SDE_ERROR_CPT)
cpt_serr_int_handler(dev_priv);
@@ -1968,7 +1987,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (de_iir & DE_PIPE_VBLANK(pipe))
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
@@ -2021,7 +2040,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
}
/* check event from PCH */
@@ -2334,7 +2353,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(dev_priv, pipe);
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 812c47a9c2d6..25f25cd95818 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -101,10 +101,8 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
-bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
+bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
+ ktime_t *vblank_time, bool in_vblank_irq);
u32 i915_get_vblank_counter(struct drm_crtc *crtc);
u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 24b1f0ce8743..2c80a0194c80 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -26,6 +26,7 @@
#include <linux/vga_switcheroo.h>
#include <drm/drm_drv.h>
+#include <drm/i915_pciids.h>
#include "display/intel_fbdev.h"
@@ -437,7 +438,7 @@ static const struct intel_device_info snb_m_gt2_info = {
.has_rc6 = 1, \
.has_rc6p = 1, \
.has_rps = true, \
- .ppgtt_type = INTEL_PPGTT_FULL, \
+ .ppgtt_type = INTEL_PPGTT_ALIASING, \
.ppgtt_size = 31, \
IVB_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
@@ -494,7 +495,7 @@ static const struct intel_device_info vlv_info = {
.has_rps = true,
.display.has_gmch = 1,
.display.has_hotplug = 1,
- .ppgtt_type = INTEL_PPGTT_FULL,
+ .ppgtt_type = INTEL_PPGTT_ALIASING,
.ppgtt_size = 31,
.has_snoop = true,
.has_coherent_ggtt = false,
@@ -822,7 +823,6 @@ static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_TIGERLAKE),
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .require_force_probe = 1,
.display.has_modular_fia = 1,
.engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index b5249ee5bda6..551be589d6f4 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -555,8 +555,9 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
aging_tail = hw_tail;
stream->oa_buffer.aging_timestamp = now;
} else {
- DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %x\n",
- hw_tail);
+ drm_err(&stream->perf->i915->drm,
+ "Ignoring spurious out of range OA buffer tail pointer = %x\n",
+ hw_tail);
}
}
@@ -745,7 +746,8 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
*/
if (drm_WARN_ON(&uncore->i915->drm,
(OA_BUFFER_SIZE - head) < report_size)) {
- DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
+ drm_err(&uncore->i915->drm,
+ "Spurious OA head ptr: non-integral report offset\n");
break;
}
@@ -1041,7 +1043,8 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
*/
if (drm_WARN_ON(&uncore->i915->drm,
(OA_BUFFER_SIZE - head) < report_size)) {
- DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
+ drm_err(&uncore->i915->drm,
+ "Spurious OA head ptr: non-integral report offset\n");
break;
}
@@ -1339,9 +1342,10 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
ce->tag = stream->specific_ctx_id;
- DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
- stream->specific_ctx_id,
- stream->specific_ctx_id_mask);
+ drm_dbg(&stream->perf->i915->drm,
+ "filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
+ stream->specific_ctx_id,
+ stream->specific_ctx_id_mask);
return 0;
}
@@ -1401,8 +1405,10 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
/*
* Unset exclusive_stream first, it will be checked while disabling
* the metric set on gen8+.
+ *
+ * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
*/
- perf->exclusive_stream = NULL;
+ WRITE_ONCE(perf->exclusive_stream, NULL);
perf->ops.disable_metric_set(stream);
free_oa_buffer(stream);
@@ -1966,9 +1972,10 @@ out:
return i915_vma_get(oa_bo->vma);
}
-static int emit_oa_config(struct i915_perf_stream *stream,
- struct i915_oa_config *oa_config,
- struct intel_context *ce)
+static struct i915_request *
+emit_oa_config(struct i915_perf_stream *stream,
+ struct i915_oa_config *oa_config,
+ struct intel_context *ce)
{
struct i915_request *rq;
struct i915_vma *vma;
@@ -1976,7 +1983,7 @@ static int emit_oa_config(struct i915_perf_stream *stream,
vma = get_oa_vma(stream, oa_config);
if (IS_ERR(vma))
- return PTR_ERR(vma);
+ return ERR_CAST(vma);
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
@@ -2001,13 +2008,17 @@ static int emit_oa_config(struct i915_perf_stream *stream,
err = rq->engine->emit_bb_start(rq,
vma->node.start, 0,
I915_DISPATCH_SECURE);
+ if (err)
+ goto err_add_request;
+
+ i915_request_get(rq);
err_add_request:
i915_request_add(rq);
err_vma_unpin:
i915_vma_unpin(vma);
err_vma_put:
i915_vma_put(vma);
- return err;
+ return err ? ERR_PTR(err) : rq;
}
static struct intel_context *oa_context(struct i915_perf_stream *stream)
@@ -2015,7 +2026,8 @@ static struct intel_context *oa_context(struct i915_perf_stream *stream)
return stream->pinned_ctx ?: stream->engine->kernel_context;
}
-static int hsw_enable_metric_set(struct i915_perf_stream *stream)
+static struct i915_request *
+hsw_enable_metric_set(struct i915_perf_stream *stream)
{
struct intel_uncore *uncore = stream->uncore;
@@ -2190,7 +2202,9 @@ static int gen8_modify_self(struct intel_context *ce,
struct i915_request *rq;
int err;
+ intel_engine_pm_get(ce->engine);
rq = i915_request_create(ce);
+ intel_engine_pm_put(ce->engine);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -2418,7 +2432,8 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
}
-static int gen8_enable_metric_set(struct i915_perf_stream *stream)
+static struct i915_request *
+gen8_enable_metric_set(struct i915_perf_stream *stream)
{
struct intel_uncore *uncore = stream->uncore;
struct i915_oa_config *oa_config = stream->oa_config;
@@ -2460,7 +2475,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
*/
ret = lrc_configure_all_contexts(stream, oa_config);
if (ret)
- return ret;
+ return ERR_PTR(ret);
return emit_oa_config(stream, oa_config, oa_context(stream));
}
@@ -2472,7 +2487,8 @@ static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}
-static int gen12_enable_metric_set(struct i915_perf_stream *stream)
+static struct i915_request *
+gen12_enable_metric_set(struct i915_perf_stream *stream)
{
struct intel_uncore *uncore = stream->uncore;
struct i915_oa_config *oa_config = stream->oa_config;
@@ -2503,7 +2519,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
*/
ret = gen12_configure_all_contexts(stream, oa_config);
if (ret)
- return ret;
+ return ERR_PTR(ret);
/*
* For Gen12, performance counters are context
@@ -2513,7 +2529,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
if (stream->ctx) {
ret = gen12_configure_oar_context(stream, true);
if (ret)
- return ret;
+ return ERR_PTR(ret);
}
return emit_oa_config(stream, oa_config, oa_context(stream));
@@ -2657,7 +2673,8 @@ static void gen7_oa_disable(struct i915_perf_stream *stream)
if (intel_wait_for_register(uncore,
GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
50))
- DRM_ERROR("wait for OA to be disabled timed out\n");
+ drm_err(&stream->perf->i915->drm,
+ "wait for OA to be disabled timed out\n");
}
static void gen8_oa_disable(struct i915_perf_stream *stream)
@@ -2668,7 +2685,8 @@ static void gen8_oa_disable(struct i915_perf_stream *stream)
if (intel_wait_for_register(uncore,
GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
50))
- DRM_ERROR("wait for OA to be disabled timed out\n");
+ drm_err(&stream->perf->i915->drm,
+ "wait for OA to be disabled timed out\n");
}
static void gen12_oa_disable(struct i915_perf_stream *stream)
@@ -2680,7 +2698,16 @@ static void gen12_oa_disable(struct i915_perf_stream *stream)
GEN12_OAG_OACONTROL,
GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
50))
- DRM_ERROR("wait for OA to be disabled timed out\n");
+ drm_err(&stream->perf->i915->drm,
+ "wait for OA to be disabled timed out\n");
+
+ intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
+ if (intel_wait_for_register(uncore,
+ GEN12_OA_TLB_INV_CR,
+ 1, 0,
+ 50))
+ drm_err(&stream->perf->i915->drm,
+ "wait for OA tlb invalidate timed out\n");
}
/**
@@ -2708,6 +2735,20 @@ static const struct i915_perf_stream_ops i915_oa_stream_ops = {
.read = i915_oa_read,
};
+static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
+{
+ struct i915_request *rq;
+
+ rq = stream->perf->ops.enable_metric_set(stream);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(rq);
+
+ return 0;
+}
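/*
 * [Illustrative aside, not part of the patch] enable_metric_set() now
 * returns the request that applies the OA config instead of waiting
 * itself. Stream creation waits for it synchronously here, while
 * i915_perf_config_locked() further down can simply drop its reference
 * and let the reconfiguration complete asynchronously.
 */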
+
/**
* i915_oa_stream_init - validate combined props for OA stream and init
* @stream: An i915 perf stream
@@ -2840,9 +2881,9 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
goto err_oa_buf_alloc;
stream->ops = &i915_oa_stream_ops;
- perf->exclusive_stream = stream;
+ WRITE_ONCE(perf->exclusive_stream, stream);
- ret = perf->ops.enable_metric_set(stream);
+ ret = i915_perf_stream_enable_sync(stream);
if (ret) {
DRM_DEBUG("Unable to enable metric set\n");
goto err_enable;
@@ -2860,7 +2901,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return 0;
err_enable:
- perf->exclusive_stream = NULL;
+ WRITE_ONCE(perf->exclusive_stream, NULL);
perf->ops.disable_metric_set(stream);
free_oa_buffer(stream);
@@ -2886,12 +2927,11 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
{
struct i915_perf_stream *stream;
- /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
-
if (engine->class != RENDER_CLASS)
return;
- stream = engine->i915->perf.exclusive_stream;
+ /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
+ stream = READ_ONCE(engine->i915->perf.exclusive_stream);
/*
* For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller
* is already doing that, so nothing to be done for gen12 here.
@@ -3160,7 +3200,7 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
return -EINVAL;
if (config != stream->oa_config) {
- int err;
+ struct i915_request *rq;
/*
* If OA is bound to a specific context, emit the
@@ -3171,11 +3211,13 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
* When set globally, we use a low priority kernel context,
* so it will effectively take effect when idle.
*/
- err = emit_oa_config(stream, config, oa_context(stream));
- if (err == 0)
+ rq = emit_oa_config(stream, config, oa_context(stream));
+ if (!IS_ERR(rq)) {
config = xchg(&stream->oa_config, config);
- else
- ret = err;
+ i915_request_put(rq);
+ } else {
+ ret = PTR_ERR(rq);
+ }
}
i915_oa_config_put(config);
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
index 45e581455f5d..a0e22f00f6cf 100644
--- a/drivers/gpu/drm/i915/i915_perf_types.h
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -339,7 +339,8 @@ struct i915_oa_ops {
* counter reports being sampled. May apply system constraints such as
* disabling EU clock gating as required.
*/
- int (*enable_metric_set)(struct i915_perf_stream *stream);
+ struct i915_request *
+ (*enable_metric_set)(struct i915_perf_stream *stream);
/**
* @disable_metric_set: Remove system constraints associated with using
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index a3b61fb96226..2c062534eac1 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -822,11 +822,6 @@ static ssize_t i915_pmu_event_show(struct device *dev,
return sprintf(buf, "config=0x%lx\n", eattr->val);
}
-static struct attribute_group i915_pmu_events_attr_group = {
- .name = "events",
- /* Patch in attrs at runtime. */
-};
-
static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
struct device_attribute *attr,
@@ -846,13 +841,6 @@ static const struct attribute_group i915_pmu_cpumask_attr_group = {
.attrs = i915_cpumask_attrs,
};
-static const struct attribute_group *i915_pmu_attr_groups[] = {
- &i915_pmu_format_attr_group,
- &i915_pmu_events_attr_group,
- &i915_pmu_cpumask_attr_group,
- NULL
-};
-
#define __event(__config, __name, __unit) \
{ \
.config = (__config), \
@@ -1026,23 +1014,23 @@ err_alloc:
static void free_event_attributes(struct i915_pmu *pmu)
{
- struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;
+ struct attribute **attr_iter = pmu->events_attr_group.attrs;
for (; *attr_iter; attr_iter++)
kfree((*attr_iter)->name);
- kfree(i915_pmu_events_attr_group.attrs);
+ kfree(pmu->events_attr_group.attrs);
kfree(pmu->i915_attr);
kfree(pmu->pmu_attr);
- i915_pmu_events_attr_group.attrs = NULL;
+ pmu->events_attr_group.attrs = NULL;
pmu->i915_attr = NULL;
pmu->pmu_attr = NULL;
}
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
- struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
+ struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
GEM_BUG_ON(!pmu->base.event_init);
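/*
 * [Illustrative aside, not part of the patch] hlist_entry_safe() is
 * container_of() with a NULL check, so moving the node into the new
 * cpuhp sub-struct only changes the member path:
 *
 *	pmu = container_of(node, struct i915_pmu, cpuhp.node);
 */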
@@ -1055,7 +1043,7 @@ static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
- struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
+ struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
unsigned int target;
GEM_BUG_ON(!pmu->base.event_init);
@@ -1072,8 +1060,6 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
return 0;
}
-static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
-
static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
enum cpuhp_state slot;
@@ -1087,21 +1073,22 @@ static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
return ret;
slot = ret;
- ret = cpuhp_state_add_instance(slot, &pmu->node);
+ ret = cpuhp_state_add_instance(slot, &pmu->cpuhp.node);
if (ret) {
cpuhp_remove_multi_state(slot);
return ret;
}
- cpuhp_slot = slot;
+ pmu->cpuhp.slot = slot;
return 0;
}
static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
- WARN_ON(cpuhp_slot == CPUHP_INVALID);
- WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &pmu->node));
- cpuhp_remove_multi_state(cpuhp_slot);
+ WARN_ON(pmu->cpuhp.slot == CPUHP_INVALID);
+ WARN_ON(cpuhp_state_remove_instance(pmu->cpuhp.slot, &pmu->cpuhp.node));
+ cpuhp_remove_multi_state(pmu->cpuhp.slot);
+ pmu->cpuhp.slot = CPUHP_INVALID;
}
static bool is_igp(struct drm_i915_private *i915)
@@ -1118,6 +1105,13 @@ static bool is_igp(struct drm_i915_private *i915)
void i915_pmu_register(struct drm_i915_private *i915)
{
struct i915_pmu *pmu = &i915->pmu;
+ const struct attribute_group *attr_groups[] = {
+ &i915_pmu_format_attr_group,
+ &pmu->events_attr_group,
+ &i915_pmu_cpumask_attr_group,
+ NULL
+ };
+
int ret = -ENOMEM;
if (INTEL_GEN(i915) <= 2) {
@@ -1128,6 +1122,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
spin_lock_init(&pmu->lock);
hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu->timer.function = i915_sample;
+ pmu->cpuhp.slot = CPUHP_INVALID;
if (!is_igp(i915)) {
pmu->name = kasprintf(GFP_KERNEL,
@@ -1143,11 +1138,16 @@ void i915_pmu_register(struct drm_i915_private *i915)
if (!pmu->name)
goto err;
- i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
- if (!i915_pmu_events_attr_group.attrs)
+ pmu->events_attr_group.name = "events";
+ pmu->events_attr_group.attrs = create_event_attributes(pmu);
+ if (!pmu->events_attr_group.attrs)
goto err_name;
- pmu->base.attr_groups = i915_pmu_attr_groups;
+ pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
+ GFP_KERNEL);
+ if (!pmu->base.attr_groups)
+ goto err_attr;
+
pmu->base.task_ctx_nr = perf_invalid_context;
pmu->base.event_init = i915_pmu_event_init;
pmu->base.add = i915_pmu_event_add;
@@ -1159,7 +1159,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
ret = perf_pmu_register(&pmu->base, pmu->name, -1);
if (ret)
- goto err_attr;
+ goto err_groups;
ret = i915_pmu_register_cpuhp_state(pmu);
if (ret)
@@ -1169,6 +1169,8 @@ void i915_pmu_register(struct drm_i915_private *i915)
err_unreg:
perf_pmu_unregister(&pmu->base);
+err_groups:
+ kfree(pmu->base.attr_groups);
err_attr:
pmu->base.event_init = NULL;
free_event_attributes(pmu);
@@ -1194,6 +1196,7 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
perf_pmu_unregister(&pmu->base);
pmu->base.event_init = NULL;
+ kfree(pmu->base.attr_groups);
if (!is_igp(i915))
kfree(pmu->name);
free_event_attributes(pmu);
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 6c1647c5daf2..941f0c14037c 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -10,7 +10,7 @@
#include <linux/hrtimer.h>
#include <linux/perf_event.h>
#include <linux/spinlock_types.h>
-#include <drm/i915_drm.h>
+#include <uapi/drm/i915_drm.h>
struct drm_i915_private;
@@ -39,9 +39,12 @@ struct i915_pmu_sample {
struct i915_pmu {
/**
- * @node: List node for CPU hotplug handling.
+ * @cpuhp: Struct used for CPU hotplug handling.
*/
- struct hlist_node node;
+ struct {
+ struct hlist_node node;
+ enum cpuhp_state slot;
+ } cpuhp;
/**
* @base: PMU base.
*/
@@ -105,6 +108,10 @@ struct i915_pmu {
*/
ktime_t sleep_last;
/**
+ * @events_attr_group: Device events attribute group.
+ */
+ struct attribute_group events_attr_group;
+ /**
* @i915_attr: Memory block holding device attributes.
*/
void *i915_attr;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b09c1d6dc0aa..59e64acc2c56 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -693,6 +693,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OABUFFER_SIZE_8M (6 << 3)
#define OABUFFER_SIZE_16M (7 << 3)
+#define GEN12_OA_TLB_INV_CR _MMIO(0xceec)
+
/* Gen12 OAR unit */
#define GEN12_OAR_OACONTROL _MMIO(0x2960)
#define GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT 1
@@ -2865,6 +2867,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MI_ARB_STATE _MMIO(0x20e4) /* 915+ only */
#define MBUS_ABOX_CTL _MMIO(0x45038)
+#define MBUS_ABOX1_CTL _MMIO(0x45048)
+#define MBUS_ABOX2_CTL _MMIO(0x4504C)
#define MBUS_ABOX_BW_CREDIT_MASK (3 << 20)
#define MBUS_ABOX_BW_CREDIT(x) ((x) << 20)
#define MBUS_ABOX_B_CREDIT_MASK (0xF << 16)
@@ -3283,6 +3287,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* Framebuffer compression for Ivybridge */
#define IVB_FBC_RT_BASE _MMIO(0x7020)
+#define IVB_FBC_RT_BASE_UPPER _MMIO(0x7024)
#define IPS_CTL _MMIO(0x43408)
#define IPS_ENABLE (1 << 31)
@@ -4858,16 +4863,6 @@ enum {
#define _PP_STATUS 0x61200
#define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS)
#define PP_ON REG_BIT(31)
-
-#define _PP_CONTROL_1 0xc7204
-#define _PP_CONTROL_2 0xc7304
-#define ICP_PP_CONTROL(x) _MMIO(((x) == 1) ? _PP_CONTROL_1 : \
- _PP_CONTROL_2)
-#define POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4)
-#define VDD_OVERRIDE_FORCE REG_BIT(3)
-#define BACKLIGHT_ENABLE REG_BIT(2)
-#define PWR_DOWN_ON_RESET REG_BIT(1)
-#define PWR_STATE_TARGET REG_BIT(0)
/*
* Indicates that all dependencies of the panel are on:
*
@@ -4928,6 +4923,7 @@ enum {
#define PFIT_ENABLE (1 << 31)
#define PFIT_PIPE_MASK (3 << 29)
#define PFIT_PIPE_SHIFT 29
+#define PFIT_PIPE(pipe) ((pipe) << 29)
#define VERT_INTERP_DISABLE (0 << 10)
#define VERT_INTERP_BILINEAR (1 << 10)
#define VERT_INTERP_MASK (3 << 10)
@@ -5877,7 +5873,6 @@ enum {
#define _PIPEAGCMAX 0x70010
#define _PIPEBGCMAX 0x71010
-#define PIPEGCMAX_RGB_MASK REG_GENMASK(15, 0)
#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4)
#define _PIPE_MISC_A 0x70030
@@ -5886,6 +5881,7 @@ enum {
#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26) /* glk+ */
#define PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */
#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11)
+#define PIPEMISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
#define PIPEMISC_DITHER_BPC_MASK (7 << 5)
#define PIPEMISC_DITHER_8_BPC (0 << 5)
#define PIPEMISC_DITHER_10_BPC (1 << 5)
@@ -7764,6 +7760,7 @@ enum {
#define BW_BUDDY1_CTL _MMIO(0x45140)
#define BW_BUDDY2_CTL _MMIO(0x45150)
#define BW_BUDDY_DISABLE REG_BIT(31)
+#define BW_BUDDY_TLB_REQ_TIMER_MASK REG_GENMASK(21, 16)
#define BW_BUDDY1_PAGE_MASK _MMIO(0x45144)
#define BW_BUDDY2_PAGE_MASK _MMIO(0x45154)
@@ -9145,14 +9142,19 @@ enum {
#define THROTTLE_12_5 (7 << 2)
#define DISABLE_EARLY_EOT (1 << 1)
-#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
-#define GEN12_DISABLE_EARLY_READ BIT(14)
+#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
+#define GEN12_DISABLE_EARLY_READ REG_BIT(14)
+#define GEN12_PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8)
#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
#define DOP_CLOCK_GATING_DISABLE (1 << 0)
#define PUSH_CONSTANT_DEREF_DISABLE (1 << 8)
#define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1)
+#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c)
+#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
+#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
+
#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
@@ -9253,6 +9255,10 @@ enum {
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 (10 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 (11 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 (12 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 (13 << 16)
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
/* HSW Audio */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 6daf18dbb3d4..c0df71d7d0ff 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -51,7 +51,6 @@ struct execute_cb {
static struct i915_global_request {
struct i915_global base;
struct kmem_cache *slab_requests;
- struct kmem_cache *slab_dependencies;
struct kmem_cache *slab_execute_cbs;
} global;
@@ -291,7 +290,7 @@ bool i915_request_retire(struct i915_request *rq)
spin_unlock_irq(&rq->lock);
remove_from_client(rq);
- list_del(&rq->link);
+ __list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
intel_context_exit(rq->context);
intel_context_unpin(rq->context);
@@ -364,6 +363,50 @@ __await_execution(struct i915_request *rq,
return 0;
}
+static bool fatal_error(int error)
+{
+ switch (error) {
+ case 0: /* not an error! */
+ case -EAGAIN: /* innocent victim of a GT reset (__i915_request_reset) */
+ case -ETIMEDOUT: /* waiting for Godot (timer_i915_sw_fence_wake) */
+ return false;
+ default:
+ return true;
+ }
+}
+
+void __i915_request_skip(struct i915_request *rq)
+{
+ GEM_BUG_ON(!fatal_error(rq->fence.error));
+
+ if (rq->infix == rq->postfix)
+ return;
+
+ /*
+ * As this request likely depends on state from the lost
+ * context, clear out all the user operations leaving the
+ * breadcrumb at the end (so we get the fence notifications).
+ */
+ __i915_request_fill(rq, 0);
+ rq->infix = rq->postfix;
+}
+
+void i915_request_set_error_once(struct i915_request *rq, int error)
+{
+ int old;
+
+ GEM_BUG_ON(!IS_ERR_VALUE((long)error));
+
+ if (i915_request_signaled(rq))
+ return;
+
+ old = READ_ONCE(rq->fence.error);
+ do {
+ if (fatal_error(old))
+ return;
+ } while (!try_cmpxchg(&rq->fence.error, &old, error));
+}
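/*
 * [Illustrative aside, not part of the patch] The try_cmpxchg() loop above
 * implements "set once": it only replaces fence.error while the current
 * value is still non-fatal. On a racing update, try_cmpxchg() reloads the
 * current value into @old and the loop re-checks it, so an already
 * recorded fatal error (e.g. -EIO) is never overwritten by a later one.
 */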
+
bool __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
@@ -393,8 +436,10 @@ bool __i915_request_submit(struct i915_request *request)
if (i915_request_completed(request))
goto xfer;
- if (intel_context_is_banned(request->context))
- i915_request_skip(request, -EIO);
+ if (unlikely(intel_context_is_banned(request->context)))
+ i915_request_set_error_once(request, -EIO);
+ if (unlikely(fatal_error(request->fence.error)))
+ __i915_request_skip(request);
/*
* Are we using semaphores when the gpu is already saturated?
@@ -520,7 +565,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
trace_i915_request_submit(request);
if (unlikely(fence->error))
- i915_request_skip(request, fence->error);
+ i915_request_set_error_once(request, fence->error);
/*
* We need to serialize use of the submit_request() callback
@@ -543,19 +588,31 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
+static void irq_semaphore_cb(struct irq_work *wrk)
+{
+ struct i915_request *rq =
+ container_of(wrk, typeof(*rq), semaphore_work);
+
+ i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE);
+ i915_request_put(rq);
+}
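/*
 * [Illustrative aside, not part of the patch] The priority bump is
 * deferred from the FENCE_COMPLETE callback to this irq_work, presumably
 * to avoid recursing into the scheduler locks from the fence-signaling
 * path. The i915_request_get()/i915_request_put() pair keeps the request
 * alive until the deferred callback has run.
 */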
+
static int __i915_sw_fence_call
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
- struct i915_request *request =
- container_of(fence, typeof(*request), semaphore);
+ struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
switch (state) {
case FENCE_COMPLETE:
- i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
+ if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) {
+ i915_request_get(rq);
+ init_irq_work(&rq->semaphore_work, irq_semaphore_cb);
+ irq_work_queue(&rq->semaphore_work);
+ }
break;
case FENCE_FREE:
- i915_request_put(request);
+ i915_request_put(rq);
break;
}
@@ -692,6 +749,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
RCU_INIT_POINTER(rq->timeline, tl);
RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
rq->hwsp_seqno = tl->hwsp_seqno;
+ GEM_BUG_ON(i915_request_completed(rq));
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
@@ -737,6 +795,8 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->infix = rq->ring->emit; /* end of header; start of user payload */
intel_context_mark_active(ce);
+ list_add_tail_rcu(&rq->link, &tl->requests);
+
return rq;
err_unwind:
@@ -790,16 +850,26 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
struct dma_fence *fence;
int err;
- GEM_BUG_ON(i915_request_timeline(rq) ==
- rcu_access_pointer(signal->timeline));
+ if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
+ return 0;
+
+ if (i915_request_started(signal))
+ return 0;
fence = NULL;
rcu_read_lock();
spin_lock_irq(&signal->lock);
- if (!i915_request_started(signal) &&
- !list_is_first(&signal->link,
- &rcu_dereference(signal->timeline)->requests)) {
- struct i915_request *prev = list_prev_entry(signal, link);
+ do {
+ struct list_head *pos = READ_ONCE(signal->link.prev);
+ struct i915_request *prev;
+
+ /* Confirm signal has not been retired, the link is valid */
+ if (unlikely(i915_request_started(signal)))
+ break;
+
+ /* Is signal the earliest request on its timeline? */
+ if (pos == &rcu_dereference(signal->timeline)->requests)
+ break;
/*
* Peek at the request before us in the timeline. That
@@ -807,20 +877,25 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
* after acquiring a reference to it, confirm that it is
* still part of the signaler's timeline.
*/
- if (i915_request_get_rcu(prev)) {
- if (list_next_entry(prev, link) == signal)
- fence = &prev->fence;
- else
- i915_request_put(prev);
+ prev = list_entry(pos, typeof(*prev), link);
+ if (!i915_request_get_rcu(prev))
+ break;
+
+ /* After the strong barrier, confirm prev is still attached */
+ if (unlikely(READ_ONCE(prev->link.next) != &signal->link)) {
+ i915_request_put(prev);
+ break;
}
- }
+
+ fence = &prev->fence;
+ } while (0);
spin_unlock_irq(&signal->lock);
rcu_read_unlock();
if (!fence)
return 0;
err = 0;
- if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
+ if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
err = i915_sw_fence_await_dma_fence(&rq->submit,
fence, 0,
I915_FENCE_GFP);
@@ -844,7 +919,7 @@ already_busywaiting(struct i915_request *rq)
*
* See the are-we-too-late? check in __i915_request_submit().
*/
- return rq->sched.semaphores | rq->engine->saturated;
+ return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
}
static int
@@ -902,6 +977,8 @@ emit_semaphore_wait(struct i915_request *to,
struct i915_request *from,
gfp_t gfp)
{
+ const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
+
if (!intel_context_use_semaphores(to->context))
goto await_fence;
@@ -909,7 +986,7 @@ emit_semaphore_wait(struct i915_request *to,
goto await_fence;
/* Just emit the first semaphore we see as request space is limited. */
- if (already_busywaiting(to) & from->engine->mask)
+ if (already_busywaiting(to) & mask)
goto await_fence;
if (i915_request_await_start(to, from) < 0)
@@ -922,7 +999,7 @@ emit_semaphore_wait(struct i915_request *to,
if (__emit_semaphore_wait(to, from, from->fence.seqno))
goto await_fence;
- to->sched.semaphores |= from->engine->mask;
+ to->sched.semaphores |= mask;
to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
return 0;
@@ -1065,14 +1142,45 @@ __i915_request_await_execution(struct i915_request *to,
&from->fence))
return 0;
- /* Ensure both start together [after all semaphores in signal] */
- if (intel_engine_has_semaphores(to->engine))
- err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
- else
- err = i915_request_await_start(to, from);
+ /*
+ * Wait until the start of this request.
+ *
+ * The execution cb fires when we submit the request to HW. But in
+ * many cases this may be long before the request itself is ready to
+ * run (consider that we submit 2 requests for the same context, where
+ * the request of interest is behind an indefinite spinner). So we hook
+ * up to both to reduce our queues and keep the execution lag minimised
+ * in the worst case, though we hope that the await_start is elided.
+ */
+ err = i915_request_await_start(to, from);
if (err < 0)
return err;
+ /*
+ * Ensure both start together [after all semaphores in signal]
+ *
+ * Now that we are queued to the HW at roughly the same time (thanks
+ * to the execute cb) and are ready to run at roughly the same time
+ * (thanks to the await start), our signaler may still be indefinitely
+ * delayed by waiting on a semaphore from a remote engine. If our
+ * signaler depends on a semaphore, so indirectly do we, and we do not
+ * want to start our payload until our signaler also starts theirs.
+ * So we wait.
+ *
+ * However, there is also a second condition for which we need to wait
+ * for the precise start of the signaler. Consider that the signaler
+ * was submitted in a chain of requests following another context
+ * (with just an ordinary intra-engine fence dependency between the
+ * two). In this case the signaler is queued to HW, but not for
+ * immediate execution, and so we must wait until it reaches the
+ * active slot.
+ */
+ if (intel_engine_has_semaphores(to->engine)) {
+ err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
+ if (err < 0)
+ return err;
+ }
+
/* Couple the dependency tree for PI on this exposed to->fence */
if (to->engine->schedule) {
err = i915_sched_node_add_dependency(&to->sched, &from->sched);
@@ -1193,23 +1301,6 @@ i915_request_await_object(struct i915_request *to,
return ret;
}
-void i915_request_skip(struct i915_request *rq, int error)
-{
- GEM_BUG_ON(!IS_ERR_VALUE((long)error));
- dma_fence_set_error(&rq->fence, error);
-
- if (rq->infix == rq->postfix)
- return;
-
- /*
- * As this request likely depends on state from the lost
- * context, clear out all the user operations leaving the
- * breadcrumb at the end (so we get the fence notifications).
- */
- __i915_request_fill(rq, 0);
- rq->infix = rq->postfix;
-}
-
static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{
@@ -1239,7 +1330,17 @@ __i915_request_add_to_timeline(struct i915_request *rq)
prev = to_request(__i915_active_fence_set(&timeline->last_request,
&rq->fence));
if (prev && !i915_request_completed(prev)) {
- if (is_power_of_2(prev->engine->mask | rq->engine->mask))
+ /*
+ * The requests are supposed to be kept in order. However,
+ * we need to be wary in case the timeline->last_request
+ * is used as a barrier for external modification to this
+ * context.
+ */
+ GEM_BUG_ON(prev->context == rq->context &&
+ i915_seqno_passed(prev->fence.seqno,
+ rq->fence.seqno));
+
+ if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask))
i915_sw_fence_await_sw_fence(&rq->submit,
&prev->submit,
&rq->submitq);
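The is_power_of_2() test above is a compact same-engine check: every engine mask is a single bit, so the OR of two masks is a power of two only when both requests run on the same engine (n is a power of two iff n & (n - 1) == 0). A minimal illustration with made-up mask values:

	unsigned int rcs0 = 1U << 0;	/* illustrative engine masks */
	unsigned int vcs0 = 1U << 2;

	is_power_of_2(rcs0 | rcs0);	/* true: same engine, one bit set */
	is_power_of_2(rcs0 | vcs0);	/* false: two engines, two bits set */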
@@ -1254,8 +1355,6 @@ __i915_request_add_to_timeline(struct i915_request *rq)
0);
}
- list_add_tail(&rq->link, &timeline->requests);
-
/*
* Make sure that no request gazumped us - if it was allocated after
* our i915_request_alloc() and called __i915_request_add() before
@@ -1315,9 +1414,9 @@ void __i915_request_queue(struct i915_request *rq,
* decide whether to preempt the entire chain so that it is ready to
* run at the earliest possible convenience.
*/
- i915_sw_fence_commit(&rq->semaphore);
if (attr && rq->engine->schedule)
rq->engine->schedule(rq, attr);
+ i915_sw_fence_commit(&rq->semaphore);
i915_sw_fence_commit(&rq->submit);
}
@@ -1325,39 +1424,23 @@ void i915_request_add(struct i915_request *rq)
{
struct intel_timeline * const tl = i915_request_timeline(rq);
struct i915_sched_attr attr = {};
- struct i915_request *prev;
+ struct i915_gem_context *ctx;
lockdep_assert_held(&tl->mutex);
lockdep_unpin_lock(&tl->mutex, rq->cookie);
trace_i915_request_add(rq);
+ __i915_request_commit(rq);
- prev = __i915_request_commit(rq);
-
- if (rcu_access_pointer(rq->context->gem_context))
- attr = i915_request_gem_context(rq)->sched;
+ /* XXX placeholder for selftests */
+ rcu_read_lock();
+ ctx = rcu_dereference(rq->context->gem_context);
+ if (ctx)
+ attr = ctx->sched;
+ rcu_read_unlock();
- /*
- * Boost actual workloads past semaphores!
- *
- * With semaphores we spin on one engine waiting for another,
- * simply to reduce the latency of starting our work when
- * the signaler completes. However, if there is any other
- * work that we could be doing on this engine instead, that
- * is better utilisation and will reduce the overall duration
- * of the current work. To avoid PI boosting a semaphore
- * far in the distance past over useful work, we keep a history
- * of any semaphore use along our dependency chain.
- */
if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
attr.priority |= I915_PRIORITY_NOSEMAPHORE;
-
- /*
- * Boost priorities to new clients (new request flows).
- *
- * Allow interactive/synchronous clients to jump ahead of
- * the bulk clients. (FQ_CODEL)
- */
if (list_empty(&rq->sched.signalers_list))
attr.priority |= I915_PRIORITY_WAIT;
@@ -1365,32 +1448,10 @@ void i915_request_add(struct i915_request *rq)
__i915_request_queue(rq, &attr);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
- /*
- * In typical scenarios, we do not expect the previous request on
- * the timeline to be still tracked by timeline->last_request if it
- * has been completed. If the completed request is still here, that
- * implies that request retirement is a long way behind submission,
- * suggesting that we haven't been retiring frequently enough from
- * the combination of retire-before-alloc, waiters and the background
- * retirement worker. So if the last request on this timeline was
- * already completed, do a catch up pass, flushing the retirement queue
- * up to this client. Since we have now moved the heaviest operations
- * during retirement onto secondary workers, such as freeing objects
- * or contexts, retiring a bunch of requests is mostly list management
- * (and cache misses), and so we should not be overly penalizing this
- * client by performing excess work, though we may still performing
- * work on behalf of others -- but instead we should benefit from
- * improved resource management. (Well, that's the theory at least.)
- */
- if (prev &&
- i915_request_completed(prev) &&
- rcu_access_pointer(prev->timeline) == tl)
- i915_request_retire_upto(prev);
-
mutex_unlock(&tl->mutex);
}
-static unsigned long local_clock_us(unsigned int *cpu)
+static unsigned long local_clock_ns(unsigned int *cpu)
{
unsigned long t;
@@ -1407,7 +1468,7 @@ static unsigned long local_clock_us(unsigned int *cpu)
* stop busywaiting, see busywait_stop().
*/
*cpu = get_cpu();
- t = local_clock() >> 10;
+ t = local_clock();
put_cpu();
return t;
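The old helper shifted the raw clock right by 10 bits as a cheap nanoseconds-to-microseconds approximation (dividing by 1024 rather than 1000, about 2.4% off); now that the busywait budget is kept in nanoseconds, the clock value is used directly. The retired conversion, for reference:

	u64 ns = local_clock();	/* monotonic per-cpu clock, in ns */
	u64 us = ns >> 10;	/* ns / 1024 ~= ns / 1000 */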
@@ -1417,15 +1478,15 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
unsigned int this_cpu;
- if (time_after(local_clock_us(&this_cpu), timeout))
+ if (time_after(local_clock_ns(&this_cpu), timeout))
return true;
return this_cpu != cpu;
}
-static bool __i915_spin_request(const struct i915_request * const rq,
- int state, unsigned long timeout_us)
+static bool __i915_spin_request(const struct i915_request * const rq, int state)
{
+ unsigned long timeout_ns;
unsigned int cpu;
/*
@@ -1453,7 +1514,8 @@ static bool __i915_spin_request(const struct i915_request * const rq,
* takes to sleep on a request, on the order of a microsecond.
*/
- timeout_us += local_clock_us(&cpu);
+ timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
+ timeout_ns += local_clock_ns(&cpu);
do {
if (i915_request_completed(rq))
return true;
@@ -1461,7 +1523,7 @@ static bool __i915_spin_request(const struct i915_request * const rq,
if (signal_pending_state(state, current))
break;
- if (busywait_stop(timeout_us, cpu))
+ if (busywait_stop(timeout_ns, cpu))
break;
cpu_relax();
@@ -1547,8 +1609,8 @@ long i915_request_wait(struct i915_request *rq,
* completion. That requires having a good predictor for the request
* duration, which we currently lack.
*/
- if (IS_ACTIVE(CONFIG_DRM_I915_SPIN_REQUEST) &&
- __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
+ if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) &&
+ __i915_spin_request(rq, state)) {
dma_fence_signal(&rq->fence);
goto out;
}
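This is the classic spin-before-sleep optimisation: briefly busy-wait on a request expected to finish in less time than it takes to arm and service an interrupt-driven sleep, and only block once the budget expires. A generic sketch of the shape, with hypothetical names (not the i915 implementation):

	/* Hypothetical helper sketching spin-then-sleep. */
	static bool try_spin_wait(struct completion *done, u64 budget_ns)
	{
		u64 deadline = local_clock() + budget_ns;

		while (local_clock() < deadline) {
			if (completion_done(done))
				return true;	/* completed; sleep avoided */
			cpu_relax();
		}

		return false;	/* caller falls back to a blocking wait */
	}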
@@ -1614,14 +1676,12 @@ out:
static void i915_global_request_shrink(void)
{
- kmem_cache_shrink(global.slab_dependencies);
kmem_cache_shrink(global.slab_execute_cbs);
kmem_cache_shrink(global.slab_requests);
}
static void i915_global_request_exit(void)
{
- kmem_cache_destroy(global.slab_dependencies);
kmem_cache_destroy(global.slab_execute_cbs);
kmem_cache_destroy(global.slab_requests);
}
@@ -1651,17 +1711,9 @@ int __init i915_global_request_init(void)
if (!global.slab_execute_cbs)
goto err_requests;
- global.slab_dependencies = KMEM_CACHE(i915_dependency,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT);
- if (!global.slab_dependencies)
- goto err_execute_cbs;
-
i915_global_register(&global.base);
return 0;
-err_execute_cbs:
- kmem_cache_destroy(global.slab_execute_cbs);
err_requests:
kmem_cache_destroy(global.slab_requests);
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index da8420f03232..3c552bfea67a 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -26,6 +26,7 @@
#define I915_REQUEST_H
#include <linux/dma-fence.h>
+#include <linux/irq_work.h>
#include <linux/lockdep.h>
#include "gem/i915_gem_context_types.h"
@@ -208,6 +209,7 @@ struct i915_request {
};
struct list_head execute_cb;
struct i915_sw_fence semaphore;
+ struct irq_work semaphore_work;
/*
* A list of everyone we wait upon, and everyone who waits upon us.
@@ -303,6 +305,9 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);
+void i915_request_set_error_once(struct i915_request *rq, int error);
+void __i915_request_skip(struct i915_request *rq);
+
struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
const struct i915_sched_attr *attr);
@@ -352,8 +357,6 @@ void i915_request_add(struct i915_request *rq);
bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);
-void i915_request_skip(struct i915_request *request, int error);
-
void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);
@@ -395,7 +398,9 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
- return READ_ONCE(*rq->hwsp_seqno);
+ const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);
+
+ return READ_ONCE(*hwsp);
}
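Two separate READ_ONCE()s are needed because both the pointer and its target can change underneath the reader: i915_request_mark_complete() (below) retargets rq->hwsp_seqno from the shared HWSP page to the request's own fence.seqno. Snapshotting the pointer into a local first guarantees the value load goes through one consistent pointer:

	const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);	/* pointer may be retargeted */
	u32 seqno = READ_ONCE(*hwsp);			/* value may be updated by HW */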
/**
@@ -509,7 +514,8 @@ static inline bool i915_request_completed(const struct i915_request *rq)
static inline void i915_request_mark_complete(struct i915_request *rq)
{
- rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
+ WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
+ (u32 *)&rq->fence.seqno);
}
static inline bool i915_request_has_waitboost(const struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index e19a37a83397..68b06a7ba667 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -209,6 +209,8 @@ static void kick_submission(struct intel_engine_cs *engine,
if (!inflight)
goto unlock;
+ engine->execlists.queue_priority_hint = prio;
+
/*
* If we are already the currently executing context, don't
* bother evaluating if we should preempt ourselves.
@@ -216,7 +218,6 @@ static void kick_submission(struct intel_engine_cs *engine,
if (inflight->context == rq->context)
goto unlock;
- engine->execlists.queue_priority_hint = prio;
if (need_preempt(prio, rq_prio(inflight)))
tasklet_hi_schedule(&engine->execlists.tasklet);
@@ -227,10 +228,10 @@ unlock:
static void __i915_schedule(struct i915_sched_node *node,
const struct i915_sched_attr *attr)
{
+ const int prio = max(attr->priority, node->attr.priority);
struct intel_engine_cs *engine;
struct i915_dependency *dep, *p;
struct i915_dependency stack;
- const int prio = attr->priority;
struct sched_cache cache;
LIST_HEAD(dfs);
@@ -238,9 +239,6 @@ static void __i915_schedule(struct i915_sched_node *node,
lockdep_assert_held(&schedule_lock);
GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
- if (prio <= READ_ONCE(node->attr.priority))
- return;
-
if (node_signaled(node))
return;
@@ -324,7 +322,7 @@ static void __i915_schedule(struct i915_sched_node *node,
GEM_BUG_ON(node_to_request(node)->engine != engine);
- node->attr.priority = prio;
+ WRITE_ONCE(node->attr.priority, prio);
/*
* Once the request is ready, it will be placed into the
@@ -363,6 +361,9 @@ static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
{
struct i915_sched_attr attr = node->attr;
+ if (attr.priority & bump)
+ return;
+
attr.priority |= bump;
__i915_schedule(node, &attr);
}
@@ -486,7 +487,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
GEM_BUG_ON(!list_empty(&dep->dfs_link));
- list_del(&dep->wait_link);
+ list_del_rcu(&dep->wait_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(dep);
}
@@ -497,7 +498,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
GEM_BUG_ON(dep->signaler != node);
GEM_BUG_ON(!list_empty(&dep->dfs_link));
- list_del(&dep->signal_link);
+ list_del_rcu(&dep->signal_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(dep);
}
@@ -526,7 +527,8 @@ static struct i915_global_scheduler global = { {
int __init i915_global_scheduler_init(void)
{
global.slab_dependencies = KMEM_CACHE(i915_dependency,
- SLAB_HWCACHE_ALIGN);
+ SLAB_HWCACHE_ALIGN |
+ SLAB_TYPESAFE_BY_RCU);
if (!global.slab_dependencies)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 8812cdd9007f..ed2be3489f8e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -24,8 +24,6 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/i915_drm.h>
-
#include "display/intel_fbc.h"
#include "display/intel_gmbus.h"
#include "display/intel_vga.h"
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
index 39c79e1c5b52..ed69b5d4a375 100644
--- a/drivers/gpu/drm/i915/i915_switcheroo.c
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -43,7 +43,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return i915 && i915->drm.open_count == 0;
+ return i915 && atomic_read(&i915->drm.open_count) == 0;
}
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index c14d762bd652..45d32ef42787 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,6 +32,7 @@
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
+#include "gt/sysfs_engines.h"
#include "i915_drv.h"
#include "i915_sysfs.h"
@@ -606,6 +607,8 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
drm_err(&dev_priv->drm, "RPS sysfs setup failed\n");
i915_setup_error_capture(kdev);
+
+ intel_engines_add_sysfs(dev_priv);
}
void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 632d6953c78d..029854ae65fc 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -8,7 +8,6 @@
#include "i915_drv.h"
#include "i915_utils.h"
-#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
#define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details."
void
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index b0ade76bec90..03a73d2bd50d 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -34,6 +34,8 @@
struct drm_i915_private;
struct timer_list;
+#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
+
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
#if 0
@@ -100,12 +102,24 @@ bool i915_error_injected(void);
typeof(max) max__ = (max); \
(void)(&start__ == &size__); \
(void)(&start__ == &max__); \
- start__ > max__ || size__ > max__ - start__; \
+ start__ >= max__ || size__ > max__ - start__; \
})
#define range_overflows_t(type, start, size, max) \
range_overflows((type)(start), (type)(size), (type)(max))
+#define range_overflows_end(start, size, max) ({ \
+ typeof(start) start__ = (start); \
+ typeof(size) size__ = (size); \
+ typeof(max) max__ = (max); \
+ (void)(&start__ == &size__); \
+ (void)(&start__ == &max__); \
+ start__ > max__ || size__ > max__ - start__; \
+})
+
+#define range_overflows_end_t(type, start, size, max) \
+ range_overflows_end((type)(start), (type)(size), (type)(max))
+
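The two macro families now differ in how max is interpreted: range_overflows() treats max as an exclusive limit (start must be strictly below it), while the new range_overflows_end() keeps inclusive semantics for ranges allowed to end exactly at max. Worked through for a 100-byte region:

	range_overflows(100, 0, 100);		/* true: start == max is out of range */
	range_overflows(90, 10, 100);		/* false: [90, 100) fits */
	range_overflows_end(100, 0, 100);	/* false: an empty range may end at max */
	range_overflows_end(90, 11, 100);	/* true: spills one byte past max */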
/* Note we don't consider signbits :| */
#define overflows_type(x, T) \
(sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
@@ -234,6 +248,11 @@ static inline u64 ptr_to_u64(const void *ptr)
__idx; \
})
+static inline bool is_power_of_2_u64(u64 n)
+{
+ return (n != 0 && ((n & (n - 1)) == 0));
+}
+
static inline void __list_del_many(struct list_head *head,
struct list_head *first)
{
@@ -241,6 +260,12 @@ static inline void __list_del_many(struct list_head *head,
WRITE_ONCE(head->next, first);
}
+static inline int list_is_last_rcu(const struct list_head *list,
+ const struct list_head *head)
+{
+ return READ_ONCE(list->next) == head;
+}
+
/*
* Wait until the work is finally complete, even if it tries to postpone
* by requeueing itself. Note, that if the worker never cancels itself,
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 4afe21662266..70fca72f5162 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -21,6 +21,8 @@
* SOFTWARE.
*/
+#include "i915_drv.h"
+#include "i915_pvinfo.h"
#include "i915_vgpu.h"
/**
@@ -51,13 +53,13 @@
*/
/**
- * i915_detect_vgpu - detect virtual GPU
+ * intel_vgpu_detect - detect virtual GPU
* @dev_priv: i915 device private
*
* This function is called at the initialization stage, to detect whether
* running on a vGPU.
*/
-void i915_detect_vgpu(struct drm_i915_private *dev_priv)
+void intel_vgpu_detect(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
u64 magic;
@@ -102,11 +104,36 @@ out:
pci_iounmap(pdev, shared_area);
}
+void intel_vgpu_register(struct drm_i915_private *i915)
+{
+ /*
+ * Notify a valid surface after modesetting, when running inside a VM.
+ */
+ if (intel_vgpu_active(i915))
+ intel_uncore_write(&i915->uncore, vgtif_reg(display_ready),
+ VGT_DRV_DISPLAY_READY);
+}
+
+bool intel_vgpu_active(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.active;
+}
+
bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv)
{
return dev_priv->vgpu.caps & VGT_CAPS_FULL_PPGTT;
}
+bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
+}
+
+bool intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
+}
+
struct _balloon_info_ {
/*
* There are up to 2 regions per mappable/unmappable graphic
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 8b3663dad193..ffbb77d08048 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -24,24 +24,17 @@
#ifndef _I915_VGPU_H_
#define _I915_VGPU_H_
-#include "i915_drv.h"
-#include "i915_pvinfo.h"
+#include <linux/types.h>
-void i915_detect_vgpu(struct drm_i915_private *dev_priv);
+struct drm_i915_private;
+struct i915_ggtt;
-bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv);
-
-static inline bool
-intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
-{
- return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
-}
-
-static inline bool
-intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
-{
- return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
-}
+void intel_vgpu_detect(struct drm_i915_private *i915);
+bool intel_vgpu_active(struct drm_i915_private *i915);
+void intel_vgpu_register(struct drm_i915_private *i915);
+bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *i915);
+bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *i915);
+bool intel_vgpu_has_huge_gtt(struct drm_i915_private *i915);
int intel_vgt_balloon(struct i915_ggtt *ggtt);
void intel_vgt_deballoon(struct i915_ggtt *ggtt);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 74dc3ba59ce5..08699fa069aa 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -641,7 +641,6 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
u64 start, end;
int ret;
- GEM_BUG_ON(i915_vma_is_closed(vma));
GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
@@ -919,6 +918,11 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (err)
goto err_fence;
+ if (unlikely(i915_vma_is_closed(vma))) {
+ err = -ENOENT;
+ goto err_unlock;
+ }
+
bound = atomic_read(&vma->flags);
if (unlikely(bound & I915_VMA_ERROR)) {
err = -ENOMEM;
@@ -1093,6 +1097,7 @@ void i915_vma_release(struct kref *ref)
void i915_vma_parked(struct intel_gt *gt)
{
struct i915_vma *vma, *next;
+ LIST_HEAD(closed);
spin_lock_irq(&gt->closed_lock);
list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
@@ -1104,28 +1109,26 @@ void i915_vma_parked(struct intel_gt *gt)
if (!kref_get_unless_zero(&obj->base.refcount))
continue;
- if (i915_vm_tryopen(vm)) {
- list_del_init(&vma->closed_link);
- } else {
+ if (!i915_vm_tryopen(vm)) {
i915_gem_object_put(obj);
- obj = NULL;
+ continue;
}
- spin_unlock_irq(&gt->closed_lock);
+ list_move(&vma->closed_link, &closed);
+ }
+ spin_unlock_irq(&gt->closed_lock);
+
+ /* As the GT is held idle, no vma can be reopened as we destroy them */
+ list_for_each_entry_safe(vma, next, &closed, closed_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+ struct i915_address_space *vm = vma->vm;
- if (obj) {
- __i915_vma_put(vma);
- i915_gem_object_put(obj);
- }
+ INIT_LIST_HEAD(&vma->closed_link);
+ __i915_vma_put(vma);
+ i915_gem_object_put(obj);
i915_vm_close(vm);
-
- /* Restart after dropping lock */
- spin_lock_irq(&gt->closed_lock);
- next = list_first_entry(&gt->closed_vma,
- typeof(*next), closed_link);
}
- spin_unlock_irq(&gt->closed_lock);
}
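The rework applies the standard collect-under-lock, destroy-outside-lock idiom: claimable entries are moved onto a private list while the spinlock is held, then released with the lock dropped, which avoids both the drop-and-retake restart dance and calling potentially sleeping release paths under a spinlock. A generic sketch (pos/next and the helpers are hypothetical):

	LIST_HEAD(stash);

	spin_lock_irq(&lock);
	list_for_each_entry_safe(pos, next, &shared_list, link) {
		if (!try_claim(pos))		/* e.g. a tryget that may fail */
			continue;
		list_move(&pos->link, &stash);	/* claim for local processing */
	}
	spin_unlock_irq(&lock);

	list_for_each_entry_safe(pos, next, &stash, link)
		destroy(pos);			/* may sleep; no lock held */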
static void __i915_vma_iounmap(struct i915_vma *vma)
@@ -1169,7 +1172,7 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
GEM_BUG_ON(!i915_vma_is_pinned(vma));
/* Wait for the vma to be bound before we start! */
- err = i915_request_await_active(rq, &vma->active);
+ err = i915_request_await_active(rq, &vma->active, 0);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index e0942efd5236..63831cdb7402 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -273,21 +273,10 @@ struct i915_vma {
struct rb_node obj_node;
struct hlist_node obj_hash;
- /** This vma's place in the execbuf reservation list */
- struct list_head exec_link;
- struct list_head reloc_link;
-
/** This vma's place in the eviction list */
struct list_head evict_link;
struct list_head closed_link;
-
- /**
- * Used for performing relocations during execbuffer insertion.
- */
- unsigned int *exec_flags;
- struct hlist_node exec_node;
- u32 exec_handle;
};
#endif
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 8e99ad097830..d7fe12734db8 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -23,6 +23,7 @@
*/
#include <drm/drm_print.h>
+#include <drm/i915_pciids.h>
#include "display/intel_cdclk.h"
#include "intel_device_info.h"
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
new file mode 100644
index 000000000000..6b922efb1d7c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_dram.h"
+
+struct dram_dimm_info {
+ u8 size, width, ranks;
+};
+
+struct dram_channel_info {
+ struct dram_dimm_info dimm_l, dimm_s;
+ u8 ranks;
+ bool is_16gb_dimm;
+};
+
+#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
+
+static const char *intel_dram_type_str(enum intel_dram_type type)
+{
+ static const char * const str[] = {
+ DRAM_TYPE_STR(UNKNOWN),
+ DRAM_TYPE_STR(DDR3),
+ DRAM_TYPE_STR(DDR4),
+ DRAM_TYPE_STR(LPDDR3),
+ DRAM_TYPE_STR(LPDDR4),
+ };
+
+ if (type >= ARRAY_SIZE(str))
+ type = INTEL_DRAM_UNKNOWN;
+
+ return str[type];
+}
+
+#undef DRAM_TYPE_STR
+
+static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
+{
+ return dimm->ranks * 64 / (dimm->width ?: 1);
+}
+
+/* Returns total GB for the whole DIMM */
+static int skl_get_dimm_size(u16 val)
+{
+ return val & SKL_DRAM_SIZE_MASK;
+}
+
+static int skl_get_dimm_width(u16 val)
+{
+ if (skl_get_dimm_size(val) == 0)
+ return 0;
+
+ switch (val & SKL_DRAM_WIDTH_MASK) {
+ case SKL_DRAM_WIDTH_X8:
+ case SKL_DRAM_WIDTH_X16:
+ case SKL_DRAM_WIDTH_X32:
+ val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
+ return 8 << val;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int skl_get_dimm_ranks(u16 val)
+{
+ if (skl_get_dimm_size(val) == 0)
+ return 0;
+
+ val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
+
+ return val + 1;
+}
+
+/* Returns total GB for the whole DIMM */
+static int cnl_get_dimm_size(u16 val)
+{
+ return (val & CNL_DRAM_SIZE_MASK) / 2;
+}
+
+static int cnl_get_dimm_width(u16 val)
+{
+ if (cnl_get_dimm_size(val) == 0)
+ return 0;
+
+ switch (val & CNL_DRAM_WIDTH_MASK) {
+ case CNL_DRAM_WIDTH_X8:
+ case CNL_DRAM_WIDTH_X16:
+ case CNL_DRAM_WIDTH_X32:
+ val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
+ return 8 << val;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int cnl_get_dimm_ranks(u16 val)
+{
+ if (cnl_get_dimm_size(val) == 0)
+ return 0;
+
+ val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
+
+ return val + 1;
+}
+
+static bool
+skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
+{
+ /* Convert total GB to Gb per DRAM device */
+ return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
+}
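Both helpers are simple DIMM geometry arithmetic: a rank is 64 bits wide, so a rank built from xN devices needs 64/N of them, and per-device density in Gb is 8 * total_GB / device_count. Worked through for an illustrative 16 GB single-rank x8 DIMM:

	devices = ranks * 64 / width   = 1 * 64 / 8  = 8
	Gb/dev  = 8 * size / devices   = 8 * 16 / 8  = 16	/* flagged as 16Gb */

whereas a 16 GB dual-rank x8 DIMM spreads the capacity over 16 devices of 8Gb each and is not flagged.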
+
+static void
+skl_dram_get_dimm_info(struct drm_i915_private *i915,
+ struct dram_dimm_info *dimm,
+ int channel, char dimm_name, u16 val)
+{
+ if (INTEL_GEN(i915) >= 10) {
+ dimm->size = cnl_get_dimm_size(val);
+ dimm->width = cnl_get_dimm_width(val);
+ dimm->ranks = cnl_get_dimm_ranks(val);
+ } else {
+ dimm->size = skl_get_dimm_size(val);
+ dimm->width = skl_get_dimm_width(val);
+ dimm->ranks = skl_get_dimm_ranks(val);
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
+ channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
+ yesno(skl_is_16gb_dimm(dimm)));
+}
+
+static int
+skl_dram_get_channel_info(struct drm_i915_private *i915,
+ struct dram_channel_info *ch,
+ int channel, u32 val)
+{
+ skl_dram_get_dimm_info(i915, &ch->dimm_l,
+ channel, 'L', val & 0xffff);
+ skl_dram_get_dimm_info(i915, &ch->dimm_s,
+ channel, 'S', val >> 16);
+
+ if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
+ drm_dbg_kms(&i915->drm, "CH%u not populated\n", channel);
+ return -EINVAL;
+ }
+
+ if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
+ ch->ranks = 2;
+ else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
+ ch->ranks = 2;
+ else
+ ch->ranks = 1;
+
+ ch->is_16gb_dimm = skl_is_16gb_dimm(&ch->dimm_l) ||
+ skl_is_16gb_dimm(&ch->dimm_s);
+
+ drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n",
+ channel, ch->ranks, yesno(ch->is_16gb_dimm));
+
+ return 0;
+}
+
+static bool
+intel_is_dram_symmetric(const struct dram_channel_info *ch0,
+ const struct dram_channel_info *ch1)
+{
+ return !memcmp(ch0, ch1, sizeof(*ch0)) &&
+ (ch0->dimm_s.size == 0 ||
+ !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
+}
+
+static int
+skl_dram_get_channels_info(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ struct dram_channel_info ch0 = {}, ch1 = {};
+ u32 val;
+ int ret;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(i915, &ch0, 0, val);
+ if (ret == 0)
+ dram_info->num_channels++;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(i915, &ch1, 1, val);
+ if (ret == 0)
+ dram_info->num_channels++;
+
+ if (dram_info->num_channels == 0) {
+ drm_info(&i915->drm, "Number of memory channels is zero\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If either channel is a single-rank channel, the worst-case output
+ * is the same as for single-rank memory, so treat the whole system
+ * as single-rank.
+ */
+ if (ch0.ranks == 1 || ch1.ranks == 1)
+ dram_info->ranks = 1;
+ else
+ dram_info->ranks = max(ch0.ranks, ch1.ranks);
+
+ if (dram_info->ranks == 0) {
+ drm_info(&i915->drm, "couldn't get memory rank information\n");
+ return -EINVAL;
+ }
+
+ dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
+
+ dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
+
+ drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? %s\n",
+ yesno(dram_info->symmetric_memory));
+
+ return 0;
+}
+
+static enum intel_dram_type
+skl_get_dram_type(struct drm_i915_private *i915)
+{
+ u32 val;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
+
+ switch (val & SKL_DRAM_DDR_TYPE_MASK) {
+ case SKL_DRAM_DDR_TYPE_DDR3:
+ return INTEL_DRAM_DDR3;
+ case SKL_DRAM_DDR_TYPE_DDR4:
+ return INTEL_DRAM_DDR4;
+ case SKL_DRAM_DDR_TYPE_LPDDR3:
+ return INTEL_DRAM_LPDDR3;
+ case SKL_DRAM_DDR_TYPE_LPDDR4:
+ return INTEL_DRAM_LPDDR4;
+ default:
+ MISSING_CASE(val);
+ return INTEL_DRAM_UNKNOWN;
+ }
+}
+
+static int
+skl_get_dram_info(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ u32 mem_freq_khz, val;
+ int ret;
+
+ dram_info->type = skl_get_dram_type(i915);
+ drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
+ intel_dram_type_str(dram_info->type));
+
+ ret = skl_dram_get_channels_info(i915);
+ if (ret)
+ return ret;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
+ mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
+ SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
+
+ dram_info->bandwidth_kbps = dram_info->num_channels *
+ mem_freq_khz * 8;
+
+ if (dram_info->bandwidth_kbps == 0) {
+ drm_info(&i915->drm,
+ "Couldn't get system memory bandwidth\n");
+ return -EINVAL;
+ }
+
+ dram_info->valid = true;
+ return 0;
+}
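The bandwidth estimate is channels * memory clock * 8 bytes per channel transfer. With illustrative numbers for a dual-channel DDR4-2400 system:

	mem_freq_khz   = 2400000			/* 2400 MT/s */
	bandwidth_kbps = 2 * 2400000 * 8 = 38400000	/* ~38.4 GB/s */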
+
+/* Returns Gb per DRAM device */
+static int bxt_get_dimm_size(u32 val)
+{
+ switch (val & BXT_DRAM_SIZE_MASK) {
+ case BXT_DRAM_SIZE_4GBIT:
+ return 4;
+ case BXT_DRAM_SIZE_6GBIT:
+ return 6;
+ case BXT_DRAM_SIZE_8GBIT:
+ return 8;
+ case BXT_DRAM_SIZE_12GBIT:
+ return 12;
+ case BXT_DRAM_SIZE_16GBIT:
+ return 16;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int bxt_get_dimm_width(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return 0;
+
+ val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
+
+ return 8 << val;
+}
+
+static int bxt_get_dimm_ranks(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return 0;
+
+ switch (val & BXT_DRAM_RANK_MASK) {
+ case BXT_DRAM_RANK_SINGLE:
+ return 1;
+ case BXT_DRAM_RANK_DUAL:
+ return 2;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static enum intel_dram_type bxt_get_dimm_type(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return INTEL_DRAM_UNKNOWN;
+
+ switch (val & BXT_DRAM_TYPE_MASK) {
+ case BXT_DRAM_TYPE_DDR3:
+ return INTEL_DRAM_DDR3;
+ case BXT_DRAM_TYPE_LPDDR3:
+ return INTEL_DRAM_LPDDR3;
+ case BXT_DRAM_TYPE_DDR4:
+ return INTEL_DRAM_DDR4;
+ case BXT_DRAM_TYPE_LPDDR4:
+ return INTEL_DRAM_LPDDR4;
+ default:
+ MISSING_CASE(val);
+ return INTEL_DRAM_UNKNOWN;
+ }
+}
+
+static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
+{
+ dimm->width = bxt_get_dimm_width(val);
+ dimm->ranks = bxt_get_dimm_ranks(val);
+
+ /*
+ * Size in register is Gb per DRAM device. Convert to total
+ * GB to match the way we report this for non-LP platforms.
+ */
+ dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
+}
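This inverts the skl direction: the register reports density per device in Gb, so total GB = Gb_per_device * device_count / 8. For an illustrative dual-rank x16 channel populated with 8Gb devices:

	devices = 2 * 64 / 16 = 8
	size_GB = 8 * 8 / 8   = 8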
+
+static int bxt_get_dram_info(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ u32 dram_channels;
+ u32 mem_freq_khz, val;
+ u8 num_active_channels;
+ int i;
+
+ val = intel_uncore_read(&i915->uncore, BXT_P_CR_MC_BIOS_REQ_0_0_0);
+ mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
+ BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
+
+ dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
+ num_active_channels = hweight32(dram_channels);
+
+ /* Each active bit represents 4-byte channel */
+ dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
+
+ if (dram_info->bandwidth_kbps == 0) {
+ drm_info(&i915->drm,
+ "Couldn't get system memory bandwidth\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
+ */
+ for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
+ struct dram_dimm_info dimm;
+ enum intel_dram_type type;
+
+ val = intel_uncore_read(&i915->uncore, BXT_D_CR_DRP0_DUNIT(i));
+ if (val == 0xFFFFFFFF)
+ continue;
+
+ dram_info->num_channels++;
+
+ bxt_get_dimm_info(&dimm, val);
+ type = bxt_get_dimm_type(val);
+
+ drm_WARN_ON(&i915->drm, type != INTEL_DRAM_UNKNOWN &&
+ dram_info->type != INTEL_DRAM_UNKNOWN &&
+ dram_info->type != type);
+
+ drm_dbg_kms(&i915->drm,
+ "CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
+ i - BXT_D_CR_DRP0_DUNIT_START,
+ dimm.size, dimm.width, dimm.ranks,
+ intel_dram_type_str(type));
+
+ /*
+ * If any channel is single-rank, the worst case
+ * is the same as for single-rank memory, so
+ * treat the system as single-rank.
+ */
+ if (dram_info->ranks == 0)
+ dram_info->ranks = dimm.ranks;
+ else if (dimm.ranks == 1)
+ dram_info->ranks = 1;
+
+ if (type != INTEL_DRAM_UNKNOWN)
+ dram_info->type = type;
+ }
+
+ if (dram_info->type == INTEL_DRAM_UNKNOWN || dram_info->ranks == 0) {
+ drm_info(&i915->drm, "couldn't get memory information\n");
+ return -EINVAL;
+ }
+
+ dram_info->valid = true;
+
+ return 0;
+}
+
+void intel_dram_detect(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ int ret;
+
+ /*
+ * Assume 16Gb DIMMs are present until proven otherwise.
+ * This is only used for the level 0 watermark latency
+ * w/a which does not apply to bxt/glk.
+ */
+ dram_info->is_16gb_dimm = !IS_GEN9_LP(i915);
+
+ if (INTEL_GEN(i915) < 9 || !HAS_DISPLAY(i915))
+ return;
+
+ if (IS_GEN9_LP(i915))
+ ret = bxt_get_dram_info(i915);
+ else
+ ret = skl_get_dram_info(i915);
+ if (ret)
+ return;
+
+ drm_dbg_kms(&i915->drm, "DRAM bandwidth: %u kBps, channels: %u\n",
+ dram_info->bandwidth_kbps, dram_info->num_channels);
+
+ drm_dbg_kms(&i915->drm, "DRAM ranks: %u, 16Gb DIMMs: %s\n",
+ dram_info->ranks, yesno(dram_info->is_16gb_dimm));
+}
+
+static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
+{
+ static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
+ static const u8 sets[4] = { 1, 1, 2, 2 };
+
+ return EDRAM_NUM_BANKS(cap) *
+ ways[EDRAM_WAYS_IDX(cap)] *
+ sets[EDRAM_SETS_IDX(cap)];
+}
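The decoded size in MB is simply banks * ways * sets. Assuming, for illustration, a capability word decoding to 8 banks with way index 1 and set index 2:

	8 /* banks */ * ways[1] /* 8 */ * sets[2] /* 2 */ = 128 MB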
+
+void intel_dram_edram_detect(struct drm_i915_private *i915)
+{
+ u32 edram_cap = 0;
+
+ if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || INTEL_GEN(i915) >= 9))
+ return;
+
+ edram_cap = __raw_uncore_read32(&i915->uncore, HSW_EDRAM_CAP);
+
+ /* NB: We can't write IDICR yet because we don't have gt funcs set up */
+
+ if (!(edram_cap & EDRAM_ENABLED))
+ return;
+
+ /*
+ * Pre-gen9 hardware lacks the capability bits needed for the size
+ * calculation, so always report 128MB.
+ */
+ if (INTEL_GEN(i915) < 9)
+ i915->edram_size_mb = 128;
+ else
+ i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap);
+
+ dev_info(i915->drm.dev,
+ "Found %uMB of eDRAM\n", i915->edram_size_mb);
+}
diff --git a/drivers/gpu/drm/i915/intel_dram.h b/drivers/gpu/drm/i915/intel_dram.h
new file mode 100644
index 000000000000..4ba13c13162c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dram.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_DRAM_H__
+#define __INTEL_DRAM_H__
+
+struct drm_i915_private;
+
+void intel_dram_edram_detect(struct drm_i915_private *i915);
+void intel_dram_detect(struct drm_i915_private *i915);
+
+#endif /* __INTEL_DRAM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 38ebd5562c7c..21b91313cc5d 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -22,6 +22,7 @@
*/
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "intel_gvt.h"
/**
@@ -105,7 +106,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
return 0;
}
- if (USES_GUC_SUBMISSION(dev_priv)) {
+ if (intel_uc_wants_guc_submission(&dev_priv->gt.uc)) {
drm_err(&dev_priv->drm,
"i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
return -EIO;
@@ -124,6 +125,11 @@ bail:
return 0;
}
+static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->gvt;
+}
+
/**
* intel_gvt_driver_remove - cleanup GVT components when i915 driver is
* unbinding
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ffac0b862ca5..8375054ba27d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -40,12 +40,36 @@
#include "gt/intel_llc.h"
#include "i915_drv.h"
+#include "i915_fixed.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
+/* Stores plane specific WM parameters */
+struct skl_wm_params {
+ bool x_tiled, y_tiled;
+ bool rc_surface;
+ bool is_planar;
+ u32 width;
+ u8 cpp;
+ u32 plane_pixel_rate;
+ u32 y_min_scanlines;
+ u32 plane_bytes_per_line;
+ uint_fixed_16_16_t plane_blocks_per_line;
+ uint_fixed_16_16_t y_tile_minimum;
+ u32 linetime_us;
+ u32 dbuf_block_size;
+};
+
+/* used in computing the new watermarks state */
+struct intel_wm_config {
+ unsigned int num_pipes_active;
+ bool sprites_enabled;
+ bool sprites_scaled;
+};
+
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (HAS_LLC(dev_priv)) {
@@ -128,16 +152,6 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
*/
I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
PWM1_GATING_DIS | PWM2_GATING_DIS);
-
- /* WaDDIIOTimeout:glk */
- if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
- u32 val = I915_READ(CHICKEN_MISC_2);
- val &= ~(GLK_CL0_PWR_DOWN |
- GLK_CL1_PWR_DOWN |
- GLK_CL2_PWR_DOWN);
- I915_WRITE(CHICKEN_MISC_2, val);
- }
-
}
static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
@@ -469,9 +483,9 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
enum pipe pipe = crtc->pipe;
int sprite0_start, sprite1_start;
+ u32 dsparb, dsparb2, dsparb3;
switch (pipe) {
- u32 dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = I915_READ(DSPARB);
dsparb2 = I915_READ(DSPARB2);
@@ -1969,6 +1983,7 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
int sprite0_start, sprite1_start, fifo_size;
+ u32 dsparb, dsparb2, dsparb3;
if (!crtc_state->fifo_changed)
return;
@@ -1994,7 +2009,6 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
spin_lock(&uncore->lock);
switch (crtc->pipe) {
- u32 dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = intel_uncore_read_fw(uncore, DSPARB);
dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
@@ -2776,7 +2790,7 @@ static bool ilk_validate_wm_level(int level,
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
- const struct intel_crtc *intel_crtc,
+ const struct intel_crtc *crtc,
int level,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *pristate,
@@ -3107,7 +3121,7 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_pipe_wm *pipe_wm;
struct intel_plane *plane;
const struct intel_plane_state *plane_state;
@@ -3147,7 +3161,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
usable_level = 0;
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- ilk_compute_wm_level(dev_priv, intel_crtc, 0, crtc_state,
+ ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
pristate, sprstate, curstate, &pipe_wm->wm[0]);
if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
@@ -3158,7 +3172,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for (level = 1; level <= usable_level; level++) {
struct intel_wm_level *wm = &pipe_wm->wm[level];
- ilk_compute_wm_level(dev_priv, intel_crtc, level, crtc_state,
+ ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
pristate, sprstate, curstate, wm);
/*
@@ -3843,7 +3857,7 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
}
static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
- u32 active_pipes);
+ u8 active_pipes);
static void
skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
@@ -4178,56 +4192,57 @@ struct dbuf_slice_conf_entry {
* as is from BSpec itself - that way it is at least easier
* to compare, change and check.
*/
-static struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
+static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
/* Autogenerated with igt/tools/intel_dbuf_map tool: */
{
{
.active_pipes = BIT(PIPE_A),
.dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1)
- }
+ [PIPE_A] = BIT(DBUF_S1),
+ },
},
{
.active_pipes = BIT(PIPE_B),
.dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1)
- }
+ [PIPE_B] = BIT(DBUF_S1),
+ },
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S2)
- }
+ [PIPE_B] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_C),
.dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S2)
- }
+ [PIPE_C] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2)
- }
+ [PIPE_C] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
.dbuf_mask = {
[PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2)
- }
+ [PIPE_C] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
[PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2)
- }
+ [PIPE_C] = BIT(DBUF_S2),
+ },
},
+ {}
};
/*
@@ -4240,106 +4255,106 @@ static struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
* as is from BSpec itself - that way it is at least easier
* to compare, change and check.
*/
-static struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
+static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
/* Autogenerated with igt/tools/intel_dbuf_map tool: */
{
{
.active_pipes = BIT(PIPE_A),
.dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2)
- }
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_B),
.dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2)
- }
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S2),
- [PIPE_B] = BIT(DBUF_S1)
- }
+ [PIPE_B] = BIT(DBUF_S1),
+ },
},
{
.active_pipes = BIT(PIPE_C),
.dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1)
- }
+ [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
+ },
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2)
- }
+ [PIPE_C] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
.dbuf_mask = {
[PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2)
- }
+ [PIPE_C] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
[PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2)
- }
+ [PIPE_C] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_D),
.dbuf_mask = {
- [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1)
- }
+ [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
+ },
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
- [PIPE_D] = BIT(DBUF_S2)
- }
+ [PIPE_D] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
.dbuf_mask = {
[PIPE_B] = BIT(DBUF_S1),
- [PIPE_D] = BIT(DBUF_S2)
- }
+ [PIPE_D] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
[PIPE_B] = BIT(DBUF_S1),
- [PIPE_D] = BIT(DBUF_S2)
- }
+ [PIPE_D] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
.dbuf_mask = {
[PIPE_C] = BIT(DBUF_S1),
- [PIPE_D] = BIT(DBUF_S2)
- }
+ [PIPE_D] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
[PIPE_C] = BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S2)
- }
+ [PIPE_D] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.dbuf_mask = {
[PIPE_B] = BIT(DBUF_S1),
[PIPE_C] = BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S2)
- }
+ [PIPE_D] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
@@ -4347,19 +4362,18 @@ static struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
[PIPE_A] = BIT(DBUF_S1),
[PIPE_B] = BIT(DBUF_S1),
[PIPE_C] = BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S2)
- }
+ [PIPE_D] = BIT(DBUF_S2),
+ },
},
+ {}
};
-static u8 compute_dbuf_slices(enum pipe pipe,
- u32 active_pipes,
- const struct dbuf_slice_conf_entry *dbuf_slices,
- int size)
+static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
+ const struct dbuf_slice_conf_entry *dbuf_slices)
{
int i;
- for (i = 0; i < size; i++) {
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
if (dbuf_slices[i].active_pipes == active_pipes)
return dbuf_slices[i].dbuf_mask[pipe];
}
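With the {} sentinel (active_pipes == 0) now terminating each table, the loop no longer needs an array size passed in; iteration stops at the first zero entry. This is the usual sentinel-terminated table idiom:

	/* Generic sentinel-terminated lookup (sketch). */
	for (i = 0; table[i].key != 0; i++)
		if (table[i].key == wanted)
			return table[i].value;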
@@ -4371,8 +4385,7 @@ static u8 compute_dbuf_slices(enum pipe pipe,
* returns correspondent DBuf slice mask as stated in BSpec for particular
* platform.
*/
-static u32 icl_compute_dbuf_slices(enum pipe pipe,
- u32 active_pipes)
+static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
{
/*
* FIXME: For ICL this is still a bit unclear as prev BSpec revision
@@ -4386,32 +4399,25 @@ static u32 icl_compute_dbuf_slices(enum pipe pipe,
* still here - we will need it once those additional constraints
* pop up.
*/
- return compute_dbuf_slices(pipe, active_pipes,
- icl_allowed_dbufs,
- ARRAY_SIZE(icl_allowed_dbufs));
+ return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs);
}
-static u32 tgl_compute_dbuf_slices(enum pipe pipe,
- u32 active_pipes)
+static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
{
- return compute_dbuf_slices(pipe, active_pipes,
- tgl_allowed_dbufs,
- ARRAY_SIZE(tgl_allowed_dbufs));
+ return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
}
static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
- u32 active_pipes)
+ u8 active_pipes)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
if (IS_GEN(dev_priv, 12))
- return tgl_compute_dbuf_slices(pipe,
- active_pipes);
+ return tgl_compute_dbuf_slices(pipe, active_pipes);
else if (IS_GEN(dev_priv, 11))
- return icl_compute_dbuf_slices(pipe,
- active_pipes);
+ return icl_compute_dbuf_slices(pipe, active_pipes);
/*
* For anything else just return one slice yet.
* Should be extended for other platforms.
@@ -4470,14 +4476,10 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
u64 *plane_data_rate,
u64 *uv_plane_data_rate)
{
- struct drm_atomic_state *state = crtc_state->uapi.state;
struct intel_plane *plane;
const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
- if (WARN_ON(!state))
- return 0;
-
/* Calculate and cache data rate for each plane */
intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
enum plane_id plane_id = plane->id;
@@ -4505,9 +4507,6 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
- if (WARN_ON(!crtc_state->uapi.state))
- return 0;
-
/* Calculate and cache data rate for each plane */
intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
enum plane_id plane_id = plane->id;
@@ -4548,10 +4547,8 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
static int
skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
{
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct drm_crtc *crtc = crtc_state->uapi.crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
u16 alloc_size, start = 0;
u16 total[I915_MAX_PLANES] = {};
@@ -4568,9 +4565,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
- if (drm_WARN_ON(&dev_priv->drm, !state))
- return 0;
-
if (!crtc_state->hw.active) {
alloc->start = alloc->end = 0;
return 0;
@@ -4609,7 +4603,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
*/
for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
blocks = 0;
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -4646,7 +4640,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
* watermark level, plus an extra share of the leftover blocks
* proportional to its relative data rate.
*/
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
u64 rate;
@@ -4685,7 +4679,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
/* Set the actual DDB start/end points for each plane */
start = alloc->start;
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_ddb_entry *plane_alloc =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
struct skl_ddb_entry *uv_plane_alloc =
@@ -4719,7 +4713,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
* that aren't actually possible.
*/
for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -4756,7 +4750,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
* Go back and disable the transition watermark if it turns out we
* don't have enough DDB blocks for it.
*/
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -5126,21 +5120,30 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
{
struct drm_device *dev = crtc_state->uapi.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- u16 trans_min, trans_y_tile_min;
- const u16 trans_amount = 10; /* This is configurable amount */
+ u16 trans_min, trans_amount, trans_y_tile_min;
u16 wm0_sel_res_b, trans_offset_b, res_blocks;
- /* Transition WM are not recommended by HW team for GEN9 */
- if (INTEL_GEN(dev_priv) <= 9)
- return;
-
/* Transition WM don't make any sense if ipc is disabled */
if (!dev_priv->ipc_enabled)
return;
- trans_min = 14;
+ /*
+ * WaDisableTWM:skl,kbl,cfl,bxt
+ * Transition WM are not recommended by HW team for GEN9
+ */
+ if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
+ return;
+
if (INTEL_GEN(dev_priv) >= 11)
trans_min = 4;
+ else
+ trans_min = 14;
+
+ /* Display WA #1140: glk,cnl */
+ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ trans_amount = 0;
+ else
+ trans_amount = 10; /* This is a configurable amount */
trans_offset_b = trans_min + trans_amount;
@@ -5167,7 +5170,6 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
/* WA BUG:1938466 add one block for non y-tile planes */
if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
res_blocks += 1;
-
}
/*
@@ -5410,8 +5412,12 @@ static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
int level, max_level = ilk_wm_max_level(dev_priv);
for (level = 0; level <= max_level; level++) {
- if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
- !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
+ /*
+ * We don't check uv_wm as the hardware doesn't actually
+ * use it. It only gets used for calculating the required
+ * ddb allocation.
+ */
+ if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
return false;
}
@@ -5768,16 +5774,24 @@ skl_compute_wm(struct intel_atomic_state *state)
ret = skl_build_pipe_wm(new_crtc_state);
if (ret)
return ret;
-
- ret = skl_wm_add_affected_planes(state, crtc);
- if (ret)
- return ret;
}
ret = skl_compute_ddb(state);
if (ret)
return ret;
+ /*
+ * skl_compute_ddb() will have adjusted the final watermarks
+ * based on how much ddb is available. Now we can actually
+ * check if the final watermarks changed.
+ */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = skl_wm_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+ }
+
skl_print_wm_changes(state);
return 0;
@@ -6812,21 +6826,6 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
- /* WaEnable32PlaneMode:icl */
- I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
- _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
-
- /*
- * Wa_1408615072:icl,ehl (vsunit)
- * Wa_1407596294:icl,ehl (hsunit)
- */
- intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE,
- 0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
-
- /* Wa_1407352427:icl,ehl */
- intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
- 0, PSDUNIT_CLKGATE_DIS);
-
/*Wa_14010594013:icl, ehl */
intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
0, CNL_DELAY_PMRSP);
@@ -6837,10 +6836,6 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
u32 vd_pg_enable = 0;
unsigned int i;
- /* Wa_1408615072:tgl */
- intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
- 0, VSUNIT_CLKGATE_DIS_TGL);
-
/* This is not a WA. Enable VD HCP & MFX_ENC powergate */
for (i = 0; i < I915_MAX_VCS; i++) {
if (HAS_ENGINE(dev_priv, _VCS(i)))
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index ef572a0c2566..68bbb1580162 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -201,11 +201,57 @@ static int live_active_retire(void *arg)
return err;
}
+static int live_active_barrier(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct live_active *active;
+ int err = 0;
+
+ /* Check that we get a callback when requests retire upon waiting */
+
+ active = __live_alloc(i915);
+ if (!active)
+ return -ENOMEM;
+
+ err = i915_active_acquire(&active->base);
+ if (err)
+ goto out;
+
+ for_each_uabi_engine(engine, i915) {
+ err = i915_active_acquire_preallocate_barrier(&active->base,
+ engine);
+ if (err)
+ break;
+
+ i915_active_acquire_barrier(&active->base);
+ }
+
+ i915_active_release(&active->base);
+
+ if (err == 0)
+ err = i915_active_wait(&active->base);
+
+ if (err == 0 && !READ_ONCE(active->retired)) {
+ pr_err("i915_active not retired after flushing barriers!\n");
+ err = -EINVAL;
+ }
+
+out:
+ __live_put(active);
+
+ if (igt_flush_test(i915))
+ err = -EIO;
+
+ return err;
+}
+
int i915_active_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_active_wait),
SUBTEST(live_active_retire),
+ SUBTEST(live_active_barrier),
};
if (intel_gt_is_wedged(&i915->gt))
@@ -265,28 +311,40 @@ static void spin_unlock_wait(spinlock_t *lock)
spin_unlock_irq(lock);
}
+static void active_flush(struct i915_active *ref,
+ struct i915_active_fence *active)
+{
+ struct dma_fence *fence;
+
+ fence = xchg(__active_fence_slot(active), NULL);
+ if (!fence)
+ return;
+
+ spin_lock_irq(fence->lock);
+ __list_del_entry(&active->cb.node);
+ spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */
+ atomic_dec(&ref->count);
+
+ GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+}
+
void i915_active_unlock_wait(struct i915_active *ref)
{
if (i915_active_acquire_if_busy(ref)) {
struct active_node *it, *n;
+ /* Wait for all active callbacks */
rcu_read_lock();
- rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- struct dma_fence *f;
-
- /* Wait for all active callbacks */
- f = rcu_dereference(it->base.fence);
- if (f)
- spin_unlock_wait(f->lock);
- }
+ active_flush(ref, &ref->excl);
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node)
+ active_flush(ref, &it->base);
rcu_read_unlock();
i915_active_release(ref);
}
/* And wait for the retire callback */
- spin_lock_irq(&ref->tree_lock);
- spin_unlock_irq(&ref->tree_lock);
+ spin_unlock_wait(&ref->tree_lock);
/* ... which may have been on a thread instead */
flush_work(&ref->work);
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
index 1b856bae67b5..939a6caebb03 100644
--- a/drivers/gpu/drm/i915/selftests/i915_buddy.c
+++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c
@@ -298,10 +298,12 @@ static void igt_mm_config(u64 *size, u64 *chunk_size)
static int igt_buddy_alloc_smoke(void *arg)
{
struct i915_buddy_mm mm;
- int max_order;
+ IGT_TIMEOUT(end_time);
+ I915_RND_STATE(prng);
u64 chunk_size;
u64 mm_size;
- int err;
+ int *order;
+ int err, i;
igt_mm_config(&mm_size, &chunk_size);
@@ -313,10 +315,16 @@ static int igt_buddy_alloc_smoke(void *arg)
return err;
}
- for (max_order = mm.max_order; max_order >= 0; max_order--) {
+ order = i915_random_order(mm.max_order + 1, &prng);
+ if (!order)
+ goto out_fini;
+
+ for (i = 0; i <= mm.max_order; ++i) {
struct i915_buddy_block *block;
- int order;
+ int max_order = order[i];
+ bool timeout = false;
LIST_HEAD(blocks);
+ int order;
u64 total;
err = igt_check_mm(&mm);
@@ -360,6 +368,11 @@ retry:
}
total += i915_buddy_block_size(&mm, block);
+
+ if (__igt_timeout(end_time, NULL)) {
+ timeout = true;
+ break;
+ }
} while (total < mm.size);
if (!err)
@@ -373,7 +386,7 @@ retry:
pr_err("post-mm check failed\n");
}
- if (err)
+ if (err || timeout)
break;
cond_resched();
@@ -382,6 +395,8 @@ retry:
if (err == -ENOMEM)
err = 0;
+ kfree(order);
+out_fini:
i915_buddy_fini(&mm);
return err;
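
The reworked smoke test walks every buddy order exactly once in a random sequence and bails out when a time budget expires. A self-contained user-space sketch of that structure, with a plain Fisher-Yates shuffle standing in for i915_random_order() and clock() standing in for IGT_TIMEOUT (all names illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static void shuffle(int *a, int n)	/* Fisher-Yates */
{
	for (int i = n - 1; i > 0; i--) {
		int j = rand() % (i + 1);
		int t = a[i];

		a[i] = a[j];
		a[j] = t;
	}
}

int main(void)
{
	enum { MAX_ORDER = 12 };
	int order[MAX_ORDER + 1];
	clock_t end_time = clock() + CLOCKS_PER_SEC;	/* 1 s budget */

	for (int i = 0; i <= MAX_ORDER; i++)
		order[i] = i;
	shuffle(order, MAX_ORDER + 1);

	for (int i = 0; i <= MAX_ORDER; i++) {
		printf("pass with max_order=%d\n", order[i]);
		/* ... allocate until full, as the smoke test does ... */
		if (clock() > end_time) {
			puts("time budget exhausted, stopping early");
			break;
		}
	}
	return 0;
}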
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 34138c7bdd15..0a953bfc0585 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -43,6 +43,7 @@ selftest(reset, intel_reset_live_selftests)
selftest(memory_region, intel_memory_region_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
selftest(execlists, intel_execlists_live_selftests)
+selftest(ring_submission, intel_ring_submission_live_selftests)
selftest(perf, i915_perf_live_selftests)
/* Here be dragons: keep last to run last! */
selftest(late_gt_pm, intel_gt_pm_late_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index e8a58fe49c39..9ad4ab088466 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -183,7 +183,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
cancel_rq:
if (err) {
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
}
unpin_hws:
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 3b8986983afc..754d0eb6beaa 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -144,7 +144,6 @@ struct drm_i915_private *mock_gem_device(void)
goto put_device;
}
i915->drm.pdev = pdev;
- i915->drm.dev_private = i915;
intel_runtime_pm_init_early(&i915->runtime_pm);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 8cb2665b2c74..4da22a94790c 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -446,7 +446,7 @@ static int imx_ldb_register(struct drm_device *drm,
if (imx_ldb_ch->bridge) {
ret = drm_bridge_attach(&imx_ldb_ch->encoder,
- imx_ldb_ch->bridge, NULL);
+ imx_ldb_ch->bridge, NULL, 0);
if (ret) {
DRM_ERROR("Failed to initialize bridge with drm\n");
return ret;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 28826c0aa24a..6776ebb3246d 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -359,7 +359,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
if (!fb)
return 0;
- if (!state->crtc)
+ if (WARN_ON(!state->crtc))
return -EINVAL;
crtc_state =
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 3dca424059f7..08fafa4bf8c2 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -24,6 +24,7 @@
struct imx_parallel_display {
struct drm_connector connector;
struct drm_encoder encoder;
+ struct drm_bridge bridge;
struct device *dev;
void *edid;
int edid_len;
@@ -31,7 +32,7 @@ struct imx_parallel_display {
u32 bus_flags;
struct drm_display_mode mode;
struct drm_panel *panel;
- struct drm_bridge *bridge;
+ struct drm_bridge *next_bridge;
};
static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
@@ -44,6 +45,11 @@ static inline struct imx_parallel_display *enc_to_imxpd(struct drm_encoder *e)
return container_of(e, struct imx_parallel_display, encoder);
}
+static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
+{
+ return container_of(b, struct imx_parallel_display, bridge);
+}
+
static int imx_pd_connector_get_modes(struct drm_connector *connector)
{
struct imx_parallel_display *imxpd = con_to_imxpd(connector);
@@ -89,37 +95,148 @@ static struct drm_encoder *imx_pd_connector_best_encoder(
return &imxpd->encoder;
}
-static void imx_pd_encoder_enable(struct drm_encoder *encoder)
+static void imx_pd_bridge_enable(struct drm_bridge *bridge)
{
- struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
drm_panel_prepare(imxpd->panel);
drm_panel_enable(imxpd->panel);
}
-static void imx_pd_encoder_disable(struct drm_encoder *encoder)
+static void imx_pd_bridge_disable(struct drm_bridge *bridge)
{
- struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
drm_panel_disable(imxpd->panel);
drm_panel_unprepare(imxpd->panel);
}
-static int imx_pd_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+static const u32 imx_pd_bus_fmts[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_BGR888_1X24,
+ MEDIA_BUS_FMT_GBR888_1X24,
+ MEDIA_BUS_FMT_RGB666_1X18,
+ MEDIA_BUS_FMT_RGB666_1X24_CPADHI,
+ MEDIA_BUS_FMT_RGB565_1X16,
+};
+
+static u32 *
+imx_pd_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
{
- struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
struct drm_display_info *di = &conn_state->connector->display_info;
- struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
+ u32 *output_fmts;
- if (!imxpd->bus_format && di->num_bus_formats) {
- imx_crtc_state->bus_flags = di->bus_flags;
- imx_crtc_state->bus_format = di->bus_formats[0];
- } else {
- imx_crtc_state->bus_flags = imxpd->bus_flags;
- imx_crtc_state->bus_format = imxpd->bus_format;
+ if (!imxpd->bus_format && !di->num_bus_formats) {
+ *num_output_fmts = ARRAY_SIZE(imx_pd_bus_fmts);
+ return kmemdup(imx_pd_bus_fmts, sizeof(imx_pd_bus_fmts),
+ GFP_KERNEL);
+ }
+
+ *num_output_fmts = 1;
+ output_fmts = kmalloc(sizeof(*output_fmts), GFP_KERNEL);
+ if (!output_fmts)
+ return NULL;
+
+ if (!imxpd->bus_format && di->num_bus_formats)
+ output_fmts[0] = di->bus_formats[0];
+ else
+ output_fmts[0] = imxpd->bus_format;
+
+ return output_fmts;
+}
+
+static bool imx_pd_format_supported(u32 output_fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(imx_pd_bus_fmts); i++) {
+ if (imx_pd_bus_fmts[i] == output_fmt)
+ return true;
+ }
+
+ return false;
+}
+
+static u32 *
+imx_pd_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
+ u32 *input_fmts;
+
+ /*
+ * If the next bridge does not support bus format negotiation, let's
+ * use the static bus format definition (imxpd->bus_format) if it's
+ * specified, RGB888 when it's not.
+ */
+ if (output_fmt == MEDIA_BUS_FMT_FIXED)
+ output_fmt = imxpd->bus_format ? : MEDIA_BUS_FMT_RGB888_1X24;
+
+ /* Now make sure the requested output format is supported. */
+ if ((imxpd->bus_format && imxpd->bus_format != output_fmt) ||
+ !imx_pd_format_supported(output_fmt)) {
+ *num_input_fmts = 0;
+ return NULL;
+ }
+
+ *num_input_fmts = 1;
+ input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ input_fmts[0] = output_fmt;
+ return input_fmts;
+}
+
+static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
+ struct drm_bridge_state *next_bridge_state = NULL;
+ struct drm_bridge *next_bridge;
+ u32 bus_flags, bus_fmt;
+
+ next_bridge = drm_bridge_get_next_bridge(bridge);
+ if (next_bridge)
+ next_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ next_bridge);
+
+ if (next_bridge_state)
+ bus_flags = next_bridge_state->input_bus_cfg.flags;
+ else if (!imxpd->bus_format && di->num_bus_formats)
+ bus_flags = di->bus_flags;
+ else
+ bus_flags = imxpd->bus_flags;
+
+ bus_fmt = bridge_state->input_bus_cfg.format;
+ if (!imx_pd_format_supported(bus_fmt))
+ return -EINVAL;
+
+ if (bus_flags &
+ ~(DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)) {
+ dev_warn(imxpd->dev, "invalid bus_flags (%x)\n", bus_flags);
+ return -EINVAL;
}
+
+ bridge_state->output_bus_cfg.flags = bus_flags;
+ bridge_state->input_bus_cfg.flags = bus_flags;
+ imx_crtc_state->bus_flags = bus_flags;
+ imx_crtc_state->bus_format = bridge_state->input_bus_cfg.format;
imx_crtc_state->di_hsync_pin = 2;
imx_crtc_state->di_vsync_pin = 3;
@@ -143,10 +260,15 @@ static const struct drm_encoder_funcs imx_pd_encoder_funcs = {
.destroy = imx_drm_encoder_destroy,
};
-static const struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
- .enable = imx_pd_encoder_enable,
- .disable = imx_pd_encoder_disable,
- .atomic_check = imx_pd_encoder_atomic_check,
+static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
+ .enable = imx_pd_bridge_enable,
+ .disable = imx_pd_bridge_disable,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_check = imx_pd_bridge_atomic_check,
+ .atomic_get_input_bus_fmts = imx_pd_bridge_atomic_get_input_bus_fmts,
+ .atomic_get_output_bus_fmts = imx_pd_bridge_atomic_get_output_bus_fmts,
};
static int imx_pd_register(struct drm_device *drm,
@@ -166,11 +288,13 @@ static int imx_pd_register(struct drm_device *drm,
*/
imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_helper_add(encoder, &imx_pd_encoder_helper_funcs);
drm_encoder_init(drm, encoder, &imx_pd_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
- if (!imxpd->bridge) {
+ imxpd->bridge.funcs = &imx_pd_bridge_funcs;
+ drm_bridge_attach(encoder, &imxpd->bridge, NULL, 0);
+
+ if (!imxpd->next_bridge) {
drm_connector_helper_add(&imxpd->connector,
&imx_pd_connector_helper_funcs);
drm_connector_init(drm, &imxpd->connector,
@@ -181,8 +305,9 @@ static int imx_pd_register(struct drm_device *drm,
if (imxpd->panel)
drm_panel_attach(imxpd->panel, &imxpd->connector);
- if (imxpd->bridge) {
- ret = drm_bridge_attach(encoder, imxpd->bridge, NULL);
+ if (imxpd->next_bridge) {
+ ret = drm_bridge_attach(encoder, imxpd->next_bridge,
+ &imxpd->bridge, 0);
if (ret < 0) {
dev_err(imxpd->dev, "failed to attach bridge: %d\n",
ret);
@@ -227,7 +352,8 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
imxpd->bus_format = bus_format;
/* port@1 is the output port */
- ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, &imxpd->bridge);
+ ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel,
+ &imxpd->next_bridge);
if (ret && ret != -ENODEV)
return ret;
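
The input-format negotiation added above condenses to a small decision table: MEDIA_BUS_FMT_FIXED resolves to the static format when one is set (RGB888 otherwise), and the result is accepted only if it appears in the supported list. A user-space model of that logic, with placeholder codes rather than the real MEDIA_BUS_FMT_* values:

#include <stdbool.h>
#include <stdio.h>

/* Placeholder codes; not the real MEDIA_BUS_FMT_* values. */
#define FMT_FIXED	1
#define FMT_RGB888	2
#define FMT_RGB565	3

static const unsigned int supported[] = { FMT_RGB888, FMT_RGB565 };

static bool fmt_supported(unsigned int fmt)
{
	for (unsigned int i = 0; i < sizeof(supported) / sizeof(supported[0]); i++)
		if (supported[i] == fmt)
			return true;
	return false;
}

/* Returns the negotiated input format, or 0 when the request fails. */
static unsigned int negotiate(unsigned int static_fmt, unsigned int output_fmt)
{
	if (output_fmt == FMT_FIXED)
		output_fmt = static_fmt ? static_fmt : FMT_RGB888;

	if ((static_fmt && static_fmt != output_fmt) ||
	    !fmt_supported(output_fmt))
		return 0;

	return output_fmt;	/* pass-through: input matches output */
}

int main(void)
{
	printf("%u\n", negotiate(0, FMT_FIXED));	   /* 2: RGB888 default */
	printf("%u\n", negotiate(FMT_RGB565, FMT_FIXED));  /* 3: static wins */
	printf("%u\n", negotiate(FMT_RGB565, FMT_RGB888)); /* 0: mismatch */
	return 0;
}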
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
index 6d47ef7b148c..9dfe7cb530e1 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
@@ -737,7 +737,7 @@ static int ingenic_drm_probe(struct platform_device *pdev)
return ret;
}
- ret = drm_bridge_attach(&priv->encoder, bridge, NULL);
+ ret = drm_bridge_attach(&priv->encoder, bridge, NULL, 0);
if (ret) {
dev_err(dev, "Unable to attach bridge");
return ret;
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 124efe4fa97b..2daac64d8955 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -15,10 +15,14 @@
#include "lima_vm.h"
int lima_sched_timeout_ms;
+uint lima_heap_init_nr_pages = 8;
MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms");
module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
+MODULE_PARM_DESC(heap_init_nr_pages, "initial number of pages to allocate for a heap buffer");
+module_param_named(heap_init_nr_pages, lima_heap_init_nr_pages, uint, 0444);
+
static int lima_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_get_param *args = data;
@@ -68,7 +72,7 @@ static int lima_ioctl_gem_create(struct drm_device *dev, void *data, struct drm_
if (args->pad)
return -EINVAL;
- if (args->flags)
+ if (args->flags & ~(LIMA_BO_FLAG_HEAP))
return -EINVAL;
if (args->size == 0)
@@ -241,6 +245,12 @@ static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
DEFINE_DRM_GEM_FOPS(lima_drm_driver_fops);
+/*
+ * Changelog:
+ *
+ * - 1.1.0 - add heap buffer support
+ */
+
static struct drm_driver lima_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
.open = lima_drm_driver_open,
@@ -250,9 +260,9 @@ static struct drm_driver lima_drm_driver = {
.fops = &lima_drm_driver_fops,
.name = "lima",
.desc = "lima DRM",
- .date = "20190217",
+ .date = "20191231",
.major = 1,
- .minor = 0,
+ .minor = 1,
.patchlevel = 0,
.gem_create_object = lima_gem_create_object,
diff --git a/drivers/gpu/drm/lima/lima_drv.h b/drivers/gpu/drm/lima/lima_drv.h
index 69c7344715c9..f492ecc6a5d9 100644
--- a/drivers/gpu/drm/lima/lima_drv.h
+++ b/drivers/gpu/drm/lima/lima_drv.h
@@ -9,6 +9,7 @@
#include "lima_ctx.h"
extern int lima_sched_timeout_ms;
+extern uint lima_heap_init_nr_pages;
struct lima_vm;
struct lima_bo;
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index d0059d8c97d8..5404e0d668db 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -4,6 +4,8 @@
#include <linux/mm.h>
#include <linux/sync_file.h>
#include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
+#include <linux/dma-mapping.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
@@ -15,6 +17,83 @@
#include "lima_gem.h"
#include "lima_vm.h"
+int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
+{
+ struct page **pages;
+ struct address_space *mapping = bo->base.base.filp->f_mapping;
+ struct device *dev = bo->base.base.dev->dev;
+ size_t old_size = bo->heap_size;
+ size_t new_size = bo->heap_size ? bo->heap_size * 2 :
+ (lima_heap_init_nr_pages << PAGE_SHIFT);
+ struct sg_table sgt;
+ int i, ret;
+
+ if (bo->heap_size >= bo->base.base.size)
+ return -ENOSPC;
+
+ new_size = min(new_size, bo->base.base.size);
+
+ mutex_lock(&bo->base.pages_lock);
+
+ if (bo->base.pages) {
+ pages = bo->base.pages;
+ } else {
+ pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
+ sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
+ if (!pages) {
+ mutex_unlock(&bo->base.pages_lock);
+ return -ENOMEM;
+ }
+
+ bo->base.pages = pages;
+ bo->base.pages_use_count = 1;
+
+ mapping_set_unevictable(mapping);
+ }
+
+ for (i = old_size >> PAGE_SHIFT; i < new_size >> PAGE_SHIFT; i++) {
+ struct page *page = shmem_read_mapping_page(mapping, i);
+
+ if (IS_ERR(page)) {
+ mutex_unlock(&bo->base.pages_lock);
+ return PTR_ERR(page);
+ }
+ pages[i] = page;
+ }
+
+ mutex_unlock(&bo->base.pages_lock);
+
+ ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
+ new_size, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ if (bo->base.sgt) {
+ dma_unmap_sg(dev, bo->base.sgt->sgl,
+ bo->base.sgt->nents, DMA_BIDIRECTIONAL);
+ sg_free_table(bo->base.sgt);
+ } else {
+ bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
+ if (!bo->base.sgt) {
+ sg_free_table(&sgt);
+ return -ENOMEM;
+ }
+ }
+
+ dma_map_sg(dev, sgt.sgl, sgt.nents, DMA_BIDIRECTIONAL);
+
+ *bo->base.sgt = sgt;
+
+ if (vm) {
+ ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
+ if (ret)
+ return ret;
+ }
+
+ bo->heap_size = new_size;
+ return 0;
+}
+
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle)
{
@@ -22,7 +101,8 @@ int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
gfp_t mask;
struct drm_gem_shmem_object *shmem;
struct drm_gem_object *obj;
- struct sg_table *sgt;
+ struct lima_bo *bo;
+ bool is_heap = flags & LIMA_BO_FLAG_HEAP;
shmem = drm_gem_shmem_create(dev, size);
if (IS_ERR(shmem))
@@ -36,10 +116,18 @@ int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
mask |= __GFP_DMA32;
mapping_set_gfp_mask(obj->filp->f_mapping, mask);
- sgt = drm_gem_shmem_get_pages_sgt(obj);
- if (IS_ERR(sgt)) {
- err = PTR_ERR(sgt);
- goto out;
+ if (is_heap) {
+ bo = to_lima_bo(obj);
+ err = lima_heap_alloc(bo, NULL);
+ if (err)
+ goto out;
+ } else {
+ struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj);
+
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto out;
+ }
}
err = drm_gem_handle_create(file, obj, handle);
@@ -79,17 +167,47 @@ static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *f
lima_vm_bo_del(vm, bo);
}
+static int lima_gem_pin(struct drm_gem_object *obj)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+
+ if (bo->heap_size)
+ return -EINVAL;
+
+ return drm_gem_shmem_pin(obj);
+}
+
+static void *lima_gem_vmap(struct drm_gem_object *obj)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+
+ if (bo->heap_size)
+ return ERR_PTR(-EINVAL);
+
+ return drm_gem_shmem_vmap(obj);
+}
+
+static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+
+ if (bo->heap_size)
+ return -EINVAL;
+
+ return drm_gem_shmem_mmap(obj, vma);
+}
+
static const struct drm_gem_object_funcs lima_gem_funcs = {
.free = lima_gem_free_object,
.open = lima_gem_object_open,
.close = lima_gem_object_close,
.print_info = drm_gem_shmem_print_info,
- .pin = drm_gem_shmem_pin,
+ .pin = lima_gem_pin,
.unpin = drm_gem_shmem_unpin,
.get_sg_table = drm_gem_shmem_get_sg_table,
- .vmap = drm_gem_shmem_vmap,
+ .vmap = lima_gem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .mmap = drm_gem_shmem_mmap,
+ .mmap = lima_gem_mmap,
};
struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
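
lima_heap_alloc() grows the backing store geometrically: the first allocation is lima_heap_init_nr_pages pages, each out-of-memory recovery doubles the size, and the result is clamped to the BO's full size. A quick sketch of the resulting sequence for a 1 MiB heap BO with the default module parameter (constants here are illustrative):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	size_t init_nr_pages = 8;	/* module parameter default */
	size_t bo_size = 1024 * 1024;	/* 1 MiB heap BO */
	size_t heap = 0;

	while (heap < bo_size) {
		size_t next = heap ? heap * 2 : (init_nr_pages << PAGE_SHIFT);

		heap = MIN(next, bo_size);
		printf("grown to %zu bytes\n", heap);
	}
	/* 32768, 65536, 131072, ..., 1048576 */
	return 0;
}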
diff --git a/drivers/gpu/drm/lima/lima_gem.h b/drivers/gpu/drm/lima/lima_gem.h
index 1800feb3e47f..ccea06142f4b 100644
--- a/drivers/gpu/drm/lima/lima_gem.h
+++ b/drivers/gpu/drm/lima/lima_gem.h
@@ -7,12 +7,15 @@
#include <drm/drm_gem_shmem_helper.h>
struct lima_submit;
+struct lima_vm;
struct lima_bo {
struct drm_gem_shmem_object base;
struct mutex lock;
struct list_head va;
+
+ size_t heap_size;
};
static inline struct lima_bo *
@@ -31,6 +34,7 @@ static inline struct dma_resv *lima_bo_resv(struct lima_bo *bo)
return bo->base.base.resv;
}
+int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm);
struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size);
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle);
diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
index ccf49faedebf..d8841c870d90 100644
--- a/drivers/gpu/drm/lima/lima_gp.c
+++ b/drivers/gpu/drm/lima/lima_gp.c
@@ -11,6 +11,8 @@
#include "lima_device.h"
#include "lima_gp.h"
#include "lima_regs.h"
+#include "lima_gem.h"
+#include "lima_vm.h"
#define gp_write(reg, data) writel(data, ip->iomem + reg)
#define gp_read(reg) readl(ip->iomem + reg)
@@ -20,6 +22,7 @@ static irqreturn_t lima_gp_irq_handler(int irq, void *data)
struct lima_ip *ip = data;
struct lima_device *dev = ip->dev;
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+ struct lima_sched_task *task = pipe->current_task;
u32 state = gp_read(LIMA_GP_INT_STAT);
u32 status = gp_read(LIMA_GP_STATUS);
bool done = false;
@@ -29,8 +32,16 @@ static irqreturn_t lima_gp_irq_handler(int irq, void *data)
return IRQ_NONE;
if (state & LIMA_GP_IRQ_MASK_ERROR) {
- dev_err(dev->dev, "gp error irq state=%x status=%x\n",
- state, status);
+ if ((state & LIMA_GP_IRQ_MASK_ERROR) ==
+ LIMA_GP_IRQ_PLBU_OUT_OF_MEM) {
+ dev_dbg(dev->dev, "gp out of heap irq status=%x\n",
+ status);
+ } else {
+ dev_err(dev->dev, "gp error irq state=%x status=%x\n",
+ state, status);
+ if (task)
+ task->recoverable = false;
+ }
/* mask all interrupts before hard reset */
gp_write(LIMA_GP_INT_MASK, 0);
@@ -43,6 +54,7 @@ static irqreturn_t lima_gp_irq_handler(int irq, void *data)
bool active = status & (LIMA_GP_STATUS_VS_ACTIVE |
LIMA_GP_STATUS_PLBU_ACTIVE);
done = valid && !active;
+ pipe->error = false;
}
gp_write(LIMA_GP_INT_CLEAR, state);
@@ -121,6 +133,22 @@ static void lima_gp_task_run(struct lima_sched_pipe *pipe,
u32 cmd = 0;
int i;
+ /* update real heap buffer size for GP */
+ for (i = 0; i < task->num_bos; i++) {
+ struct lima_bo *bo = task->bos[i];
+
+ if (bo->heap_size &&
+ lima_vm_get_va(task->vm, bo) ==
+ f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2]) {
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] =
+ f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] +
+ bo->heap_size;
+ task->recoverable = true;
+ task->heap = bo;
+ break;
+ }
+ }
+
if (f[LIMA_GP_VSCL_START_ADDR >> 2] !=
f[LIMA_GP_VSCL_END_ADDR >> 2])
cmd |= LIMA_GP_CMD_START_VS;
@@ -184,6 +212,36 @@ static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe)
lima_sched_pipe_task_done(pipe);
}
+static int lima_gp_task_recover(struct lima_sched_pipe *pipe)
+{
+ struct lima_ip *ip = pipe->processor[0];
+ struct lima_sched_task *task = pipe->current_task;
+ struct drm_lima_gp_frame *frame = task->frame;
+ u32 *f = frame->frame;
+ size_t fail_size =
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] -
+ f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2];
+
+ if (fail_size == task->heap->heap_size) {
+ int ret;
+
+ ret = lima_heap_alloc(task->heap, task->vm);
+ if (ret < 0)
+ return ret;
+ }
+
+ gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
+ /* Resume from where we stopped, i.e. new start is old end */
+ gp_write(LIMA_GP_PLBU_ALLOC_START_ADDR,
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2]);
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] =
+ f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] + task->heap->heap_size;
+ gp_write(LIMA_GP_PLBU_ALLOC_END_ADDR,
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2]);
+ gp_write(LIMA_GP_CMD, LIMA_GP_CMD_UPDATE_PLBU_ALLOC);
+ return 0;
+}
+
static void lima_gp_print_version(struct lima_ip *ip)
{
u32 version, major, minor;
@@ -270,6 +328,7 @@ int lima_gp_pipe_init(struct lima_device *dev)
pipe->task_fini = lima_gp_task_fini;
pipe->task_error = lima_gp_task_error;
pipe->task_mmu_error = lima_gp_task_mmu_error;
+ pipe->task_recover = lima_gp_task_recover;
return 0;
}
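
The recover path restarts the PLBU from the exhausted end address and extends the window by the (now doubled) heap size. The register arithmetic in isolation, with made-up addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t start = 0x10000000;	/* PLBU_ALLOC_START_ADDR at submit */
	uint32_t end = 0x10008000;	/* exhausted after 32 KiB */
	uint32_t heap_size = 0x10000;	/* heap was just doubled to 64 KiB */

	uint32_t new_start = end;		/* resume where we stopped */
	uint32_t new_end = start + heap_size;	/* window covers the growth */

	printf("resume window: [%#x, %#x)\n",
	       (unsigned int)new_start, (unsigned int)new_end);
	return 0;
}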
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
index 97ec09dee572..f79d2af427e7 100644
--- a/drivers/gpu/drm/lima/lima_mmu.c
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -99,6 +99,11 @@ void lima_mmu_fini(struct lima_ip *ip)
}
+void lima_mmu_flush_tlb(struct lima_ip *ip)
+{
+ mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE);
+}
+
void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm)
{
struct lima_device *dev = ip->dev;
diff --git a/drivers/gpu/drm/lima/lima_mmu.h b/drivers/gpu/drm/lima/lima_mmu.h
index 8c78319bcc8e..4f8ccbebcba1 100644
--- a/drivers/gpu/drm/lima/lima_mmu.h
+++ b/drivers/gpu/drm/lima/lima_mmu.h
@@ -10,6 +10,7 @@ struct lima_vm;
int lima_mmu_init(struct lima_ip *ip);
void lima_mmu_fini(struct lima_ip *ip);
+void lima_mmu_flush_tlb(struct lima_ip *ip);
void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm);
void lima_mmu_page_fault_resume(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_regs.h b/drivers/gpu/drm/lima/lima_regs.h
index ace8ecefbe90..0124c90e0153 100644
--- a/drivers/gpu/drm/lima/lima_regs.h
+++ b/drivers/gpu/drm/lima/lima_regs.h
@@ -239,6 +239,7 @@
#define LIMA_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4)
#define LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5)
#define LIMA_MMU_STATUS_BUS_ID(x) ((x >> 6) & 0x1F)
+#define LIMA_MMU_STATUS_STALL_NOT_ACTIVE BIT(31)
#define LIMA_MMU_COMMAND 0x0008
#define LIMA_MMU_COMMAND_ENABLE_PAGING 0x00
#define LIMA_MMU_COMMAND_DISABLE_PAGING 0x01
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index b561dd05bd62..3886999b4533 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -313,6 +313,26 @@ static const struct drm_sched_backend_ops lima_sched_ops = {
.free_job = lima_sched_free_job,
};
+static void lima_sched_recover_work(struct work_struct *work)
+{
+ struct lima_sched_pipe *pipe =
+ container_of(work, struct lima_sched_pipe, recover_work);
+ int i;
+
+ for (i = 0; i < pipe->num_l2_cache; i++)
+ lima_l2_cache_flush(pipe->l2_cache[i]);
+
+ if (pipe->bcast_mmu) {
+ lima_mmu_flush_tlb(pipe->bcast_mmu);
+ } else {
+ for (i = 0; i < pipe->num_mmu; i++)
+ lima_mmu_flush_tlb(pipe->mmu[i]);
+ }
+
+ if (pipe->task_recover(pipe))
+ drm_sched_fault(&pipe->base);
+}
+
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
unsigned int timeout = lima_sched_timeout_ms > 0 ?
@@ -321,6 +341,8 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
pipe->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&pipe->fence_lock);
+ INIT_WORK(&pipe->recover_work, lima_sched_recover_work);
+
return drm_sched_init(&pipe->base, &lima_sched_ops, 1, 0,
msecs_to_jiffies(timeout), name);
}
@@ -332,11 +354,14 @@ void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
- if (pipe->error)
- drm_sched_fault(&pipe->base);
- else {
- struct lima_sched_task *task = pipe->current_task;
-
+ struct lima_sched_task *task = pipe->current_task;
+
+ if (pipe->error) {
+ if (task && task->recoverable)
+ schedule_work(&pipe->recover_work);
+ else
+ drm_sched_fault(&pipe->base);
+ } else {
pipe->task_fini(pipe);
dma_fence_signal(task->fence);
}
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index 1d814fecbcc0..d64393fb50a9 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -20,6 +20,9 @@ struct lima_sched_task {
struct lima_bo **bos;
int num_bos;
+ bool recoverable;
+ struct lima_bo *heap;
+
/* pipe fence */
struct dma_fence *fence;
};
@@ -68,6 +71,9 @@ struct lima_sched_pipe {
void (*task_fini)(struct lima_sched_pipe *pipe);
void (*task_error)(struct lima_sched_pipe *pipe);
void (*task_mmu_error)(struct lima_sched_pipe *pipe);
+ int (*task_recover)(struct lima_sched_pipe *pipe);
+
+ struct work_struct recover_work;
};
int lima_sched_task_init(struct lima_sched_task *task,
diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c
index 840e2350d872..5b92fb82674a 100644
--- a/drivers/gpu/drm/lima/lima_vm.c
+++ b/drivers/gpu/drm/lima/lima_vm.c
@@ -155,6 +155,7 @@ err_out0:
void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
{
struct lima_bo_va *bo_va;
+ u32 size;
mutex_lock(&bo->lock);
@@ -166,8 +167,9 @@ void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
mutex_lock(&vm->lock);
+ size = bo->heap_size ? bo->heap_size : bo_va->node.size;
lima_vm_unmap_range(vm, bo_va->node.start,
- bo_va->node.start + bo_va->node.size - 1);
+ bo_va->node.start + size - 1);
drm_mm_remove_node(&bo_va->node);
@@ -277,3 +279,45 @@ void lima_vm_print(struct lima_vm *vm)
}
}
}
+
+int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff)
+{
+ struct lima_bo_va *bo_va;
+ struct sg_dma_page_iter sg_iter;
+ int offset = 0, err;
+ u32 base;
+
+ mutex_lock(&bo->lock);
+
+ bo_va = lima_vm_bo_find(vm, bo);
+ if (!bo_va) {
+ err = -ENOENT;
+ goto err_out0;
+ }
+
+ mutex_lock(&vm->lock);
+
+ base = bo_va->node.start + (pageoff << PAGE_SHIFT);
+ for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter,
+ bo->base.sgt->nents, pageoff) {
+ err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
+ base + offset);
+ if (err)
+ goto err_out1;
+
+ offset += PAGE_SIZE;
+ }
+
+ mutex_unlock(&vm->lock);
+
+ mutex_unlock(&bo->lock);
+ return 0;
+
+err_out1:
+ if (offset)
+ lima_vm_unmap_range(vm, base, base + offset - 1);
+ mutex_unlock(&vm->lock);
+err_out0:
+ mutex_unlock(&bo->lock);
+ return err;
+}
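
lima_vm_map_bo()'s err_out1 label follows the usual kernel goto-unwind shape: only the page mappings that succeeded are torn down, in reverse. A generic user-space sketch of the same pattern (names are illustrative):

#include <stdio.h>
#include <stdlib.h>

static int map_pages(int **slots, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		slots[i] = malloc(sizeof(int));
		if (!slots[i])
			goto err_unwind;	/* partial failure */
	}
	return 0;

err_unwind:
	/* Undo only the mappings that succeeded, like err_out1 above. */
	while (i--)
		free(slots[i]);
	return -1;
}

int main(void)
{
	int *slots[4];

	if (map_pages(slots, 4) == 0)
		for (int i = 0; i < 4; i++)
			free(slots[i]);
	return 0;
}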
diff --git a/drivers/gpu/drm/lima/lima_vm.h b/drivers/gpu/drm/lima/lima_vm.h
index e0bdedcf14dd..22aeec77d84d 100644
--- a/drivers/gpu/drm/lima/lima_vm.h
+++ b/drivers/gpu/drm/lima/lima_vm.h
@@ -58,5 +58,6 @@ static inline void lima_vm_put(struct lima_vm *vm)
}
void lima_vm_print(struct lima_vm *vm);
+int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff);
#endif
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index 9008ddcfc528..f28cb7a576ba 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -20,11 +20,11 @@
* input formats including most variants of RGB and YUV.
*
* The hardware has four display pipes, and the layout is a little
- * bit like this:
+ * bit like this::
*
- * Memory -> Overlay -> Channel -> FIFO -> 5 formatters -> DSI/DPI
- * External 0..5 0..3 A,B, 3 x DSI bridge
- * source 0..9 C0,C1 2 x DPI
+ * Memory -> Overlay -> Channel -> FIFO -> 5 formatters -> DSI/DPI
+ * External 0..5 0..3 A,B, 3 x DSI bridge
+ * source 0..9 C0,C1 2 x DPI
*
* FIFOs A and B are for LCD and HDMI while FIFOs C0/C1 are for
* panels with embedded buffer.
@@ -43,6 +43,7 @@
* to change as we exploit more of the hardware capabilities.
*
* TODO:
+ *
* - Enabled damaged rectangles using drm_plane_enable_fb_damage_clips()
* so we can selectively just transmit the damaged area to a
* command-only display.
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index bb6528b01cd0..7af5ebb0c436 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -986,7 +986,8 @@ static void mcde_dsi_bridge_disable(struct drm_bridge *bridge)
clk_disable_unprepare(d->lp_clk);
}
-static int mcde_dsi_bridge_attach(struct drm_bridge *bridge)
+static int mcde_dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
struct drm_device *drm = bridge->dev;
@@ -998,7 +999,7 @@ static int mcde_dsi_bridge_attach(struct drm_bridge *bridge)
}
/* Attach the DSI bridge to the output (panel etc) bridge */
- ret = drm_bridge_attach(bridge->encoder, d->bridge_out, bridge);
+ ret = drm_bridge_attach(bridge->encoder, d->bridge_out, bridge, flags);
if (ret) {
dev_err(d->dev, "failed to attach the DSI bridge\n");
return ret;
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 01fa8b8d763d..4f0ce4cd5b8c 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -607,7 +607,7 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
/* Currently DPI0 is fixed to be driven by OVL1 */
dpi->encoder.possible_crtcs = BIT(1);
- ret = drm_bridge_attach(&dpi->encoder, dpi->bridge, NULL);
+ ret = drm_bridge_attach(&dpi->encoder, dpi->bridge, NULL, 0);
if (ret) {
dev_err(dev, "Failed to attach bridge: %d\n", ret);
goto err_cleanup;
@@ -664,6 +664,16 @@ static unsigned int mt2701_calculate_factor(int clock)
return 1;
}
+static unsigned int mt8183_calculate_factor(int clock)
+{
+ if (clock <= 27000)
+ return 8;
+ else if (clock <= 167000)
+ return 4;
+ else
+ return 2;
+}
+
static const struct mtk_dpi_conf mt8173_conf = {
.cal_factor = mt8173_calculate_factor,
.reg_h_fre_con = 0xe0,
@@ -675,6 +685,11 @@ static const struct mtk_dpi_conf mt2701_conf = {
.edge_sel_en = true,
};
+static const struct mtk_dpi_conf mt8183_conf = {
+ .cal_factor = mt8183_calculate_factor,
+ .reg_h_fre_con = 0xe0,
+};
+
static int mtk_dpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -770,6 +785,9 @@ static const struct of_device_id mtk_dpi_of_ids[] = {
{ .compatible = "mediatek,mt8173-dpi",
.data = &mt8173_conf,
},
+ { .compatible = "mediatek,mt8183-dpi",
+ .data = &mt8183_conf,
+ },
{ },
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 0dfcd1787e65..fe85e487e477 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -486,6 +486,7 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (mtk_crtc->cmdq_client) {
+ mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event);
@@ -636,10 +637,18 @@ static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
static int mtk_drm_crtc_init(struct drm_device *drm,
struct mtk_drm_crtc *mtk_crtc,
- struct drm_plane *primary,
- struct drm_plane *cursor, unsigned int pipe)
+ unsigned int pipe)
{
- int ret;
+ struct drm_plane *primary = NULL;
+ struct drm_plane *cursor = NULL;
+ int i, ret;
+
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)
+ primary = &mtk_crtc->planes[i];
+ else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
+ cursor = &mtk_crtc->planes[i];
+ }
ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
&mtk_crtc_funcs, NULL);
@@ -689,11 +698,12 @@ static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
}
static inline
-enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx)
+enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx,
+ unsigned int num_planes)
{
if (plane_idx == 0)
return DRM_PLANE_TYPE_PRIMARY;
- else if (plane_idx == 1)
+ else if (plane_idx == (num_planes - 1))
return DRM_PLANE_TYPE_CURSOR;
else
return DRM_PLANE_TYPE_OVERLAY;
@@ -712,7 +722,8 @@ static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
ret = mtk_plane_init(drm_dev,
&mtk_crtc->planes[mtk_crtc->layer_nr],
BIT(pipe),
- mtk_drm_crtc_plane_type(mtk_crtc->layer_nr),
+ mtk_drm_crtc_plane_type(mtk_crtc->layer_nr,
+ num_planes),
mtk_ddp_comp_supported_rotations(comp));
if (ret)
return ret;
@@ -807,9 +818,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
return ret;
}
- ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
- mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
- NULL, pipe);
+ ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, pipe);
if (ret < 0)
return ret;
@@ -828,7 +837,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
drm_crtc_index(&mtk_crtc->base));
mtk_crtc->cmdq_client = NULL;
}
- ret = of_property_read_u32_index(dev->of_node, "mediatek,gce-events",
+ ret = of_property_read_u32_index(priv->mutex_node,
+ "mediatek,gce-events",
drm_crtc_index(&mtk_crtc->base),
&mtk_crtc->cmdq_event);
if (ret)
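
The new plane-type rule is: index 0 is the primary plane, the last index is the cursor (it was hard-coded to index 1 before), and everything in between is an overlay. A standalone model of the assignment:

#include <stdio.h>

enum plane_type { PRIMARY, OVERLAY, CURSOR };

static enum plane_type plane_type(unsigned int idx, unsigned int num)
{
	if (idx == 0)
		return PRIMARY;
	if (idx == num - 1)
		return CURSOR;
	return OVERLAY;
}

int main(void)
{
	for (unsigned int i = 0; i < 4; i++)
		printf("plane %u -> %d\n", i, plane_type(i, 4));
	/* 0 -> PRIMARY, 1 -> OVERLAY, 2 -> OVERLAY, 3 -> CURSOR */
	return 0;
}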
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 1f5a112bb034..57c88de9a329 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -471,6 +471,7 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
/* Only DMA capable components need the LARB property */
comp->larb_dev = NULL;
if (type != MTK_DISP_OVL &&
+ type != MTK_DISP_OVL_2L &&
type != MTK_DISP_RDMA &&
type != MTK_DISP_WDMA)
return 0;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 914cc7619cd7..c2bd683a87c8 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -80,6 +80,7 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_crtc_state *crtc_state;
+ int ret;
if (plane != state->crtc->cursor)
return -EINVAL;
@@ -90,6 +91,11 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
if (!plane->state->fb)
return -EINVAL;
+ ret = mtk_drm_crtc_plane_check(state->crtc, plane,
+ to_mtk_plane_state(state));
+ if (ret)
+ return ret;
+
if (state->state)
crtc_state = drm_atomic_get_existing_crtc_state(state->state,
state->crtc);
@@ -115,6 +121,7 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
plane->state->src_y = new_state->src_y;
plane->state->src_h = new_state->src_h;
plane->state->src_w = new_state->src_w;
+ swap(plane->state->fb, new_state->fb);
state->pending.async_dirty = true;
mtk_drm_crtc_async_update(new_state->crtc, plane, new_state);
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 5fa1073cf26b..0ede69830a9d 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -904,7 +904,7 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
/* If there's a bridge, attach to it and let it create the connector */
if (dsi->bridge) {
- ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
+ ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL, 0);
if (ret) {
DRM_ERROR("Failed to attach bridge to drm\n");
goto err_encoder_cleanup;
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 5e4a4dbda443..a8b20557539b 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1297,11 +1297,17 @@ static void mtk_hdmi_hpd_event(bool hpd, struct device *dev)
* Bridge callbacks
*/
-static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
+static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
ret = drm_connector_init_with_ddc(bridge->encoder->dev, &hdmi->conn,
&mtk_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA,
@@ -1326,7 +1332,7 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
if (hdmi->next_bridge) {
ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
- bridge);
+ bridge, flags);
if (ret) {
dev_err(hdmi->dev,
"Failed to attach external bridge: %d\n", ret);
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 3bb7ffe5fc39..e8c94915a4fc 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -16,6 +16,7 @@
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
@@ -135,6 +136,7 @@ struct meson_dw_hdmi_data {
struct meson_dw_hdmi {
struct drm_encoder encoder;
+ struct drm_bridge bridge;
struct dw_hdmi_plat_data dw_plat_data;
struct meson_drm *priv;
struct device *dev;
@@ -148,9 +150,12 @@ struct meson_dw_hdmi {
struct regulator *hdmi_supply;
u32 irq_stat;
struct dw_hdmi *hdmi;
+ unsigned long output_bus_fmt;
};
#define encoder_to_meson_dw_hdmi(x) \
container_of(x, struct meson_dw_hdmi, encoder)
+#define bridge_to_meson_dw_hdmi(x) \
+ container_of(x, struct meson_dw_hdmi, bridge)
static inline int dw_hdmi_is_compatible(struct meson_dw_hdmi *dw_hdmi,
const char *compat)
@@ -297,6 +302,10 @@ static void meson_hdmi_phy_setup_mode(struct meson_dw_hdmi *dw_hdmi,
struct meson_drm *priv = dw_hdmi->priv;
unsigned int pixel_clock = mode->clock;
+ /* For YUV420, the pixel clock is half of the venc clock */
+ if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+ pixel_clock /= 2;
+
if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi")) {
if (pixel_clock >= 371250) {
@@ -368,29 +377,40 @@ static inline void meson_dw_hdmi_phy_reset(struct meson_dw_hdmi *dw_hdmi)
}
static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct meson_drm *priv = dw_hdmi->priv;
int vic = drm_match_cea_mode(mode);
+ unsigned int phy_freq;
unsigned int vclk_freq;
unsigned int venc_freq;
unsigned int hdmi_freq;
vclk_freq = mode->clock;
+ /* For YUV420, the pixel clock is half of the venc clock */
+ if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+ vclk_freq /= 2;
+
+ /* TMDS clock is pixel_clock * 10 */
+ phy_freq = vclk_freq * 10;
+
if (!vic) {
- meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, vclk_freq,
- vclk_freq, vclk_freq, false);
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, phy_freq,
+ vclk_freq, vclk_freq, vclk_freq, false);
return;
}
+ /* 480i/576i needs global pixel doubling */
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
vclk_freq *= 2;
venc_freq = vclk_freq;
hdmi_freq = vclk_freq;
- if (meson_venc_hdmi_venc_repeat(vic))
+ /* VENC doubles pixels for 1080i, 720p and YUV420 modes */
+ if (meson_venc_hdmi_venc_repeat(vic) ||
+ dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
venc_freq *= 2;
vclk_freq = max(venc_freq, hdmi_freq);
@@ -398,11 +418,11 @@ static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
venc_freq /= 2;
- DRM_DEBUG_DRIVER("vclk:%d venc=%d hdmi=%d enci=%d\n",
- vclk_freq, venc_freq, hdmi_freq,
+ DRM_DEBUG_DRIVER("vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n",
+ phy_freq, vclk_freq, venc_freq, hdmi_freq,
priv->venc.hdmi_use_enci);
- meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, vclk_freq,
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, phy_freq, vclk_freq,
venc_freq, hdmi_freq, priv->venc.hdmi_use_enci);
}
@@ -437,8 +457,9 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
/* Enable normal output to PHY */
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
- /* TMDS pattern setup (TOFIX Handle the YUV420 case) */
- if (mode->clock > 340000) {
+ /* TMDS pattern setup */
+ if (mode->clock > 340000 &&
+ dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_YUV8_1X24) {
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
0);
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
@@ -613,6 +634,8 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
struct meson_drm *priv = connector->dev->dev_private;
+ bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
+ unsigned int phy_freq;
unsigned int vclk_freq;
unsigned int venc_freq;
unsigned int hdmi_freq;
@@ -621,9 +644,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
- /* If sink max TMDS clock, we reject the mode */
+ /* Reject modes above the sink's max TMDS clock unless YUV420 can be used */
if (connector->display_info.max_tmds_clock &&
- mode->clock > connector->display_info.max_tmds_clock)
+ mode->clock > connector->display_info.max_tmds_clock &&
+ !drm_mode_is_420_only(&connector->display_info, mode) &&
+ !drm_mode_is_420_also(&connector->display_info, mode))
return MODE_BAD;
/* Check against non-VIC supported modes */
@@ -639,6 +664,15 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
vclk_freq = mode->clock;
+ /* For YUV420, the pixel clock is half of the venc clock */
+ if (drm_mode_is_420_only(&connector->display_info, mode) ||
+ (!is_hdmi2_sink &&
+ drm_mode_is_420_also(&connector->display_info, mode)))
+ vclk_freq /= 2;
+
+ /* TMDS clock is pixel_clock * 10 */
+ phy_freq = vclk_freq * 10;
+
/* 480i/576i needs global pixel doubling */
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
vclk_freq *= 2;
@@ -646,8 +680,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
venc_freq = vclk_freq;
hdmi_freq = vclk_freq;
- /* VENC double pixels for 1080i and 720p modes */
- if (meson_venc_hdmi_venc_repeat(vic))
+ /* VENC doubles pixels for 1080i, 720p and YUV420 modes */
+ if (meson_venc_hdmi_venc_repeat(vic) ||
+ drm_mode_is_420_only(&connector->display_info, mode) ||
+ (!is_hdmi2_sink &&
+ drm_mode_is_420_also(&connector->display_info, mode)))
venc_freq *= 2;
vclk_freq = max(venc_freq, hdmi_freq);
@@ -655,14 +692,19 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
venc_freq /= 2;
- dev_dbg(connector->dev->dev, "%s: vclk:%d venc=%d hdmi=%d\n", __func__,
- vclk_freq, venc_freq, hdmi_freq);
+ dev_dbg(connector->dev->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n",
+ __func__, phy_freq, vclk_freq, venc_freq, hdmi_freq);
- return meson_vclk_vic_supported_freq(vclk_freq);
+ return meson_vclk_vic_supported_freq(phy_freq, vclk_freq);
}
/* Encoder */
+static const u32 meson_dw_hdmi_out_bus_fmts[] = {
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_UYYVYY8_0_5X24,
+};
+
static void meson_venc_hdmi_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
@@ -672,16 +714,54 @@ static const struct drm_encoder_funcs meson_venc_hdmi_encoder_funcs = {
.destroy = meson_venc_hdmi_encoder_destroy,
};
-static int meson_venc_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+static u32 *
+meson_venc_hdmi_encoder_get_inp_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts = NULL;
+ int i;
+
+ *num_input_fmts = 0;
+
+ for (i = 0 ; i < ARRAY_SIZE(meson_dw_hdmi_out_bus_fmts) ; ++i) {
+ if (output_fmt == meson_dw_hdmi_out_bus_fmts[i]) {
+ *num_input_fmts = 1;
+ input_fmts = kcalloc(*num_input_fmts,
+ sizeof(*input_fmts),
+ GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ input_fmts[0] = output_fmt;
+
+ break;
+ }
+ }
+
+ return input_fmts;
+}
+
+static int meson_venc_hdmi_encoder_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
+
+ dw_hdmi->output_bus_fmt = bridge_state->output_bus_cfg.format;
+
+ DRM_DEBUG_DRIVER("output_bus_fmt %lx\n", dw_hdmi->output_bus_fmt);
+
return 0;
}
-static void meson_venc_hdmi_encoder_disable(struct drm_encoder *encoder)
+static void meson_venc_hdmi_encoder_disable(struct drm_bridge *bridge)
{
- struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
+ struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
struct meson_drm *priv = dw_hdmi->priv;
DRM_DEBUG_DRIVER("\n");
@@ -693,9 +773,9 @@ static void meson_venc_hdmi_encoder_disable(struct drm_encoder *encoder)
writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
}
-static void meson_venc_hdmi_encoder_enable(struct drm_encoder *encoder)
+static void meson_venc_hdmi_encoder_enable(struct drm_bridge *bridge)
{
- struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
+ struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
struct meson_drm *priv = dw_hdmi->priv;
DRM_DEBUG_DRIVER("%s\n", priv->venc.hdmi_use_enci ? "VENCI" : "VENCP");
@@ -706,32 +786,47 @@ static void meson_venc_hdmi_encoder_enable(struct drm_encoder *encoder)
writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
}
-static void meson_venc_hdmi_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void meson_venc_hdmi_encoder_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
- struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
+ struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
struct meson_drm *priv = dw_hdmi->priv;
int vic = drm_match_cea_mode(mode);
+ unsigned int ycrcb_map = VPU_HDMI_OUTPUT_CBYCR;
+ bool yuv420_mode = false;
DRM_DEBUG_DRIVER("\"%s\" vic %d\n", mode->name, vic);
+ if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) {
+ ycrcb_map = VPU_HDMI_OUTPUT_CRYCB;
+ yuv420_mode = true;
+ }
+
/* VENC + VENC-DVI Mode setup */
- meson_venc_hdmi_mode_set(priv, vic, mode);
+ meson_venc_hdmi_mode_set(priv, vic, ycrcb_map, yuv420_mode, mode);
/* VCLK Set clock */
dw_hdmi_set_vclk(dw_hdmi, mode);
- /* Setup YUV444 to HDMI-TX, no 10bit diphering */
- writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
+ if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+ /* Setup YUV420 to HDMI-TX, no 10bit dithering */
+ writel_relaxed(2 | (2 << 2),
+ priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
+ else
+ /* Setup YUV444 to HDMI-TX, no 10bit dithering */
+ writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
}
-static const struct drm_encoder_helper_funcs
- meson_venc_hdmi_encoder_helper_funcs = {
- .atomic_check = meson_venc_hdmi_encoder_atomic_check,
- .disable = meson_venc_hdmi_encoder_disable,
- .enable = meson_venc_hdmi_encoder_enable,
- .mode_set = meson_venc_hdmi_encoder_mode_set,
+static const struct drm_bridge_funcs meson_venc_hdmi_encoder_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_get_input_bus_fmts = meson_venc_hdmi_encoder_get_inp_bus_fmts,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_check = meson_venc_hdmi_encoder_atomic_check,
+ .enable = meson_venc_hdmi_encoder_enable,
+ .disable = meson_venc_hdmi_encoder_disable,
+ .mode_set = meson_venc_hdmi_encoder_mode_set,
};
/* DW HDMI Regmap */
@@ -852,6 +947,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct meson_drm *priv = drm->dev_private;
struct dw_hdmi_plat_data *dw_plat_data;
+ struct drm_bridge *next_bridge;
struct drm_encoder *encoder;
struct resource *res;
int irq;
@@ -953,8 +1049,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
/* Encoder */
- drm_encoder_helper_add(encoder, &meson_venc_hdmi_encoder_helper_funcs);
-
ret = drm_encoder_init(drm, encoder, &meson_venc_hdmi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, "meson_hdmi");
if (ret) {
@@ -962,6 +1056,9 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
return ret;
}
+ meson_dw_hdmi->bridge.funcs = &meson_venc_hdmi_encoder_bridge_funcs;
+ drm_bridge_attach(encoder, &meson_dw_hdmi->bridge, NULL, 0);
+
encoder->possible_crtcs = BIT(0);
DRM_DEBUG_DRIVER("encoder initialized\n");
@@ -974,8 +1071,8 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
dw_plat_data->phy_ops = &meson_dw_hdmi_phy_ops;
dw_plat_data->phy_name = "meson_dw_hdmi_phy";
dw_plat_data->phy_data = meson_dw_hdmi;
- dw_plat_data->input_bus_format = MEDIA_BUS_FMT_YUV8_1X24;
dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709;
+ dw_plat_data->ycbcr_420_allowed = true;
if (dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
@@ -984,11 +1081,16 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
platform_set_drvdata(pdev, meson_dw_hdmi);
- meson_dw_hdmi->hdmi = dw_hdmi_bind(pdev, encoder,
- &meson_dw_hdmi->dw_plat_data);
+ meson_dw_hdmi->hdmi = dw_hdmi_probe(pdev,
+ &meson_dw_hdmi->dw_plat_data);
if (IS_ERR(meson_dw_hdmi->hdmi))
return PTR_ERR(meson_dw_hdmi->hdmi);
+ next_bridge = of_drm_find_bridge(pdev->dev.of_node);
+ if (next_bridge)
+ drm_bridge_attach(encoder, next_bridge,
+ &meson_dw_hdmi->bridge, 0);
+
DRM_DEBUG_DRIVER("HDMI controller initialized\n");
return 0;
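
The clock derivation in dw_hdmi_set_vclk() is easiest to follow with numbers. For a 4K60 YUV420 mode (mode->clock = 594000 kHz), the sketch below reproduces the values of the MESON_VCLK_HDMI_594000_YUV420 table entry added in meson_vclk.c; DBLCLK handling and VENC pixel repetition are left out for brevity:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	unsigned int mode_clock = 594000;	/* kHz, 3840x2160@60 */
	bool yuv420 = true;

	unsigned int vclk = mode_clock;

	if (yuv420)
		vclk /= 2;		/* pixel clock halves for YUV420 */

	unsigned int phy = vclk * 10;	/* TMDS clock = pixel clock * 10 */
	unsigned int venc = vclk;
	unsigned int hdmi = vclk;

	if (yuv420)
		venc *= 2;		/* VENC keeps running at full rate */

	unsigned int final_vclk = venc > hdmi ? venc : hdmi;

	/* phy=2970000 vclk=594000 venc=594000 hdmi=297000 */
	printf("phy=%u vclk=%u venc=%u hdmi=%u\n",
	       phy, final_vclk, venc, hdmi);
	return 0;
}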
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index f690793ae2d5..fdf26dac9fa8 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -354,12 +354,17 @@ enum {
/* 2970 /1 /1 /1 /5 /2 => /1 /1 */
MESON_VCLK_HDMI_297000,
/* 5940 /1 /1 /2 /5 /1 => /1 /1 */
- MESON_VCLK_HDMI_594000
+ MESON_VCLK_HDMI_594000,
+/* 2970 /1 /1 /1 /5 /1 => /1 /2 */
+ MESON_VCLK_HDMI_594000_YUV420,
};
struct meson_vclk_params {
+ unsigned int pll_freq;
+ unsigned int phy_freq;
+ unsigned int vclk_freq;
+ unsigned int venc_freq;
unsigned int pixel_freq;
- unsigned int pll_base_freq;
unsigned int pll_od1;
unsigned int pll_od2;
unsigned int pll_od3;
@@ -367,8 +372,11 @@ struct meson_vclk_params {
unsigned int vclk_div;
} params[] = {
[MESON_VCLK_HDMI_ENCI_54000] = {
+ .pll_freq = 4320000,
+ .phy_freq = 270000,
+ .vclk_freq = 54000,
+ .venc_freq = 54000,
.pixel_freq = 54000,
- .pll_base_freq = 4320000,
.pll_od1 = 4,
.pll_od2 = 4,
.pll_od3 = 1,
@@ -376,8 +384,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_54000] = {
- .pixel_freq = 54000,
- .pll_base_freq = 4320000,
+ .pll_freq = 4320000,
+ .phy_freq = 270000,
+ .vclk_freq = 54000,
+ .venc_freq = 54000,
+ .pixel_freq = 27000,
.pll_od1 = 4,
.pll_od2 = 4,
.pll_od3 = 1,
@@ -385,8 +396,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_148500] = {
- .pixel_freq = 148500,
- .pll_base_freq = 2970000,
+ .pll_freq = 2970000,
+ .phy_freq = 742500,
+ .vclk_freq = 148500,
+ .venc_freq = 148500,
+ .pixel_freq = 74250,
.pll_od1 = 4,
.pll_od2 = 1,
.pll_od3 = 1,
@@ -394,8 +408,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_74250] = {
+ .pll_freq = 2970000,
+ .phy_freq = 742500,
+ .vclk_freq = 74250,
+ .venc_freq = 74250,
.pixel_freq = 74250,
- .pll_base_freq = 2970000,
.pll_od1 = 2,
.pll_od2 = 2,
.pll_od3 = 2,
@@ -403,8 +420,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_148500] = {
+ .pll_freq = 2970000,
+ .phy_freq = 1485000,
+ .vclk_freq = 148500,
+ .venc_freq = 148500,
.pixel_freq = 148500,
- .pll_base_freq = 2970000,
.pll_od1 = 1,
.pll_od2 = 2,
.pll_od3 = 2,
@@ -412,8 +432,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_297000] = {
+ .pll_freq = 5940000,
+ .phy_freq = 2970000,
+ .venc_freq = 297000,
+ .vclk_freq = 297000,
.pixel_freq = 297000,
- .pll_base_freq = 5940000,
.pll_od1 = 2,
.pll_od2 = 1,
.pll_od3 = 1,
@@ -421,14 +444,29 @@ struct meson_vclk_params {
.vclk_div = 2,
},
[MESON_VCLK_HDMI_594000] = {
+ .pll_freq = 5940000,
+ .phy_freq = 5940000,
+ .venc_freq = 594000,
+ .vclk_freq = 594000,
.pixel_freq = 594000,
- .pll_base_freq = 5940000,
.pll_od1 = 1,
.pll_od2 = 1,
.pll_od3 = 2,
.vid_pll_div = VID_PLL_DIV_5,
.vclk_div = 1,
},
+ [MESON_VCLK_HDMI_594000_YUV420] = {
+ .pll_freq = 5940000,
+ .phy_freq = 2970000,
+ .venc_freq = 594000,
+ .vclk_freq = 594000,
+ .pixel_freq = 297000,
+ .pll_od1 = 2,
+ .pll_od2 = 1,
+ .pll_od3 = 1,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 1,
+ },
{ /* sentinel */ },
};
@@ -701,6 +739,7 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
unsigned int od, m, frac, od1, od2, od3;
if (meson_hdmi_pll_find_params(priv, pll_freq, &m, &frac, &od)) {
+ /* OD2 goes to the PHY, and needs to be *10, so keep OD3=1 */
od3 = 1;
if (od < 4) {
od1 = 2;
@@ -723,21 +762,28 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
}
enum drm_mode_status
-meson_vclk_vic_supported_freq(unsigned int freq)
+meson_vclk_vic_supported_freq(unsigned int phy_freq,
+ unsigned int vclk_freq)
{
int i;
- DRM_DEBUG_DRIVER("freq = %d\n", freq);
+ DRM_DEBUG_DRIVER("phy_freq = %d vclk_freq = %d\n",
+ phy_freq, vclk_freq);
for (i = 0 ; params[i].pixel_freq ; ++i) {
DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n",
i, params[i].pixel_freq,
FREQ_1000_1001(params[i].pixel_freq));
+ DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
+ i, params[i].phy_freq,
+ FREQ_1000_1001(params[i].phy_freq/10)*10);
/* Match strict frequency */
- if (freq == params[i].pixel_freq)
+ if (phy_freq == params[i].phy_freq &&
+ vclk_freq == params[i].vclk_freq)
return MODE_OK;
/* Match 1000/1001 variant */
- if (freq == FREQ_1000_1001(params[i].pixel_freq))
+ if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) &&
+ vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
return MODE_OK;
}
@@ -965,8 +1011,9 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
}
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
- unsigned int vclk_freq, unsigned int venc_freq,
- unsigned int dac_freq, bool hdmi_use_enci)
+ unsigned int phy_freq, unsigned int vclk_freq,
+ unsigned int venc_freq, unsigned int dac_freq,
+ bool hdmi_use_enci)
{
bool vic_alternate_clock = false;
unsigned int freq;
@@ -986,7 +1033,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
* - venc_div = 1
* - encp encoder
*/
- meson_vclk_set(priv, vclk_freq * 10, 0, 0, 0,
+ meson_vclk_set(priv, phy_freq, 0, 0, 0,
VID_PLL_DIV_5, 2, 1, 1, false, false);
return;
}
@@ -1008,9 +1055,11 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
}
for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
- if (vclk_freq == params[freq].pixel_freq ||
- vclk_freq == FREQ_1000_1001(params[freq].pixel_freq)) {
- if (vclk_freq != params[freq].pixel_freq)
+ if ((phy_freq == params[freq].phy_freq ||
+ phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) &&
+ (vclk_freq == params[freq].vclk_freq ||
+ vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
+ if (vclk_freq != params[freq].vclk_freq)
vic_alternate_clock = true;
else
vic_alternate_clock = false;
@@ -1039,7 +1088,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
return;
}
- meson_vclk_set(priv, params[freq].pll_base_freq,
+ meson_vclk_set(priv, params[freq].pll_freq,
params[freq].pll_od1, params[freq].pll_od2,
params[freq].pll_od3, params[freq].vid_pll_div,
params[freq].vclk_div, hdmi_tx_div, venc_div,
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index b62125540aef..aed0ab2efa71 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -25,10 +25,11 @@ enum {
enum drm_mode_status
meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
enum drm_mode_status
-meson_vclk_vic_supported_freq(unsigned int freq);
+meson_vclk_vic_supported_freq(unsigned int phy_freq, unsigned int vclk_freq);
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
- unsigned int vclk_freq, unsigned int venc_freq,
- unsigned int dac_freq, bool hdmi_use_enci);
+ unsigned int phy_freq, unsigned int vclk_freq,
+ unsigned int venc_freq, unsigned int dac_freq,
+ bool hdmi_use_enci);
#endif /* __MESON_VCLK_H */
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 4efd7864d5bf..f93c725b6f02 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -946,7 +946,9 @@ bool meson_venc_hdmi_venc_repeat(int vic)
EXPORT_SYMBOL_GPL(meson_venc_hdmi_venc_repeat);
void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
- struct drm_display_mode *mode)
+ unsigned int ycrcb_map,
+ bool yuv420_mode,
+ const struct drm_display_mode *mode)
{
union meson_hdmi_venc_mode *vmode = NULL;
union meson_hdmi_venc_mode vmode_dmt;
@@ -1528,14 +1530,14 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
reg |= VPU_HDMI_INV_VSYNC;
- /* Output data format: CbYCr */
- reg |= VPU_HDMI_OUTPUT_CBYCR;
+ /* Output data format */
+ reg |= ycrcb_map;
/*
* Write rate to the async FIFO between VENC and HDMI.
* One write every 2 wr_clk.
*/
- if (venc_repeat)
+ if (venc_repeat || yuv420_mode)
reg |= VPU_HDMI_WR_RATE(2);
/*
diff --git a/drivers/gpu/drm/meson/meson_venc.h b/drivers/gpu/drm/meson/meson_venc.h
index 576768bdd08d..9138255ffc9e 100644
--- a/drivers/gpu/drm/meson/meson_venc.h
+++ b/drivers/gpu/drm/meson/meson_venc.h
@@ -60,7 +60,9 @@ extern struct meson_cvbs_enci_mode meson_cvbs_enci_ntsc;
void meson_venci_cvbs_mode_set(struct meson_drm *priv,
struct meson_cvbs_enci_mode *mode);
void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
- struct drm_display_mode *mode);
+ unsigned int ycrcb_map,
+ bool yuv420_mode,
+ const struct drm_display_mode *mode);
unsigned int meson_venci_get_field(struct meson_drm *priv);
void meson_venc_enable_vsync(struct meson_drm *priv);
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index 1bd6b6d15ffb..541f9eb2a135 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -213,8 +213,10 @@ static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder,
meson_venci_cvbs_mode_set(priv, meson_mode->enci);
/* Setup 27MHz vclk2 for ENCI and VDAC */
- meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS,
- MESON_VCLK_CVBS, MESON_VCLK_CVBS, true);
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS,
+ MESON_VCLK_CVBS, MESON_VCLK_CVBS,
+ MESON_VCLK_CVBS, MESON_VCLK_CVBS,
+ true);
}
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index aa32aad222c2..9691252d6233 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -95,7 +95,6 @@
#define MATROX_DPMS_CLEARED (-1)
#define to_mga_crtc(x) container_of(x, struct mga_crtc, base)
-#define to_mga_encoder(x) container_of(x, struct mga_encoder, base)
#define to_mga_connector(x) container_of(x, struct mga_connector, base)
struct mga_crtc {
@@ -110,12 +109,6 @@ struct mga_mode_info {
struct mga_crtc *crtc;
};
-struct mga_encoder {
- struct drm_encoder base;
- int last_dpms;
-};
-
-
struct mga_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
@@ -185,6 +178,8 @@ struct mga_device {
/* SE model number stored in reg 0x1e24 */
u32 unique_rev_id;
+
+ struct drm_encoder encoder;
};
static inline enum mga_type
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 62a8e9ccb16d..d90e83959fca 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -15,6 +15,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "mgag200_drv.h"
@@ -1449,76 +1450,6 @@ static void mga_crtc_init(struct mga_device *mdev)
drm_crtc_helper_add(&mga_crtc->base, &mga_helper_funcs);
}
-/*
- * The encoder comes after the CRTC in the output pipeline, but before
- * the connector. It's responsible for ensuring that the digital
- * stream is appropriately converted into the output format. Setup is
- * very simple in this case - all we have to do is inform qemu of the
- * colour depth in order to ensure that it displays appropriately
- */
-
-/*
- * These functions are analagous to those in the CRTC code, but are intended
- * to handle any encoder-specific limitations
- */
-static void mga_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-
-}
-
-static void mga_encoder_dpms(struct drm_encoder *encoder, int state)
-{
- return;
-}
-
-static void mga_encoder_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void mga_encoder_commit(struct drm_encoder *encoder)
-{
-}
-
-static void mga_encoder_destroy(struct drm_encoder *encoder)
-{
- struct mga_encoder *mga_encoder = to_mga_encoder(encoder);
- drm_encoder_cleanup(encoder);
- kfree(mga_encoder);
-}
-
-static const struct drm_encoder_helper_funcs mga_encoder_helper_funcs = {
- .dpms = mga_encoder_dpms,
- .mode_set = mga_encoder_mode_set,
- .prepare = mga_encoder_prepare,
- .commit = mga_encoder_commit,
-};
-
-static const struct drm_encoder_funcs mga_encoder_encoder_funcs = {
- .destroy = mga_encoder_destroy,
-};
-
-static struct drm_encoder *mga_encoder_init(struct drm_device *dev)
-{
- struct drm_encoder *encoder;
- struct mga_encoder *mga_encoder;
-
- mga_encoder = kzalloc(sizeof(struct mga_encoder), GFP_KERNEL);
- if (!mga_encoder)
- return NULL;
-
- encoder = &mga_encoder->base;
- encoder->possible_crtcs = 0x1;
-
- drm_encoder_init(dev, encoder, &mga_encoder_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- drm_encoder_helper_add(encoder, &mga_encoder_helper_funcs);
-
- return encoder;
-}
-
-
static int mga_vga_get_modes(struct drm_connector *connector)
{
struct mga_connector *mga_connector = to_mga_connector(connector);
@@ -1686,8 +1617,9 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev)
int mgag200_modeset_init(struct mga_device *mdev)
{
- struct drm_encoder *encoder;
+ struct drm_encoder *encoder = &mdev->encoder;
struct drm_connector *connector;
+ int ret;
mdev->mode_info.mode_config_initialized = true;
@@ -1698,11 +1630,15 @@ int mgag200_modeset_init(struct mga_device *mdev)
mga_crtc_init(mdev);
- encoder = mga_encoder_init(mdev->dev);
- if (!encoder) {
- DRM_ERROR("mga_encoder_init failed\n");
- return -1;
+ ret = drm_simple_encoder_init(mdev->dev, encoder,
+ DRM_MODE_ENCODER_DAC);
+ if (ret) {
+ drm_err(mdev->dev,
+ "drm_simple_encoder_init() failed, error %d\n",
+ ret);
+ return ret;
}
+ encoder->possible_crtcs = 0x1;
connector = mga_vga_init(mdev->dev);
if (!connector) {
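The mgag200 conversion replaces a kzalloc'd wrapper struct and hand-rolled encoder funcs with a drm_encoder embedded in struct mga_device plus drm_simple_encoder_init(), whose stock destroy callback only calls drm_encoder_cleanup(). The same pattern in isolation, assuming a device struct that outlives the encoder (my_device is a hypothetical name):

	#include <drm/drm_simple_kms_helper.h>

	/* Sketch of the embedded-encoder pattern. */
	struct my_device {
		struct drm_device *dev;
		struct drm_encoder encoder;	/* embedded, no kfree() needed */
	};

	static int my_encoder_init(struct my_device *mdev)
	{
		struct drm_encoder *encoder = &mdev->encoder;
		int ret;

		/* drm_simple_encoder_init() installs a destroy hook that
		 * only cleans up the encoder, so its lifetime is tied to
		 * the containing device instead of a separate allocation.
		 */
		ret = drm_simple_encoder_init(mdev->dev, encoder,
					      DRM_MODE_ENCODER_DAC);
		if (ret)
			return ret;

		encoder->possible_crtcs = 0x1;	/* single CRTC, as in mgag200 */
		return 0;
	}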
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 983afeaee737..748cd379065f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -796,12 +796,41 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
return true;
}
+#define GBIF_CLIENT_HALT_MASK BIT(0)
+#define GBIF_ARB_HALT_MASK BIT(1)
+
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ if (!a6xx_has_gbif(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+ spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
+ 0xf) == 0xf);
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ return;
+ }
+
+ /* Halt new client requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+ /* Halt all AXI requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+ /* The GBIF halt needs to be explicitly cleared */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
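Moving the helper into the GMU shutdown path reuses the same write-then-poll-for-ack idiom for both the legacy VBIF and the newer GBIF: assert a halt bit, spin until the matching ack bit is set, then clear the halt. The generic shape of that handshake, with a bounded loop standing in for the driver's spin_until() macro (reg_write/reg_read are placeholders, not msm helpers):

	#include <linux/delay.h>

	static void reg_write(u32 reg, u32 val);	/* placeholder MMIO write */
	static u32 reg_read(u32 reg);			/* placeholder MMIO read */

	/* Sketch of the halt/ack handshake used above. */
	static int halt_and_ack(u32 halt_reg, u32 ack_reg, u32 mask)
	{
		int tries = 1000;

		reg_write(halt_reg, mask);		/* request the halt */
		while (--tries) {
			if ((reg_read(ack_reg) & mask) == mask)
				return 0;		/* hardware acked */
			udelay(1);
		}
		return -ETIMEDOUT;
	}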
+
/* Gracefully try to shut down the GMU and by extension the GPU */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
- struct msm_gpu *gpu = &adreno_gpu->base;
u32 val;
/*
@@ -819,11 +848,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
return;
}
- /* Clear the VBIF pipe before shutting down */
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
- spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf)
- == 0xf);
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+ a6xx_bus_clear_pending_transactions(adreno_gpu);
/* tell the GMU we want to slumber */
a6xx_gmu_notify_slumber(gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index daf07800cde0..68af24150de5 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -378,18 +378,6 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
int ret;
- /*
- * During a previous slumber, GBIF halt is asserted to ensure
- * no further transaction can go through GPU before GPU
- * headswitch is turned off.
- *
- * This halt is deasserted once headswitch goes off but
- * incase headswitch doesn't goes off clear GBIF halt
- * here to ensure GPU wake-up doesn't fail because of
- * halted GPU transactions.
- */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
-
/* Make sure the GMU keeps the GPU on while we set it up */
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
@@ -470,10 +458,12 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
/* Select CP0 to always count cycles */
gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
- gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
- gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
- gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
- gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
+ if (adreno_is_a630(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
+ }
/* Enable fault detection */
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
@@ -748,39 +738,6 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
};
-#define GBIF_CLIENT_HALT_MASK BIT(0)
-#define GBIF_ARB_HALT_MASK BIT(1)
-
-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
-{
- struct msm_gpu *gpu = &adreno_gpu->base;
-
- if(!a6xx_has_gbif(adreno_gpu)){
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
- spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
- 0xf) == 0xf);
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
-
- return;
- }
-
- /* Halt new client requests on GBIF */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
- spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
- (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
-
- /* Halt all AXI requests on GBIF */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
- spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
- (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
-
- /*
- * GMU needs DDR access in slumber path. Deassert GBIF halt now
- * to allow for GMU to access system memory.
- */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
-}
-
static int a6xx_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -805,16 +762,6 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
devfreq_suspend_device(gpu->devfreq.devfreq);
- /*
- * Make sure the GMU is idle before continuing (because some transitions
- * may use VBIF
- */
- a6xx_gmu_wait_for_idle(&a6xx_gpu->gmu);
-
- /* Clear the VBIF pipe before shutting down */
- /* FIXME: This accesses the GPU - do we need to make sure it is on? */
- a6xx_bus_clear_pending_transactions(adreno_gpu);
-
return a6xx_gmu_stop(a6xx_gpu);
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index eda11abc5f01..e450e0b97211 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -7,6 +7,7 @@
#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"
+#include "a6xx_gpu.h"
#define HFI_MSG_ID(val) [val] = #val
@@ -216,48 +217,82 @@ static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
NULL, 0);
}
-static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
+static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
- struct a6xx_hfi_msg_bw_table msg = { 0 };
+ /* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x01;
+
+ msg->ddr_cmds_addrs[0] = 0x50000;
+ msg->ddr_cmds_addrs[1] = 0x5003c;
+ msg->ddr_cmds_addrs[2] = 0x5000c;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
/*
- * The sdm845 GMU doesn't do bus frequency scaling on its own but it
- * does need at least one entry in the list because it might be accessed
- * when the GMU is shutting down. Send a single "off" entry.
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
*/
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x5007c;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
- msg.bw_level_num = 1;
+static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
+ msg->bw_level_num = 1;
- msg.ddr_cmds_num = 3;
- msg.ddr_wait_bitmask = 0x07;
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x07;
- msg.ddr_cmds_addrs[0] = 0x50000;
- msg.ddr_cmds_addrs[1] = 0x5005c;
- msg.ddr_cmds_addrs[2] = 0x5000c;
+ msg->ddr_cmds_addrs[0] = 0x50000;
+ msg->ddr_cmds_addrs[1] = 0x5005c;
+ msg->ddr_cmds_addrs[2] = 0x5000c;
- msg.ddr_cmds_data[0][0] = 0x40000000;
- msg.ddr_cmds_data[0][1] = 0x40000000;
- msg.ddr_cmds_data[0][2] = 0x40000000;
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
/*
* These are the CX (CNOC) votes. They are used by the GMU, but the
* values for the sdm845 GMU are known and fixed, so we can hard-code them.
*/
- msg.cnoc_cmds_num = 3;
- msg.cnoc_wait_bitmask = 0x05;
+ msg->cnoc_cmds_num = 3;
+ msg->cnoc_wait_bitmask = 0x05;
- msg.cnoc_cmds_addrs[0] = 0x50034;
- msg.cnoc_cmds_addrs[1] = 0x5007c;
- msg.cnoc_cmds_addrs[2] = 0x5004c;
+ msg->cnoc_cmds_addrs[0] = 0x50034;
+ msg->cnoc_cmds_addrs[1] = 0x5007c;
+ msg->cnoc_cmds_addrs[2] = 0x5004c;
- msg.cnoc_cmds_data[0][0] = 0x40000000;
- msg.cnoc_cmds_data[0][1] = 0x00000000;
- msg.cnoc_cmds_data[0][2] = 0x40000000;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[0][1] = 0x00000000;
+ msg->cnoc_cmds_data[0][2] = 0x40000000;
+
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+ msg->cnoc_cmds_data[1][1] = 0x20000001;
+ msg->cnoc_cmds_data[1][2] = 0x60000001;
+}
+
+static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_bw_table msg = { 0 };
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
- msg.cnoc_cmds_data[1][0] = 0x60000001;
- msg.cnoc_cmds_data[1][1] = 0x20000001;
- msg.cnoc_cmds_data[1][2] = 0x60000001;
+ if (adreno_is_a618(adreno_gpu))
+ a618_build_bw_table(&msg);
+ else
+ a6xx_build_bw_table(&msg);
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
NULL, 0);
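The refactor turns the single hard-coded sdm845 table into per-target builders chosen at send time, so supporting another GMU only means adding one builder and one branch at the single call site. A sketch of extending the dispatch, where a640_build_bw_table() and adreno_is_a640() are hypothetical names:

	/* Sketch: extending the bw-table dispatch for a new target. */
	if (adreno_is_a618(adreno_gpu))
		a618_build_bw_table(&msg);
	else if (adreno_is_a640(adreno_gpu))
		a640_build_bw_table(&msg);	/* hypothetical new target */
	else
		a6xx_build_bw_table(&msg);	/* sdm845/a630 defaults */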
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index bf513411b243..17448505a9b5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -1272,6 +1272,8 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = {
.atomic_destroy_state = dpu_crtc_destroy_state,
.late_register = dpu_crtc_late_register,
.early_unregister = dpu_crtc_early_unregister,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
};
static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index f8ac3bf60fd6..58d3400668f5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -512,7 +512,6 @@ static void _dpu_encoder_adjust_mode(struct drm_connector *connector,
if (cur_mode->vdisplay == adj_mode->vdisplay &&
cur_mode->hdisplay == adj_mode->hdisplay &&
drm_mode_vrefresh(cur_mode) == drm_mode_vrefresh(adj_mode)) {
- adj_mode->private = cur_mode->private;
adj_mode->private_flags |= cur_mode->private_flags;
}
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 528632690f1e..a05282dede91 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -255,13 +255,13 @@ static const struct dpu_format dpu_format_map[] = {
INTERLEAVED_RGB_FMT(RGB565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGR565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index 29705e773a4b..80d3cfc14007 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -12,6 +12,7 @@
#define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
+#define HW_REV 0x0
#define HW_INTR_STATUS 0x0010
/* Max BW defined in KBps */
@@ -22,6 +23,17 @@ struct dpu_irq_controller {
struct irq_domain *domain;
};
+struct dpu_hw_cfg {
+ u32 val;
+ u32 offset;
+};
+
+struct dpu_mdss_hw_init_handler {
+ u32 hw_rev;
+ u32 hw_reg_count;
+ struct dpu_hw_cfg *hw_cfg;
+};
+
struct dpu_mdss {
struct msm_mdss base;
void __iomem *mmio;
@@ -32,6 +44,44 @@ struct dpu_mdss {
u32 num_paths;
};
+static struct dpu_hw_cfg hw_cfg[] = {
+ {
+ /* UBWC global settings */
+ .val = 0x1E,
+ .offset = 0x144,
+ }
+};
+
+static struct dpu_mdss_hw_init_handler cfg_handler[] = {
+ { .hw_rev = DPU_HW_VER_620,
+ .hw_reg_count = ARRAY_SIZE(hw_cfg),
+ .hw_cfg = hw_cfg
+ },
+};
+
+static void dpu_mdss_hw_init(struct dpu_mdss *dpu_mdss, u32 hw_rev)
+{
+ int i;
+ u32 count = 0;
+ struct dpu_hw_cfg *hw_cfg = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
+ if (cfg_handler[i].hw_rev == hw_rev) {
+ hw_cfg = cfg_handler[i].hw_cfg;
+ count = cfg_handler[i].hw_reg_count;
+ break;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ writel_relaxed(hw_cfg->val,
+ dpu_mdss->mmio + hw_cfg->offset);
+ hw_cfg++;
+ }
+}
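dpu_mdss_hw_init() walks a revision-keyed table, so target-specific register programming (here the DPU_HW_VER_620 UBWC setting) lives in data rather than code, and adding another revision is just a table edit. A sketch, where the 0x2AE value and the DPU_HW_VER_630 entry are hypothetical examples:

	/* Sketch: adding another revision to the init tables. */
	static struct dpu_hw_cfg sm8150_hw_cfg[] = {
		{ .val = 0x2AE, .offset = 0x144 },	/* hypothetical UBWC value */
	};

	static struct dpu_mdss_hw_init_handler cfg_handler[] = {
		{ .hw_rev = DPU_HW_VER_620,
		  .hw_reg_count = ARRAY_SIZE(hw_cfg),
		  .hw_cfg = hw_cfg },
		{ .hw_rev = DPU_HW_VER_630,		/* hypothetical */
		  .hw_reg_count = ARRAY_SIZE(sm8150_hw_cfg),
		  .hw_cfg = sm8150_hw_cfg },
	};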
+
static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev,
struct dpu_mdss *dpu_mdss)
{
@@ -174,12 +224,18 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
int ret;
+ u32 mdss_rev;
dpu_mdss_icc_request_bw(mdss);
ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
- if (ret)
+ if (ret) {
DPU_ERROR("clock enable failed, ret:%d\n", ret);
+ return ret;
+ }
+
+ mdss_rev = readl_relaxed(dpu_mdss->mmio + HW_REV);
+ dpu_mdss_hw_init(dpu_mdss, mdss_rev);
return ret;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index f34dca5d4532..c9239b07fe4f 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -481,6 +481,8 @@ static const struct drm_crtc_funcs mdp4_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
};
static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 05cc04f729d6..998bef1190a3 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -405,6 +405,83 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
+static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder(encoder, dev)
+ if (encoder->crtc == crtc)
+ return encoder;
+
+ return NULL;
+}
+
+static bool mdp5_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ unsigned int pipe = crtc->index;
+ struct drm_encoder *encoder;
+ int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
+
+ encoder = get_encoder_from_crtc(crtc);
+ if (!encoder) {
+ DRM_ERROR("no encoder found for crtc %d\n", pipe);
+ return false;
+ }
+
+ vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
+
+ /*
+ * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
+ * the end of VFP. Translate the porch values relative to the line
+ * counter positions.
+ */
+
+ vactive_start = vsw + vbp + 1;
+
+ vactive_end = vactive_start + mode->crtc_vdisplay;
+
+ /* last scan line before VSYNC */
+ vfp_end = mode->crtc_vtotal;
+
+ if (stime)
+ *stime = ktime_get();
+
+ line = mdp5_encoder_get_linecount(encoder);
+
+ if (line < vactive_start)
+ line -= vactive_start;
+ else if (line > vactive_end)
+ line = line - vfp_end - vactive_start;
+ else
+ line -= vactive_start;
+
+ *vpos = line;
+ *hpos = 0;
+
+ if (etime)
+ *etime = ktime_get();
+
+ return true;
+}
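The translation maps the hardware line counter (1 at the start of VSYNC, VTOTAL at the end of the front porch) into a position relative to active video, going negative while in the blanking region. A worked example with 1080p-style timings:

	/* Worked example for the line translation above, assuming
	 * vsw = 5, vbp = 36, vdisplay = 1080, vtotal = 1125:
	 *
	 *   vactive_start = 5 + 36 + 1 = 42
	 *   vactive_end   = 42 + 1080 = 1122
	 *   vfp_end       = 1125
	 *
	 *   line = 42   -> vpos = 0                 (first active line)
	 *   line = 1121 -> vpos = 1079              (last active line)
	 *   line = 1124 -> 1124 - 1125 - 42 = -43   (inside vertical blanking)
	 */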
+
+static u32 mdp5_crtc_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+
+ encoder = get_encoder_from_crtc(crtc);
+ if (!encoder)
+ return 0;
+
+ return mdp5_encoder_get_framecount(encoder);
+}
+
static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -1054,6 +1131,10 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.cursor_set = mdp5_crtc_cursor_set,
.cursor_move = mdp5_crtc_cursor_move,
.atomic_print_state = mdp5_crtc_atomic_print_state,
+ .get_vblank_counter = mdp5_crtc_get_vblank_counter,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
@@ -1063,6 +1144,7 @@ static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
.atomic_flush = mdp5_crtc_atomic_flush,
.atomic_enable = mdp5_crtc_atomic_enable,
.atomic_disable = mdp5_crtc_atomic_disable,
+ .get_scanout_position = mdp5_crtc_get_scanout_position,
};
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
@@ -1109,8 +1191,8 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
msecs_to_jiffies(50));
if (ret == 0)
- dev_warn(dev->dev, "pp done time out, lm=%d\n",
- mdp5_cstate->pipeline.mixer->lm);
+ dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n",
+ mdp5_cstate->pipeline.mixer->lm);
}
static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index e43ecd4be10a..6650f478b226 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -583,98 +583,6 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
return 0;
}
-static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_encoder *encoder;
-
- drm_for_each_encoder(encoder, dev)
- if (encoder->crtc == crtc)
- return encoder;
-
- return NULL;
-}
-
-static bool mdp5_get_scanoutpos(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
-
- crtc = priv->crtcs[pipe];
- if (!crtc) {
- DRM_ERROR("Invalid crtc %d\n", pipe);
- return false;
- }
-
- encoder = get_encoder_from_crtc(crtc);
- if (!encoder) {
- DRM_ERROR("no encoder found for crtc %d\n", pipe);
- return false;
- }
-
- vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
- vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
-
- /*
- * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
- * the end of VFP. Translate the porch values relative to the line
- * counter positions.
- */
-
- vactive_start = vsw + vbp + 1;
-
- vactive_end = vactive_start + mode->crtc_vdisplay;
-
- /* last scan line before VSYNC */
- vfp_end = mode->crtc_vtotal;
-
- if (stime)
- *stime = ktime_get();
-
- line = mdp5_encoder_get_linecount(encoder);
-
- if (line < vactive_start) {
- line -= vactive_start;
- } else if (line > vactive_end) {
- line = line - vfp_end - vactive_start;
- } else {
- line -= vactive_start;
- }
-
- *vpos = line;
- *hpos = 0;
-
- if (etime)
- *etime = ktime_get();
-
- return true;
-}
-
-static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
-
- if (pipe >= priv->num_crtcs)
- return 0;
-
- crtc = priv->crtcs[pipe];
- if (!crtc)
- return 0;
-
- encoder = get_encoder_from_crtc(crtc);
- if (!encoder)
- return 0;
-
- return mdp5_encoder_get_framecount(encoder);
-}
-
struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -762,9 +670,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
dev->mode_config.max_width = 0xffff;
dev->mode_config.max_height = 0xffff;
- dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
- dev->driver->get_scanout_position = mdp5_get_scanoutpos;
- dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
dev->vblank_disable_immediate = true;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 104115d112eb..4b363bd7ddff 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -336,7 +336,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
return num;
}
-static int dsi_mgr_connector_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
int id = dsi_mgr_connector_get_id(connector);
@@ -506,6 +506,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel;
+ struct msm_dsi_pll *src_pll;
bool is_dual_dsi = IS_DUAL_DSI();
int ret;
@@ -539,6 +540,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
id, ret);
}
+ /* Save PLL status if it is a clock source */
+ src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
+ msm_dsi_pll_save_state(src_pll);
+
ret = msm_dsi_host_power_off(host);
if (ret)
pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
@@ -684,7 +689,7 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
bridge = &dsi_bridge->base;
bridge->funcs = &dsi_mgr_bridge_funcs;
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret)
goto fail;
@@ -713,7 +718,7 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
encoder = msm_dsi->encoder;
/* link the internal dsi bridge to the external bridge */
- drm_bridge_attach(encoder, ext_bridge, int_bridge);
+ drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
/*
* we need the drm_connector created by the external bridge
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index b0cfa67d2a57..f509ebd77500 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -724,10 +724,6 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
if (!phy || !phy->cfg->ops.disable)
return;
- /* Save PLL status if it is a clock source */
- if (phy->usecase != MSM_DSI_PHY_SLAVE)
- msm_dsi_pll_save_state(phy->pll);
-
phy->cfg->ops.disable(phy);
dsi_phy_regulator_disable(phy);
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index 8f6100db90ed..6ac04fc303f5 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -411,6 +411,12 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
if (pll_10nm->slave)
dsi_pll_enable_pll_bias(pll_10nm->slave);
+ rc = dsi_pll_10nm_vco_set_rate(hw, pll_10nm->vco_current_rate, 0);
+ if (rc) {
+ pr_err("vco_set_rate failed, rc=%d\n", rc);
+ return rc;
+ }
+
/* Start PLL */
pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
0x01);
@@ -751,9 +757,9 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
hw = clk_hw_register_mux(dev, clk_name,
- (const char *[]){
+ ((const char *[]){
parent, parent2, parent3, parent4
- }, 4, 0, pll_10nm->phy_cmn_mmio +
+ }), 4, 0, pll_10nm->phy_cmn_mmio +
REG_DSI_10nm_PHY_CMN_CLK_CFG1,
0, 2, 0, NULL);
if (IS_ERR(hw)) {
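The extra parentheses around the compound-literal array matter because clk_hw_register_mux() is implemented as a function-like macro in recent kernels: the preprocessor splits macro arguments on top-level commas, and braces, unlike parentheses, do not protect them. A minimal illustration:

	/* Why the parentheses are needed with a function-like macro: */
	#define TAKES_TWO(a, b)	do_something(a, b)

	/* TAKES_TWO((const char *[]){ "x", "y" }, 2);
	 *   -> preprocessor sees THREE arguments:
	 *      "(const char *[]){ \"x\"", "\"y\" }", "2"   -- error
	 *
	 * TAKES_TWO(((const char *[]){ "x", "y" }), 2);
	 *   -> inner comma is inside parentheses: TWO arguments -- ok
	 */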
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index 8c99e01ae332..6dffd7f4a99b 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -554,9 +554,9 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
clks[num++] = clk_register_mux(dev, clk_name,
- (const char *[]){
+ ((const char *[]){
parent1, parent2
- }, 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +
+ }), 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +
REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
index ad4e963ccd9b..a78d6077802b 100644
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -178,7 +178,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
goto fail;
}
- ret = drm_bridge_attach(encoder, edp->bridge, NULL);
+ ret = drm_bridge_attach(encoder, edp->bridge, NULL, 0);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/edp/edp_bridge.c b/drivers/gpu/drm/msm/edp/edp_bridge.c
index b65b5cc2dba2..c69a37e0c708 100644
--- a/drivers/gpu/drm/msm/edp/edp_bridge.c
+++ b/drivers/gpu/drm/msm/edp/edp_bridge.c
@@ -97,7 +97,7 @@ struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp)
bridge = &edp_bridge->base;
bridge->funcs = &edp_bridge_funcs;
- ret = drm_bridge_attach(edp->encoder, bridge, NULL);
+ ret = drm_bridge_attach(edp->encoder, bridge, NULL, 0);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 1a9b6289637d..3a8646535c14 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -327,7 +327,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- ret = drm_bridge_attach(encoder, hdmi->bridge, NULL);
+ ret = drm_bridge_attach(encoder, hdmi->bridge, NULL, 0);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index ba81338a9bf8..6e380db9287b 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -287,7 +287,7 @@ struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
bridge = &hdmi_bridge->base;
bridge->funcs = &msm_hdmi_bridge_funcs;
- ret = drm_bridge_attach(hdmi->encoder, bridge, NULL);
+ ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, 0);
if (ret)
goto fail;
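All of these call sites gain drm_bridge_attach()'s new flags argument. Passing 0 keeps the historical behaviour, where the bridge chain creates its own drm_connector; DRM_BRIDGE_ATTACH_NO_CONNECTOR instead asks the display driver to create the connector itself. In outline:

	/* The updated signature takes a flags argument; 0 preserves the
	 * old behaviour (the bridge creates the connector).
	 */
	ret = drm_bridge_attach(encoder, bridge, NULL, 0);
	if (ret)
		goto fail;

	/* With DRM_BRIDGE_ATTACH_NO_CONNECTOR, the driver is expected
	 * to create the drm_connector on its own instead.
	 */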
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index f50fefb87040..2a82c23a6e4d 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -138,7 +138,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
size = resource_size(res);
- ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
+ ptr = devm_ioremap(&pdev->dev, res->start, size);
if (!ptr) {
DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
return ERR_PTR(-ENOMEM);
@@ -441,6 +441,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
if (ret)
goto err_msm_uninit;
+ if (!dev->dma_parms) {
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ GFP_KERNEL);
+ if (!dev->dma_parms)
+ return -ENOMEM;
+ }
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
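The added block allocates dev->dma_parms when the platform bus has not, then lifts the maximum DMA segment size to the full 32-bit mask, so scatter-gather imports are not split at the core's default 64 KiB boundary. The ordering matters; a sketch of the dependency:

	/* dma_set_max_seg_size() records the limit in dev->dma_parms,
	 * which is why the struct must be allocated first; without it
	 * the call fails and the core keeps its 64 KiB default.
	 * DMA_BIT_MASK(32) == 0xffffffff, i.e. effectively unlimited.
	 */
	if (dma_set_max_seg_size(dev, DMA_BIT_MASK(32)))
		dev_warn(dev, "could not set DMA segment size\n");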
msm_gem_shrinker_init(ddev);
switch (get_mdp_ver(pdev)) {
@@ -660,8 +668,10 @@ static void msm_irq_uninstall(struct drm_device *dev)
kms->funcs->irq_uninstall(kms);
}
-static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
if (!kms)
@@ -670,8 +680,10 @@ static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
return vblank_ctrl_queue_work(priv, pipe, true);
}
-static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void msm_crtc_disable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
if (!kms)
@@ -996,8 +1008,6 @@ static struct drm_driver msm_driver = {
.irq_preinstall = msm_irq_preinstall,
.irq_postinstall = msm_irq_postinstall,
.irq_uninstall = msm_irq_uninstall,
- .enable_vblank = msm_enable_vblank,
- .disable_vblank = msm_disable_vblank,
.gem_free_object_unlocked = msm_gem_free_object,
.gem_vm_ops = &vm_ops,
.dumb_create = msm_gem_dumb_create,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 71547e756e29..194d900a460e 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -232,6 +232,9 @@ struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
void msm_atomic_state_clear(struct drm_atomic_state *state);
void msm_atomic_state_free(struct drm_atomic_state *state);
+int msm_crtc_enable_vblank(struct drm_crtc *crtc);
+void msm_crtc_disable_vblank(struct drm_crtc *crtc);
+
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, int npages);
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
@@ -454,8 +457,7 @@ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
remaining_jiffies = 0;
} else {
ktime_t rem = ktime_sub(*timeout, now);
- struct timespec ts = ktime_to_timespec(rem);
- remaining_jiffies = timespec_to_jiffies(&ts);
+ remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
}
return remaining_jiffies;
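The replacement computes jiffies directly from the ktime delta: with HZ ticks per second, one jiffy spans NSEC_PER_SEC / HZ nanoseconds, so dividing the remaining nanoseconds by that span avoids the 32-bit struct timespec round-trip removed here. A worked check, assuming HZ = 250:

	/* Worked check of the conversion, assuming HZ = 250:
	 *
	 *   NSEC_PER_SEC / HZ = 1000000000 / 250 = 4000000 ns per jiffy
	 *   rem = 10 ms = 10000000 ns
	 *   ktime_divns(rem, 4000000) = 2 jiffies (rounded down)
	 *
	 * ktime_divns() operates on a 64-bit ns value, so the result
	 * also stays valid past the 32-bit timespec limits.
	 */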
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index db48867df47d..47235f8c5922 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -160,16 +160,12 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
+ ret = drm_fb_helper_init(dev, helper);
if (ret) {
DRM_DEV_ERROR(dev->dev, "could not init fbdev: ret=%d\n", ret);
goto fail;
}
- ret = drm_fb_helper_single_add_all_connectors(helper);
- if (ret)
- goto fini;
-
/* the fw fb could be anywhere in memory */
drm_fb_helper_remove_conflicting_framebuffers(NULL, "msm", false);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 37c50ea8f847..1f08de4241e0 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1248,6 +1248,9 @@ static const struct drm_crtc_funcs nv04_crtc_funcs = {
.set_config = drm_crtc_helper_set_config,
.page_flip = nv04_crtc_page_flip,
.destroy = nv_crtc_destroy,
+ .enable_vblank = nouveau_display_vblank_enable,
+ .disable_vblank = nouveau_display_vblank_disable,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
@@ -1258,6 +1261,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
.mode_set_base = nv04_crtc_mode_set_base,
.mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
.disable = nv_crtc_disable,
+ .get_scanout_position = nouveau_display_scanoutpos,
};
static const uint32_t modeset_formats[] = {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index a3dc2ba19fb2..4d1c58468dbc 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1256,30 +1256,6 @@ nv50_mstm_prepare(struct nv50_mstm *mstm)
}
}
-static void
-nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_connector *connector)
-{
- struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nv50_mstc *mstc = nv50_mstc(connector);
-
- drm_connector_unregister(&mstc->connector);
-
- drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
-
- drm_connector_put(&mstc->connector);
-}
-
-static void
-nv50_mstm_register_connector(struct drm_connector *connector)
-{
- struct nouveau_drm *drm = nouveau_drm(connector->dev);
-
- drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
-
- drm_connector_register(connector);
-}
-
static struct drm_connector *
nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, const char *path)
@@ -1298,8 +1274,6 @@ nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
static const struct drm_dp_mst_topology_cbs
nv50_mstm = {
.add_connector = nv50_mstm_add_connector,
- .register_connector = nv50_mstm_register_connector,
- .destroy_connector = nv50_mstm_destroy_connector,
};
void
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index d9d64602947d..8f6455697ba7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -29,6 +29,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_vblank.h>
#include "nouveau_connector.h"
void
nv50_head_flush_clr(struct nv50_head *head,
@@ -413,6 +414,7 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
static const struct drm_crtc_helper_funcs
nv50_head_help = {
.atomic_check = nv50_head_atomic_check,
+ .get_scanout_position = nouveau_display_scanoutpos,
};
static void
@@ -481,6 +483,9 @@ nv50_head_func = {
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = nv50_head_atomic_duplicate_state,
.atomic_destroy_state = nv50_head_atomic_destroy_state,
+ .enable_vblank = nouveau_display_vblank_enable,
+ .disable_vblank = nouveau_display_vblank_disable,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
struct nv50_head *
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 890315291b01..bb737f9281e6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -458,6 +458,8 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
asyw->clr.ntfy = armw->ntfy.handle != 0;
asyw->clr.sema = armw->sema.handle != 0;
asyw->clr.xlut = armw->xlut.handle != 0;
+ if (asyw->clr.xlut && asyw->visible)
+ asyw->set.xlut = asyw->xlut.handle != 0;
asyw->clr.csc = armw->csc.valid;
if (wndw->func->image_clr)
asyw->clr.image = armw->image.handle[0] != 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 1b62ccc57aef..2b4b21b02e40 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -647,13 +647,6 @@ nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
}
static int
-nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- /* We'll do this from user space. */
- return 0;
-}
-
-static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
@@ -1697,7 +1690,6 @@ struct ttm_bo_driver nouveau_bo_driver = {
.ttm_tt_create = &nouveau_ttm_tt_create,
.ttm_tt_populate = &nouveau_ttm_tt_populate,
.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
- .invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = nouveau_bo_evict_flags,
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 53f9bceaf17a..700817dc4fa0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -54,15 +54,10 @@ nouveau_display_vblank_handler(struct nvif_notify *notify)
}
int
-nouveau_display_vblank_enable(struct drm_device *dev, unsigned int pipe)
+nouveau_display_vblank_enable(struct drm_crtc *crtc)
{
- struct drm_crtc *crtc;
struct nouveau_crtc *nv_crtc;
- crtc = drm_crtc_from_index(dev, pipe);
- if (!crtc)
- return -EINVAL;
-
nv_crtc = nouveau_crtc(crtc);
nvif_notify_get(&nv_crtc->vblank);
@@ -70,15 +65,10 @@ nouveau_display_vblank_enable(struct drm_device *dev, unsigned int pipe)
}
void
-nouveau_display_vblank_disable(struct drm_device *dev, unsigned int pipe)
+nouveau_display_vblank_disable(struct drm_crtc *crtc)
{
- struct drm_crtc *crtc;
struct nouveau_crtc *nv_crtc;
- crtc = drm_crtc_from_index(dev, pipe);
- if (!crtc)
- return;
-
nv_crtc = nouveau_crtc(crtc);
nvif_notify_put(&nv_crtc->vblank);
}
@@ -136,21 +126,13 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
}
bool
-nouveau_display_scanoutpos(struct drm_device *dev, unsigned int pipe,
+nouveau_display_scanoutpos(struct drm_crtc *crtc,
bool in_vblank_irq, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
- struct drm_crtc *crtc;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (nouveau_crtc(crtc)->index == pipe) {
- return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
- stime, etime);
- }
- }
-
- return false;
+ return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
+ stime, etime);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 6e8e66882e45..de004018ab5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -61,11 +61,12 @@ int nouveau_display_init(struct drm_device *dev, bool resume, bool runtime);
void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
int nouveau_display_suspend(struct drm_device *dev, bool runtime);
void nouveau_display_resume(struct drm_device *dev, bool runtime);
-int nouveau_display_vblank_enable(struct drm_device *, unsigned int);
-void nouveau_display_vblank_disable(struct drm_device *, unsigned int);
-bool nouveau_display_scanoutpos(struct drm_device *, unsigned int,
- bool, int *, int *, ktime_t *,
- ktime_t *, const struct drm_display_mode *);
+int nouveau_display_vblank_enable(struct drm_crtc *crtc);
+void nouveau_display_vblank_disable(struct drm_crtc *crtc);
+bool nouveau_display_scanoutpos(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
struct drm_mode_create_dumb *args);
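The nouveau prototypes now take a drm_crtc directly, matching the series-wide move of vblank and scanout hooks from struct drm_driver into drm_crtc_funcs / drm_crtc_helper_funcs: each CRTC registers its own callbacks instead of the driver demultiplexing a pipe index. The per-CRTC wiring in outline, using the helper names from the hunks above:

	/* Outline of the per-CRTC hook registration. */
	static const struct drm_crtc_funcs example_crtc_funcs = {
		.enable_vblank		= nouveau_display_vblank_enable,
		.disable_vblank		= nouveau_display_vblank_disable,
		.get_vblank_timestamp	= drm_crtc_vblank_helper_get_vblank_timestamp,
	};

	static const struct drm_crtc_helper_funcs example_crtc_helper_funcs = {
		.get_scanout_position	= nouveau_display_scanoutpos,
	};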
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index b65ae817eabf..6b1629c14dd7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -1120,11 +1120,6 @@ driver_stub = {
.debugfs_init = nouveau_drm_debugfs_init,
#endif
- .enable_vblank = nouveau_display_vblank_enable,
- .disable_vblank = nouveau_display_vblank_disable,
- .get_scanout_position = nouveau_display_scanoutpos,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
-
.ioctls = nouveau_ioctls,
.num_ioctls = ARRAY_SIZE(nouveau_ioctls),
.fops = &nouveau_driver_fops,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 0c5cdda3c336..24d543a01f43 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -558,14 +558,10 @@ nouveau_fbcon_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
- ret = drm_fb_helper_init(dev, &fbcon->helper, 4);
+ ret = drm_fb_helper_init(dev, &fbcon->helper);
if (ret)
goto free;
- ret = drm_fb_helper_single_add_all_connectors(&fbcon->helper);
- if (ret)
- goto fini;
-
if (preferred_bpp != 8 && preferred_bpp != 16 && preferred_bpp != 32) {
if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
preferred_bpp = 8;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index d865d8aeac3c..c85dd8afa3c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return dev->open_count == 0;
+ return atomic_read(&dev->open_count) == 0;
}
static const struct vga_switcheroo_client_ops
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index c7d700916eae..8ebbe1656008 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2579,6 +2579,7 @@ nv166_chipset = {
static const struct nvkm_device_chip
nv167_chipset = {
.name = "TU117",
+ .acr = tu102_acr_new,
.bar = tu102_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2607,6 +2608,7 @@ nv167_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
+ .gr = tu102_gr_new,
.nvdec[0] = gm107_nvdec_new,
.nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
@@ -2615,6 +2617,7 @@ nv167_chipset = {
static const struct nvkm_device_chip
nv168_chipset = {
.name = "TU116",
+ .acr = tu102_acr_new,
.bar = tu102_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2643,6 +2646,7 @@ nv168_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
+ .gr = tu102_gr_new,
.nvdec[0] = gm107_nvdec_new,
.nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
index 454668b1cf54..a9efa4d78be9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
@@ -164,6 +164,32 @@ MODULE_FIRMWARE("nvidia/tu106/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/tu106/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/tu106/gr/sw_method_init.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/tu116/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/sw_method_init.bin");
+
static const struct gf100_gr_fwif
tu102_gr_fwif[] = {
{ 0, gm200_gr_load, &tu102_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
index 7f4b89d82d32..d28d8f36ae24 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
@@ -107,6 +107,12 @@ MODULE_FIRMWARE("nvidia/tu104/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/tu106/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/tu106/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/tu116/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/tu116/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/tu117/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/tu117/acr/ucode_unload.bin");
+
static const struct nvkm_acr_hsf_fwif
tu102_acr_unload_fwif[] = {
{ 0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 },
@@ -130,6 +136,8 @@ tu102_acr_asb_0 = {
MODULE_FIRMWARE("nvidia/tu102/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/tu104/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/tu106/acr/ucode_asb.bin");
+MODULE_FIRMWARE("nvidia/tu116/acr/ucode_asb.bin");
+MODULE_FIRMWARE("nvidia/tu117/acr/ucode_asb.bin");
static const struct nvkm_acr_hsf_fwif
tu102_acr_asb_fwif[] = {
@@ -154,6 +162,12 @@ MODULE_FIRMWARE("nvidia/tu104/acr/ucode_ahesasc.bin");
MODULE_FIRMWARE("nvidia/tu106/acr/bl.bin");
MODULE_FIRMWARE("nvidia/tu106/acr/ucode_ahesasc.bin");
+MODULE_FIRMWARE("nvidia/tu116/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/tu116/acr/ucode_ahesasc.bin");
+
+MODULE_FIRMWARE("nvidia/tu117/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/tu117/acr/ucode_ahesasc.bin");
+
static const struct nvkm_acr_hsf_fwif
tu102_acr_ahesasc_fwif[] = {
{ 0, nvkm_acr_hsfw_load, &tu102_acr_ahesasc_0 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
index 389bad312bf2..10ff5d053f7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -51,3 +51,5 @@ MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu116/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu117/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index b562a8cd61bf..f2be594c7eff 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -1,28 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "OMAPDRM External Display Device Drivers"
-config DRM_OMAP_ENCODER_OPA362
- tristate "OPA362 external analog amplifier"
- help
- Driver for OPA362 external analog TV amplifier controlled
- through a GPIO.
-
-config DRM_OMAP_ENCODER_TPD12S015
- tristate "TPD12S015 HDMI ESD protection and level shifter"
- help
- Driver for TPD12S015, which offers HDMI ESD protection and level
- shifting.
-
-config DRM_OMAP_CONNECTOR_HDMI
- tristate "HDMI Connector"
- help
- Driver for a generic HDMI connector.
-
-config DRM_OMAP_CONNECTOR_ANALOG_TV
- tristate "Analog TV Connector"
- help
- Driver for a generic analog TV connector.
-
config DRM_OMAP_PANEL_DSI_CM
tristate "Generic DSI Command Mode Panel"
depends on BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
index cb76859dc574..488ddf153613 100644
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ b/drivers/gpu/drm/omapdrm/displays/Makefile
@@ -1,6 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o
-obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o
-obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o
-obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
deleted file mode 100644
index 0d20fab605d7..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ /dev/null
@@ -1,97 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Analog TV Connector driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <[email protected]>
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct device *dev;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int tvc_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void tvc_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static const struct omap_dss_device_ops tvc_ops = {
- .connect = tvc_connect,
- .disconnect = tvc_disconnect,
-};
-
-static int tvc_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
- ddata->dev = &pdev->dev;
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &tvc_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_VENC;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit tvc_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-
- omapdss_device_unregister(&ddata->dssdev);
-
- return 0;
-}
-
-static const struct of_device_id tvc_of_match[] = {
- { .compatible = "omapdss,svideo-connector", },
- { .compatible = "omapdss,composite-video-connector", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, tvc_of_match);
-
-static struct platform_driver tvc_connector_driver = {
- .probe = tvc_probe,
- .remove = __exit_p(tvc_remove),
- .driver = {
- .name = "connector-analog-tv",
- .of_match_table = tvc_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(tvc_connector_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
-MODULE_DESCRIPTION("Analog TV Connector driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
deleted file mode 100644
index f5d69d810bb8..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ /dev/null
@@ -1,183 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * HDMI Connector driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <[email protected]>
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
- void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
- void *hpd_cb_data;
- struct mutex hpd_lock;
-
- struct device *dev;
-
- struct gpio_desc *hpd_gpio;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int hdmic_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void hdmic_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static bool hdmic_detect(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- return gpiod_get_value_cansleep(ddata->hpd_gpio);
-}
-
-static void hdmic_register_hpd_cb(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
- enum drm_connector_status status),
- void *cb_data)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = cb;
- ddata->hpd_cb_data = cb_data;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static void hdmic_unregister_hpd_cb(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = NULL;
- ddata->hpd_cb_data = NULL;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static const struct omap_dss_device_ops hdmic_ops = {
- .connect = hdmic_connect,
- .disconnect = hdmic_disconnect,
-
- .detect = hdmic_detect,
- .register_hpd_cb = hdmic_register_hpd_cb,
- .unregister_hpd_cb = hdmic_unregister_hpd_cb,
-};
-
-static irqreturn_t hdmic_hpd_isr(int irq, void *data)
-{
- struct panel_drv_data *ddata = data;
-
- mutex_lock(&ddata->hpd_lock);
- if (ddata->hpd_cb) {
- enum drm_connector_status status;
-
- if (hdmic_detect(&ddata->dssdev))
- status = connector_status_connected;
- else
- status = connector_status_disconnected;
-
- ddata->hpd_cb(ddata->hpd_cb_data, status);
- }
- mutex_unlock(&ddata->hpd_lock);
-
- return IRQ_HANDLED;
-}
-
-static int hdmic_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- struct gpio_desc *gpio;
- int r;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
- ddata->dev = &pdev->dev;
-
- mutex_init(&ddata->hpd_lock);
-
- /* HPD GPIO */
- gpio = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN);
- if (IS_ERR(gpio)) {
- dev_err(&pdev->dev, "failed to parse HPD gpio\n");
- return PTR_ERR(gpio);
- }
-
- ddata->hpd_gpio = gpio;
-
- if (ddata->hpd_gpio) {
- r = devm_request_threaded_irq(&pdev->dev,
- gpiod_to_irq(ddata->hpd_gpio),
- NULL, hdmic_hpd_isr,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
- "hdmic hpd", ddata);
- if (r)
- return r;
- }
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &hdmic_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
- dssdev->ops_flags = ddata->hpd_gpio
- ? OMAP_DSS_DEVICE_OP_DETECT | OMAP_DSS_DEVICE_OP_HPD
- : 0;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit hdmic_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-
- omapdss_device_unregister(&ddata->dssdev);
-
- return 0;
-}
-
-static const struct of_device_id hdmic_of_match[] = {
- { .compatible = "omapdss,hdmi-connector", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, hdmic_of_match);
-
-static struct platform_driver hdmi_connector_driver = {
- .probe = hdmic_probe,
- .remove = __exit_p(hdmic_remove),
- .driver = {
- .name = "connector-hdmi",
- .of_match_table = hdmic_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(hdmi_connector_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
-MODULE_DESCRIPTION("HDMI Connector driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
deleted file mode 100644
index b992387ed674..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * OPA362 analog video amplifier with output/power control
- *
- * Copyright (C) 2014 Golden Delicious Computers
- * Author: H. Nikolaus Schaller <[email protected]>
- *
- * based on encoder-tfp410
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <[email protected]>
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct gpio_desc *enable_gpio;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int opa362_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return omapdss_device_connect(dst->dss, dst, dst->next);
-}
-
-static void opa362_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
-}
-
-static void opa362_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (ddata->enable_gpio)
- gpiod_set_value_cansleep(ddata->enable_gpio, 1);
-}
-
-static void opa362_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (ddata->enable_gpio)
- gpiod_set_value_cansleep(ddata->enable_gpio, 0);
-}
-
-static const struct omap_dss_device_ops opa362_ops = {
- .connect = opa362_connect,
- .disconnect = opa362_disconnect,
- .enable = opa362_enable,
- .disable = opa362_disable,
-};
-
-static int opa362_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- struct gpio_desc *gpio;
-
- dev_dbg(&pdev->dev, "probe\n");
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
-
- gpio = devm_gpiod_get_optional(&pdev->dev, "enable", GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->enable_gpio = gpio;
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &opa362_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_VENC;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(1) | BIT(0);
-
- dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1);
- if (IS_ERR(dssdev->next)) {
- if (PTR_ERR(dssdev->next) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to find video sink\n");
- return PTR_ERR(dssdev->next);
- }
-
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit opa362_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- if (dssdev->next)
- omapdss_device_put(dssdev->next);
- omapdss_device_unregister(&ddata->dssdev);
-
- opa362_disable(dssdev);
-
- return 0;
-}
-
-static const struct of_device_id opa362_of_match[] = {
- { .compatible = "omapdss,ti,opa362", },
- {},
-};
-MODULE_DEVICE_TABLE(of, opa362_of_match);
-
-static struct platform_driver opa362_driver = {
- .probe = opa362_probe,
- .remove = __exit_p(opa362_remove),
- .driver = {
- .name = "amplifier-opa362",
- .of_match_table = opa362_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(opa362_driver);
-
-MODULE_AUTHOR("H. Nikolaus Schaller <[email protected]>");
-MODULE_DESCRIPTION("OPA362 analog video amplifier with output/power control");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
deleted file mode 100644
index 089105c5aa0a..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ /dev/null
@@ -1,217 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * TPD12S015 HDMI ESD protection & level shifter chip driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <[email protected]>
- */
-
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/gpio/consumer.h>
-#include <linux/mutex.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
- void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
- void *hpd_cb_data;
- struct mutex hpd_lock;
-
- struct gpio_desc *ct_cp_hpd_gpio;
- struct gpio_desc *ls_oe_gpio;
- struct gpio_desc *hpd_gpio;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int tpd_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- struct panel_drv_data *ddata = to_panel_data(dst);
- int r;
-
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1);
- gpiod_set_value_cansleep(ddata->ls_oe_gpio, 1);
-
- /* DC-DC converter needs at max 300us to get to 90% of 5V */
- udelay(300);
-
- return 0;
-}
-
-static void tpd_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- struct panel_drv_data *ddata = to_panel_data(dst);
-
- gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0);
- gpiod_set_value_cansleep(ddata->ls_oe_gpio, 0);
-
- omapdss_device_disconnect(dst, dst->next);
-}
-
-static bool tpd_detect(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- return gpiod_get_value_cansleep(ddata->hpd_gpio);
-}
-
-static void tpd_register_hpd_cb(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
- enum drm_connector_status status),
- void *cb_data)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = cb;
- ddata->hpd_cb_data = cb_data;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = NULL;
- ddata->hpd_cb_data = NULL;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static const struct omap_dss_device_ops tpd_ops = {
- .connect = tpd_connect,
- .disconnect = tpd_disconnect,
- .detect = tpd_detect,
- .register_hpd_cb = tpd_register_hpd_cb,
- .unregister_hpd_cb = tpd_unregister_hpd_cb,
-};
-
-static irqreturn_t tpd_hpd_isr(int irq, void *data)
-{
- struct panel_drv_data *ddata = data;
-
- mutex_lock(&ddata->hpd_lock);
- if (ddata->hpd_cb) {
- enum drm_connector_status status;
-
- if (tpd_detect(&ddata->dssdev))
- status = connector_status_connected;
- else
- status = connector_status_disconnected;
-
- ddata->hpd_cb(ddata->hpd_cb_data, status);
- }
- mutex_unlock(&ddata->hpd_lock);
-
- return IRQ_HANDLED;
-}
-
-static int tpd_probe(struct platform_device *pdev)
-{
- struct omap_dss_device *dssdev;
- struct panel_drv_data *ddata;
- int r;
- struct gpio_desc *gpio;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
-
- gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 0,
- GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->ct_cp_hpd_gpio = gpio;
-
- gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 1,
- GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->ls_oe_gpio = gpio;
-
- gpio = devm_gpiod_get_index(&pdev->dev, NULL, 2,
- GPIOD_IN);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->hpd_gpio = gpio;
-
- mutex_init(&ddata->hpd_lock);
-
- r = devm_request_threaded_irq(&pdev->dev, gpiod_to_irq(ddata->hpd_gpio),
- NULL, tpd_hpd_isr,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "tpd12s015 hpd", ddata);
- if (r)
- return r;
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &tpd_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(1) | BIT(0);
- dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT
- | OMAP_DSS_DEVICE_OP_HPD;
-
- dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1);
- if (IS_ERR(dssdev->next)) {
- if (PTR_ERR(dssdev->next) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to find video sink\n");
- return PTR_ERR(dssdev->next);
- }
-
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit tpd_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- if (dssdev->next)
- omapdss_device_put(dssdev->next);
- omapdss_device_unregister(&ddata->dssdev);
-
- return 0;
-}
-
-static const struct of_device_id tpd_of_match[] = {
- { .compatible = "omapdss,ti,tpd12s015", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, tpd_of_match);
-
-static struct platform_driver tpd_driver = {
- .probe = tpd_probe,
- .remove = __exit_p(tpd_remove),
- .driver = {
- .name = "tpd12s015",
- .of_match_table = tpd_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(tpd_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
-MODULE_DESCRIPTION("TPD12S015 driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 564e3e1a1891..3484b5d4a91c 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -678,7 +678,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
if (r)
goto err;
- ddata->enabled = 1;
+ ddata->enabled = true;
if (!ddata->intro_printed) {
dev_info(&ddata->pdev->dev, "panel revision %02x.%02x.%02x\n",
@@ -729,7 +729,7 @@ static void dsicm_power_off(struct panel_drv_data *ddata)
if (ddata->vpnl)
regulator_disable(ddata->vpnl);
- ddata->enabled = 0;
+ ddata->enabled = false;
}
static int dsicm_panel_reset(struct panel_drv_data *ddata)
@@ -1265,7 +1265,7 @@ static int dsicm_probe(struct platform_device *pdev)
dssdev->type = OMAP_DISPLAY_TYPE_DSI;
dssdev->display = true;
dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
+ dssdev->of_port = 0;
dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
index 5950c3f52c2e..f967e6948f2e 100644
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ b/drivers/gpu/drm/omapdrm/dss/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_OMAP2_DSS_INIT) += omapdss-boot-init.o
obj-$(CONFIG_OMAP_DSS_BASE) += omapdss-base.o
-omapdss-base-y := base.o display.o dss-of.o output.o
+omapdss-base-y := base.o display.o output.o
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
# Core DSS files
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index a1970b9db6ab..c7650a7c155d 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -149,8 +149,7 @@ struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from)
goto done;
}
- if (dssdev->id &&
- (dssdev->next || dssdev->bridge || dssdev->panel))
+ if (dssdev->id && (dssdev->next || dssdev->bridge))
goto done;
}
@@ -185,11 +184,10 @@ int omapdss_device_connect(struct dss_device *dss,
if (!dst) {
/*
* The destination is NULL when the source is connected to a
- * bridge or panel instead of a DSS device. Stop here, we will
- * attach the bridge or panel later when we will have a DRM
- * encoder.
+	 * bridge instead of a DSS device. Stop here; we will attach
+	 * the bridge later, once we have a DRM encoder.
*/
- return src && (src->bridge || src->panel) ? 0 : -EINVAL;
+ return src && src->bridge ? 0 : -EINVAL;
}
if (omapdss_device_is_connected(dst))
@@ -197,10 +195,12 @@ int omapdss_device_connect(struct dss_device *dss,
dst->dss = dss;
- ret = dst->ops->connect(src, dst);
- if (ret < 0) {
- dst->dss = NULL;
- return ret;
+ if (dst->ops && dst->ops->connect) {
+ ret = dst->ops->connect(src, dst);
+ if (ret < 0) {
+ dst->dss = NULL;
+ return ret;
+ }
}
return 0;
@@ -217,7 +217,7 @@ void omapdss_device_disconnect(struct omap_dss_device *src,
dst ? dev_name(dst->dev) : "NULL");
if (!dst) {
- WARN_ON(!src->bridge && !src->panel);
+ WARN_ON(!src->bridge);
return;
}
@@ -228,29 +228,18 @@ void omapdss_device_disconnect(struct omap_dss_device *src,
WARN_ON(dst->state != OMAP_DSS_DISPLAY_DISABLED);
- dst->ops->disconnect(src, dst);
+ if (dst->ops && dst->ops->disconnect)
+ dst->ops->disconnect(src, dst);
dst->dss = NULL;
}
EXPORT_SYMBOL_GPL(omapdss_device_disconnect);
-void omapdss_device_pre_enable(struct omap_dss_device *dssdev)
-{
- if (!dssdev)
- return;
-
- omapdss_device_pre_enable(dssdev->next);
-
- if (dssdev->ops->pre_enable)
- dssdev->ops->pre_enable(dssdev);
-}
-EXPORT_SYMBOL_GPL(omapdss_device_pre_enable);
-
void omapdss_device_enable(struct omap_dss_device *dssdev)
{
if (!dssdev)
return;
- if (dssdev->ops->enable)
+ if (dssdev->ops && dssdev->ops->enable)
dssdev->ops->enable(dssdev);
omapdss_device_enable(dssdev->next);
@@ -266,25 +255,11 @@ void omapdss_device_disable(struct omap_dss_device *dssdev)
omapdss_device_disable(dssdev->next);
- if (dssdev->ops->disable)
+ if (dssdev->ops && dssdev->ops->disable)
dssdev->ops->disable(dssdev);
}
EXPORT_SYMBOL_GPL(omapdss_device_disable);
-void omapdss_device_post_disable(struct omap_dss_device *dssdev)
-{
- if (!dssdev)
- return;
-
- if (dssdev->ops->post_disable)
- dssdev->ops->post_disable(dssdev);
-
- omapdss_device_post_disable(dssdev->next);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-EXPORT_SYMBOL_GPL(omapdss_device_post_disable);
-
/* -----------------------------------------------------------------------------
* Components Handling
*/
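
The base.c hunks above make the omap_dss_device ops table fully optional: a
bridge-backed output may carry no ops at all, so every call site now guards
both the table and the individual hook. A minimal, self-contained userspace
sketch of that guarded-optional-ops pattern (the struct and names here are
illustrative only, not the omapdss API):

	#include <stdio.h>

	struct dev_ops {
		int (*connect)(void *dev);	/* optional hook */
	};

	struct dev {
		const char *name;
		const struct dev_ops *ops;	/* the whole table may be NULL */
	};

	static int dev_connect(struct dev *d)
	{
		/* Guard the table and the hook, as omapdss_device_connect() now does. */
		if (d->ops && d->ops->connect)
			return d->ops->connect(d);

		return 0;	/* no hook: treat as success */
	}

	int main(void)
	{
		struct dev bridge_backed = { .name = "dpi.0", .ops = NULL };

		printf("connect: %d\n", dev_connect(&bridge_backed));
		return 0;
	}
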
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 8a3f61f5825f..3b82158b1bfd 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -40,15 +40,6 @@ void omapdss_display_init(struct omap_dss_device *dssdev)
}
EXPORT_SYMBOL_GPL(omapdss_display_init);
-struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output)
-{
- while (output->next)
- output = output->next;
-
- return omapdss_device_get(output);
-}
-EXPORT_SYMBOL_GPL(omapdss_display_get);
-
int omapdss_display_get_modes(struct drm_connector *connector,
const struct videomode *vm)
{
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 95147437b990..5110acb0c6c1 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -9,20 +9,22 @@
#define DSS_SUBSYS_NAME "DPI"
-#include <linux/kernel.h>
+#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/export.h>
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/string.h>
-#include <linux/of.h>
-#include <linux/clk.h>
#include <linux/sys_soc.h>
-#include "omapdss.h"
+#include <drm/drm_bridge.h>
+
#include "dss.h"
+#include "omapdss.h"
struct dpi_data {
struct platform_device *pdev;
@@ -34,19 +36,19 @@ struct dpi_data {
enum dss_clk_source clk_src;
struct dss_pll *pll;
- struct mutex lock;
-
struct dss_lcd_mgr_config mgr_config;
unsigned long pixelclock;
int data_lines;
struct omap_dss_device output;
+ struct drm_bridge bridge;
};
-static struct dpi_data *dpi_get_data_from_dssdev(struct omap_dss_device *dssdev)
-{
- return container_of(dssdev, struct dpi_data, output);
-}
+#define drm_bridge_to_dpi(bridge) container_of(bridge, struct dpi_data, bridge)
+
+/* -----------------------------------------------------------------------------
+ * Clock Handling and PLL
+ */
static enum dss_clk_source dpi_get_clk_src_dra7xx(struct dpi_data *dpi,
enum omap_channel channel)
@@ -283,9 +285,7 @@ static bool dpi_dss_clk_calc(struct dpi_data *dpi, unsigned long pck,
-static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel,
- unsigned long pck_req, unsigned long *fck, int *lck_div,
- int *pck_div)
+static int dpi_set_pll_clk(struct dpi_data *dpi, unsigned long pck_req)
{
struct dpi_clk_calc_ctx ctx;
int r;
@@ -299,19 +299,15 @@ static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel,
if (r)
return r;
- dss_select_lcd_clk_source(dpi->dss, channel, dpi->clk_src);
+ dss_select_lcd_clk_source(dpi->dss, dpi->output.dispc_channel,
+ dpi->clk_src);
dpi->mgr_config.clock_info = ctx.dispc_cinfo;
- *fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
- *lck_div = ctx.dispc_cinfo.lck_div;
- *pck_div = ctx.dispc_cinfo.pck_div;
-
return 0;
}
-static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
- unsigned long *fck, int *lck_div, int *pck_div)
+static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req)
{
struct dpi_clk_calc_ctx ctx;
int r;
@@ -327,29 +323,19 @@ static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
dpi->mgr_config.clock_info = ctx.dispc_cinfo;
- *fck = ctx.fck;
- *lck_div = ctx.dispc_cinfo.lck_div;
- *pck_div = ctx.dispc_cinfo.pck_div;
-
return 0;
}
static int dpi_set_mode(struct dpi_data *dpi)
{
- int lck_div = 0, pck_div = 0;
- unsigned long fck = 0;
- int r = 0;
+ int r;
if (dpi->pll)
- r = dpi_set_pll_clk(dpi, dpi->output.dispc_channel,
- dpi->pixelclock, &fck, &lck_div, &pck_div);
+ r = dpi_set_pll_clk(dpi, dpi->pixelclock);
else
- r = dpi_set_dispc_clk(dpi, dpi->pixelclock, &fck,
- &lck_div, &pck_div);
- if (r)
- return r;
+ r = dpi_set_dispc_clk(dpi, dpi->pixelclock);
- return 0;
+ return r;
}
static void dpi_config_lcd_manager(struct dpi_data *dpi)
@@ -366,25 +352,149 @@ static void dpi_config_lcd_manager(struct dpi_data *dpi)
dss_mgr_set_lcd_config(&dpi->output, &dpi->mgr_config);
}
-static void dpi_display_enable(struct omap_dss_device *dssdev)
+static int dpi_clock_update(struct dpi_data *dpi, unsigned long *clock)
+{
+ int lck_div, pck_div;
+ unsigned long fck;
+ struct dpi_clk_calc_ctx ctx;
+
+ if (dpi->pll) {
+ if (!dpi_pll_clk_calc(dpi, *clock, &ctx))
+ return -EINVAL;
+
+ fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
+ } else {
+ if (!dpi_dss_clk_calc(dpi, *clock, &ctx))
+ return -EINVAL;
+
+ fck = ctx.fck;
+ }
+
+ lck_div = ctx.dispc_cinfo.lck_div;
+ pck_div = ctx.dispc_cinfo.pck_div;
+
+ *clock = fck / lck_div / pck_div;
+
+ return 0;
+}
+
+static int dpi_verify_pll(struct dss_pll *pll)
{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
- struct omap_dss_device *out = &dpi->output;
int r;
- mutex_lock(&dpi->lock);
+ /* do initial setup with the PLL to see if it is operational */
+
+ r = dss_pll_enable(pll);
+ if (r)
+ return r;
+
+ dss_pll_disable(pll);
+
+ return 0;
+}
+
+static void dpi_init_pll(struct dpi_data *dpi)
+{
+ struct dss_pll *pll;
+
+ if (dpi->pll)
+ return;
+
+ dpi->clk_src = dpi_get_clk_src(dpi);
+
+ pll = dss_pll_find_by_src(dpi->dss, dpi->clk_src);
+ if (!pll)
+ return;
+
+ if (dpi_verify_pll(pll)) {
+ DSSWARN("PLL not operational\n");
+ return;
+ }
+
+ dpi->pll = pll;
+}
+
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int dpi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ dpi_init_pll(dpi);
+
+ return drm_bridge_attach(bridge->encoder, dpi->output.next_bridge,
+ bridge, flags);
+}
+
+static enum drm_mode_status
+dpi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+ unsigned long clock = mode->clock * 1000;
+ int ret;
+
+ if (mode->hdisplay % 8 != 0)
+ return MODE_BAD_WIDTH;
+
+ if (mode->clock == 0)
+ return MODE_NOCLOCK;
+
+ ret = dpi_clock_update(dpi, &clock);
+ if (ret < 0)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+static bool dpi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+ unsigned long clock = mode->clock * 1000;
+ int ret;
+
+ ret = dpi_clock_update(dpi, &clock);
+ if (ret < 0)
+ return false;
+
+ adjusted_mode->clock = clock / 1000;
+
+ return true;
+}
+
+static void dpi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+
+ dpi->pixelclock = adjusted_mode->clock * 1000;
+}
+
+static void dpi_bridge_enable(struct drm_bridge *bridge)
+{
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+ int r;
if (dpi->vdds_dsi_reg) {
r = regulator_enable(dpi->vdds_dsi_reg);
if (r)
- goto err_reg_enable;
+ return;
}
r = dispc_runtime_get(dpi->dss->dispc);
if (r)
goto err_get_dispc;
- r = dss_dpi_select_source(dpi->dss, dpi->id, out->dispc_channel);
+ r = dss_dpi_select_source(dpi->dss, dpi->id, dpi->output.dispc_channel);
if (r)
goto err_src_sel;
@@ -406,8 +516,6 @@ static void dpi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_mgr_enable;
- mutex_unlock(&dpi->lock);
-
return;
err_mgr_enable:
@@ -420,15 +528,11 @@ err_src_sel:
err_get_dispc:
if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
-err_reg_enable:
- mutex_unlock(&dpi->lock);
}
-static void dpi_display_disable(struct omap_dss_device *dssdev)
+static void dpi_bridge_disable(struct drm_bridge *bridge)
{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
-
- mutex_lock(&dpi->lock);
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
dss_mgr_disable(&dpi->output);
@@ -442,99 +546,34 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
-
- mutex_unlock(&dpi->lock);
}
-static void dpi_set_timings(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode)
-{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
-
- DSSDBG("dpi_set_timings\n");
-
- mutex_lock(&dpi->lock);
-
- dpi->pixelclock = mode->clock * 1000;
-
- mutex_unlock(&dpi->lock);
-}
+static const struct drm_bridge_funcs dpi_bridge_funcs = {
+ .attach = dpi_bridge_attach,
+ .mode_valid = dpi_bridge_mode_valid,
+ .mode_fixup = dpi_bridge_mode_fixup,
+ .mode_set = dpi_bridge_mode_set,
+ .enable = dpi_bridge_enable,
+ .disable = dpi_bridge_disable,
+};
-static int dpi_check_timings(struct omap_dss_device *dssdev,
- struct drm_display_mode *mode)
+static void dpi_bridge_init(struct dpi_data *dpi)
{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
- int lck_div, pck_div;
- unsigned long fck;
- unsigned long pck;
- struct dpi_clk_calc_ctx ctx;
- bool ok;
+ dpi->bridge.funcs = &dpi_bridge_funcs;
+ dpi->bridge.of_node = dpi->pdev->dev.of_node;
+ dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;
- if (mode->hdisplay % 8 != 0)
- return -EINVAL;
-
- if (mode->clock == 0)
- return -EINVAL;
-
- if (dpi->pll) {
- ok = dpi_pll_clk_calc(dpi, mode->clock * 1000, &ctx);
- if (!ok)
- return -EINVAL;
-
- fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
- } else {
- ok = dpi_dss_clk_calc(dpi, mode->clock * 1000, &ctx);
- if (!ok)
- return -EINVAL;
-
- fck = ctx.fck;
- }
-
- lck_div = ctx.dispc_cinfo.lck_div;
- pck_div = ctx.dispc_cinfo.pck_div;
-
- pck = fck / lck_div / pck_div;
-
- mode->clock = pck / 1000;
-
- return 0;
+ drm_bridge_add(&dpi->bridge);
}
-static int dpi_verify_pll(struct dss_pll *pll)
+static void dpi_bridge_cleanup(struct dpi_data *dpi)
{
- int r;
-
- /* do initial setup with the PLL to see if it is operational */
-
- r = dss_pll_enable(pll);
- if (r)
- return r;
-
- dss_pll_disable(pll);
-
- return 0;
+ drm_bridge_remove(&dpi->bridge);
}
-static void dpi_init_pll(struct dpi_data *dpi)
-{
- struct dss_pll *pll;
-
- if (dpi->pll)
- return;
-
- dpi->clk_src = dpi_get_clk_src(dpi);
-
- pll = dss_pll_find_by_src(dpi->dss, dpi->clk_src);
- if (!pll)
- return;
-
- if (dpi_verify_pll(pll)) {
- DSSWARN("PLL not operational\n");
- return;
- }
-
- dpi->pll = pll;
-}
+/* -----------------------------------------------------------------------------
+ * Initialisation and Cleanup
+ */
/*
* Return a hardcoded channel for the DPI output. This should work for
@@ -572,39 +611,14 @@ static enum omap_channel dpi_get_channel(struct dpi_data *dpi)
}
}
-static int dpi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dst);
-
- dpi_init_pll(dpi);
-
- return omapdss_device_connect(dst->dss, dst, dst->next);
-}
-
-static void dpi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
-}
-
-static const struct omap_dss_device_ops dpi_ops = {
- .connect = dpi_connect,
- .disconnect = dpi_disconnect,
-
- .enable = dpi_display_enable,
- .disable = dpi_display_disable,
-
- .check_timings = dpi_check_timings,
- .set_timings = dpi_set_timings,
-};
-
static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
{
struct omap_dss_device *out = &dpi->output;
u32 port_num = 0;
int r;
+ dpi_bridge_init(dpi);
+
of_property_read_u32(port, "reg", &port_num);
dpi->id = port_num <= 2 ? port_num : 0;
@@ -625,13 +639,14 @@ static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
out->id = OMAP_DSS_OUTPUT_DPI;
out->type = OMAP_DISPLAY_TYPE_DPI;
out->dispc_channel = dpi_get_channel(dpi);
- out->of_ports = BIT(port_num);
- out->ops = &dpi_ops;
+ out->of_port = port_num;
out->owner = THIS_MODULE;
- r = omapdss_device_init_output(out);
- if (r < 0)
+ r = omapdss_device_init_output(out, &dpi->bridge);
+ if (r < 0) {
+ dpi_bridge_cleanup(dpi);
return r;
+ }
omapdss_device_register(out);
@@ -645,8 +660,14 @@ static void dpi_uninit_output_port(struct device_node *port)
omapdss_device_unregister(out);
omapdss_device_cleanup_output(out);
+
+ dpi_bridge_cleanup(dpi);
}
+/* -----------------------------------------------------------------------------
+ * Initialisation and Cleanup
+ */
+
static const struct soc_device_attribute dpi_soc_devices[] = {
{ .machine = "OMAP3[456]*" },
{ .machine = "[AD]M37*" },
@@ -706,8 +727,6 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
dpi->dss = dss;
port->data = dpi;
- mutex_init(&dpi->lock);
-
r = dpi_init_regulator(dpi);
if (r)
return r;
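
The dpi.c conversion above follows the drm_bridge pattern that recurs in the
HDMI4/HDMI5 changes below: embed a drm_bridge in the driver data, recover it
with container_of(), register it with drm_bridge_add() at init time, and chain
to the next bridge at attach time. A condensed kernel-style sketch under those
assumptions ("foo" and its helpers are placeholders; the drm_* calls mirror
the ones in the diff):

	#include <linux/of.h>
	#include <drm/drm_bridge.h>

	struct foo {
		struct drm_bridge bridge;
		struct drm_bridge *next_bridge;
	};

	#define bridge_to_foo(b) container_of(b, struct foo, bridge)

	static int foo_bridge_attach(struct drm_bridge *bridge,
				     enum drm_bridge_attach_flags flags)
	{
		struct foo *foo = bridge_to_foo(bridge);

		/* The omapdrm outputs support the no-connector model only. */
		if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
			return -EINVAL;

		/* Chain the rest of the display pipeline behind this bridge. */
		return drm_bridge_attach(bridge->encoder, foo->next_bridge,
					 bridge, flags);
	}

	static const struct drm_bridge_funcs foo_bridge_funcs = {
		.attach = foo_bridge_attach,
	};

	static void foo_bridge_init(struct foo *foo, struct device_node *of_node)
	{
		foo->bridge.funcs = &foo_bridge_funcs;
		foo->bridge.of_node = of_node;
		foo->bridge.type = DRM_MODE_CONNECTOR_DPI;

		drm_bridge_add(&foo->bridge);	/* paired with drm_bridge_remove() */
	}
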
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index da16ea095f13..79ddfbfd1b58 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -5116,12 +5116,12 @@ static int dsi_init_output(struct dsi_data *dsi)
out->dispc_channel = dsi_get_channel(dsi);
out->ops = &dsi_ops;
out->owner = THIS_MODULE;
- out->of_ports = BIT(0);
+ out->of_port = 0;
out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE
| DRM_BUS_FLAG_DE_HIGH
| DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;
- r = omapdss_device_init_output(out);
+ r = omapdss_device_init_output(out, NULL);
if (r < 0)
return r;
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
deleted file mode 100644
index b7981f3b80ad..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ /dev/null
@@ -1,28 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <[email protected]>
- */
-
-#include <linux/err.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-
-#include "omapdss.h"
-
-struct omap_dss_device *
-omapdss_of_find_connected_device(struct device_node *node, unsigned int port)
-{
- struct device_node *remote_node;
- struct omap_dss_device *dssdev;
-
- remote_node = of_graph_get_remote_node(node, port, 0);
- if (!remote_node)
- return NULL;
-
- dssdev = omapdss_find_device_by_node(remote_node);
- of_node_put(remote_node);
-
- return dssdev ? dssdev : ERR_PTR(-EPROBE_DEFER);
-}
-EXPORT_SYMBOL_GPL(omapdss_of_find_connected_device);
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 225ec808b01a..b76fc2b56227 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1151,46 +1151,38 @@ static const struct dss_features dra7xx_dss_feats = {
.has_lcd_clk_src = true,
};
-static int dss_init_ports(struct dss_device *dss)
+static void __dss_uninit_ports(struct dss_device *dss, unsigned int num_ports)
{
struct platform_device *pdev = dss->pdev;
struct device_node *parent = pdev->dev.of_node;
struct device_node *port;
unsigned int i;
- int r;
- for (i = 0; i < dss->feat->num_ports; i++) {
+ for (i = 0; i < num_ports; i++) {
port = of_graph_get_port_by_id(parent, i);
if (!port)
continue;
switch (dss->feat->ports[i]) {
case OMAP_DISPLAY_TYPE_DPI:
- r = dpi_init_port(dss, pdev, port, dss->feat->model);
- if (r)
- return r;
+ dpi_uninit_port(port);
break;
-
case OMAP_DISPLAY_TYPE_SDI:
- r = sdi_init_port(dss, pdev, port);
- if (r)
- return r;
+ sdi_uninit_port(port);
break;
-
default:
break;
}
}
-
- return 0;
}
-static void dss_uninit_ports(struct dss_device *dss)
+static int dss_init_ports(struct dss_device *dss)
{
struct platform_device *pdev = dss->pdev;
struct device_node *parent = pdev->dev.of_node;
struct device_node *port;
- int i;
+ unsigned int i;
+ int r;
for (i = 0; i < dss->feat->num_ports; i++) {
port = of_graph_get_port_by_id(parent, i);
@@ -1199,15 +1191,32 @@ static void dss_uninit_ports(struct dss_device *dss)
switch (dss->feat->ports[i]) {
case OMAP_DISPLAY_TYPE_DPI:
- dpi_uninit_port(port);
+ r = dpi_init_port(dss, pdev, port, dss->feat->model);
+ if (r)
+ goto error;
break;
+
case OMAP_DISPLAY_TYPE_SDI:
- sdi_uninit_port(port);
+ r = sdi_init_port(dss, pdev, port);
+ if (r)
+ goto error;
break;
+
default:
break;
}
}
+
+ return 0;
+
+error:
+ __dss_uninit_ports(dss, i);
+ return r;
+}
+
+static void dss_uninit_ports(struct dss_device *dss)
+{
+ __dss_uninit_ports(dss, dss->feat->num_ports);
}
static int dss_video_pll_probe(struct dss_device *dss)
@@ -1543,7 +1552,8 @@ static void dss_shutdown(struct platform_device *pdev)
DSSDBG("shutdown\n");
for_each_dss_output(dssdev) {
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE &&
+ dssdev->ops && dssdev->ops->disable)
dssdev->ops->disable(dssdev);
}
}
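
The dss.c refactor above fixes partial-failure handling in dss_init_ports():
on error it unwinds exactly the ports already initialised by passing the loop
index to __dss_uninit_ports(dss, i), and dss_uninit_ports() reuses the same
helper with the full num_ports. A standalone demo of that unwind-on-error
idiom (the port type and the injected failure are invented for illustration):

	#include <stdio.h>

	#define NUM_PORTS 3

	static int port_init(unsigned int i)
	{
		if (i == 2)		/* simulate a failure on the last port */
			return -1;
		printf("init   port %u\n", i);
		return 0;
	}

	static void port_uninit(unsigned int i)
	{
		printf("uninit port %u\n", i);
	}

	/* Tear down only the first num_ports ports, in order. */
	static void uninit_ports(unsigned int num_ports)
	{
		unsigned int i;

		for (i = 0; i < num_ports; i++)
			port_uninit(i);
	}

	static int init_ports(void)
	{
		unsigned int i;
		int r;

		for (i = 0; i < NUM_PORTS; i++) {
			r = port_init(i);
			if (r)
				goto error;
		}
		return 0;

	error:
		uninit_ports(i);	/* ports [0, i) succeeded; undo just those */
		return r;
	}

	int main(void)
	{
		return init_ports() ? 1 : 0;
	}
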
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index c867552c925c..3a40833d3368 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -14,6 +14,7 @@
#include <linux/hdmi.h>
#include <sound/omap-hdmi-audio.h>
#include <media/cec.h>
+#include <drm/drm_bridge.h>
#include "omapdss.h"
#include "dss.h"
@@ -364,6 +365,7 @@ struct omap_hdmi {
bool core_enabled;
struct omap_dss_device output;
+ struct drm_bridge bridge;
struct platform_device *audio_pdev;
void (*audio_abort_cb)(struct device *dev);
@@ -378,6 +380,6 @@ struct omap_hdmi {
bool display_enabled;
};
-#define dssdev_to_hdmi(dssdev) container_of(dssdev, struct omap_hdmi, output)
+#define drm_bridge_to_hdmi(b) container_of(b, struct omap_hdmi, bridge)
#endif
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 0f557fad4513..2578c95570f6 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -28,6 +28,9 @@
#include <sound/omap-hdmi-audio.h>
#include <media/cec.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+
#include "omapdss.h"
#include "hdmi4_core.h"
#include "hdmi4_cec.h"
@@ -237,20 +240,6 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi)
hdmi_power_off_core(hdmi);
}
-static void hdmi_display_set_timings(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- mutex_lock(&hdmi->lock);
-
- drm_display_mode_to_videomode(mode, &hdmi->cfg.vm);
-
- dispc_set_tv_pclk(hdmi->dss->dispc, mode->clock * 1000);
-
- mutex_unlock(&hdmi->lock);
-}
-
static int hdmi_dump_regs(struct seq_file *s, void *p)
{
struct omap_hdmi *hdmi = s->private;
@@ -272,57 +261,139 @@ static int hdmi_dump_regs(struct seq_file *s, void *p)
return 0;
}
-static int read_edid(struct omap_hdmi *hdmi, u8 *buf, int len)
+static void hdmi_start_audio_stream(struct omap_hdmi *hd)
{
- int r;
+ hdmi_wp_audio_enable(&hd->wp, true);
+ hdmi4_audio_start(&hd->core, &hd->wp);
+}
- mutex_lock(&hdmi->lock);
+static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
+{
+ hdmi4_audio_stop(&hd->core, &hd->wp);
+ hdmi_wp_audio_enable(&hd->wp, false);
+}
- r = hdmi_runtime_get(hdmi);
- BUG_ON(r);
+int hdmi4_core_enable(struct hdmi_core_data *core)
+{
+ struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
+ int r = 0;
- r = hdmi4_read_edid(&hdmi->core, buf, len);
+ DSSDBG("ENTER omapdss_hdmi4_core_enable\n");
+
+ mutex_lock(&hdmi->lock);
+
+ r = hdmi_power_on_core(hdmi);
+ if (r) {
+ DSSERR("failed to power on device\n");
+ goto err0;
+ }
- hdmi_runtime_put(hdmi);
mutex_unlock(&hdmi->lock);
+ return 0;
+err0:
+ mutex_unlock(&hdmi->lock);
return r;
}
-static void hdmi_start_audio_stream(struct omap_hdmi *hd)
+void hdmi4_core_disable(struct hdmi_core_data *core)
{
- hdmi_wp_audio_enable(&hd->wp, true);
- hdmi4_audio_start(&hd->core, &hd->wp);
+ struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
+
+ DSSDBG("Enter omapdss_hdmi4_core_disable\n");
+
+ mutex_lock(&hdmi->lock);
+
+ hdmi_power_off_core(hdmi);
+
+ mutex_unlock(&hdmi->lock);
}
-static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int hdmi4_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
- hdmi4_audio_stop(&hd->core, &hd->wp);
- hdmi_wp_audio_enable(&hd->wp, false);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
+ bridge, flags);
}
-static void hdmi_display_enable(struct omap_dss_device *dssdev)
+static void hdmi4_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+ mutex_lock(&hdmi->lock);
+
+ drm_display_mode_to_videomode(adjusted_mode, &hdmi->cfg.vm);
+
+ dispc_set_tv_pclk(hdmi->dss->dispc, adjusted_mode->clock * 1000);
+
+ mutex_unlock(&hdmi->lock);
+}
+
+static void hdmi4_bridge_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
+{
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+ struct drm_atomic_state *state = bridge_state->base.state;
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ struct drm_crtc_state *crtc_state;
unsigned long flags;
- int r;
+ int ret;
+
+ /*
+ * None of these should fail, as the bridge can't be enabled without a
+ * valid CRTC to connector path with fully populated new states.
+ */
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ hdmi->cfg.hdmi_dvi_mode = connector->display_info.is_hdmi
+ ? HDMI_HDMI : HDMI_DVI;
- DSSDBG("ENTER hdmi_display_enable\n");
+ if (connector->display_info.is_hdmi) {
+ const struct drm_display_mode *mode;
+ struct hdmi_avi_infoframe avi;
+
+ mode = &crtc_state->adjusted_mode;
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
+ mode);
+ if (ret == 0)
+ hdmi->cfg.infoframe = avi;
+ }
mutex_lock(&hdmi->lock);
- r = hdmi_power_on_full(hdmi);
- if (r) {
+ ret = hdmi_power_on_full(hdmi);
+ if (ret) {
DSSERR("failed to power on device\n");
goto done;
}
if (hdmi->audio_configured) {
- r = hdmi4_audio_config(&hdmi->core, &hdmi->wp,
- &hdmi->audio_config,
- hdmi->cfg.vm.pixelclock);
- if (r) {
- DSSERR("Error restoring audio configuration: %d", r);
+ ret = hdmi4_audio_config(&hdmi->core, &hdmi->wp,
+ &hdmi->audio_config,
+ hdmi->cfg.vm.pixelclock);
+ if (ret) {
+ DSSERR("Error restoring audio configuration: %d", ret);
hdmi->audio_abort_cb(&hdmi->pdev->dev);
hdmi->audio_configured = false;
}
@@ -338,13 +409,12 @@ done:
mutex_unlock(&hdmi->lock);
}
-static void hdmi_display_disable(struct omap_dss_device *dssdev)
+static void hdmi4_bridge_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
unsigned long flags;
- DSSDBG("Enter hdmi_display_disable\n");
-
mutex_lock(&hdmi->lock);
spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
@@ -357,58 +427,21 @@ static void hdmi_display_disable(struct omap_dss_device *dssdev)
mutex_unlock(&hdmi->lock);
}
-int hdmi4_core_enable(struct hdmi_core_data *core)
+static void hdmi4_bridge_hpd_notify(struct drm_bridge *bridge,
+ enum drm_connector_status status)
{
- struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
- int r = 0;
-
- DSSDBG("ENTER omapdss_hdmi4_core_enable\n");
-
- mutex_lock(&hdmi->lock);
-
- r = hdmi_power_on_core(hdmi);
- if (r) {
- DSSERR("failed to power on device\n");
- goto err0;
- }
-
- mutex_unlock(&hdmi->lock);
- return 0;
-
-err0:
- mutex_unlock(&hdmi->lock);
- return r;
-}
-
-void hdmi4_core_disable(struct hdmi_core_data *core)
-{
- struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
-
- DSSDBG("Enter omapdss_hdmi4_core_disable\n");
-
- mutex_lock(&hdmi->lock);
-
- hdmi_power_off_core(hdmi);
-
- mutex_unlock(&hdmi->lock);
-}
-
-static int hdmi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return omapdss_device_connect(dst->dss, dst, dst->next);
-}
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
-static void hdmi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
+ if (status == connector_status_disconnected)
+ hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
}
-static int hdmi_read_edid(struct omap_dss_device *dssdev,
- u8 *edid, int len)
+static struct edid *hdmi4_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+ struct edid *edid = NULL;
+ unsigned int cec_addr;
bool need_enable;
int r;
@@ -417,63 +450,65 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev,
if (need_enable) {
r = hdmi4_core_enable(&hdmi->core);
if (r)
- return r;
+ return NULL;
}
- r = read_edid(hdmi, edid, len);
- if (r >= 256)
- hdmi4_cec_set_phys_addr(&hdmi->core,
- cec_get_edid_phys_addr(edid, r, NULL));
- else
- hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
- if (need_enable)
- hdmi4_core_disable(&hdmi->core);
+ mutex_lock(&hdmi->lock);
+ r = hdmi_runtime_get(hdmi);
+ BUG_ON(r);
- return r;
-}
+ r = hdmi4_core_ddc_init(&hdmi->core);
+ if (r)
+ goto done;
-static void hdmi_lost_hotplug(struct omap_dss_device *dssdev)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ edid = drm_do_get_edid(connector, hdmi4_core_ddc_read, &hdmi->core);
- hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
-}
+done:
+ hdmi_runtime_put(hdmi);
+ mutex_unlock(&hdmi->lock);
-static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
- const struct hdmi_avi_infoframe *avi)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ if (edid && edid->extensions) {
+ unsigned int len = (edid->extensions + 1) * EDID_LENGTH;
- hdmi->cfg.infoframe = *avi;
- return 0;
-}
+ cec_addr = cec_get_edid_phys_addr((u8 *)edid, len, NULL);
+ } else {
+ cec_addr = CEC_PHYS_ADDR_INVALID;
+ }
-static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev,
- bool hdmi_mode)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ hdmi4_cec_set_phys_addr(&hdmi->core, cec_addr);
- hdmi->cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI;
- return 0;
-}
+ if (need_enable)
+ hdmi4_core_disable(&hdmi->core);
-static const struct omap_dss_device_ops hdmi_ops = {
- .connect = hdmi_connect,
- .disconnect = hdmi_disconnect,
+ return edid;
+}
- .enable = hdmi_display_enable,
- .disable = hdmi_display_disable,
+static const struct drm_bridge_funcs hdmi4_bridge_funcs = {
+ .attach = hdmi4_bridge_attach,
+ .mode_set = hdmi4_bridge_mode_set,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_enable = hdmi4_bridge_enable,
+ .atomic_disable = hdmi4_bridge_disable,
+ .hpd_notify = hdmi4_bridge_hpd_notify,
+ .get_edid = hdmi4_bridge_get_edid,
+};
- .set_timings = hdmi_display_set_timings,
+static void hdmi4_bridge_init(struct omap_hdmi *hdmi)
+{
+ hdmi->bridge.funcs = &hdmi4_bridge_funcs;
+ hdmi->bridge.of_node = hdmi->pdev->dev.of_node;
+ hdmi->bridge.ops = DRM_BRIDGE_OP_EDID;
+ hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
- .read_edid = hdmi_read_edid,
+ drm_bridge_add(&hdmi->bridge);
+}
- .hdmi = {
- .lost_hotplug = hdmi_lost_hotplug,
- .set_infoframe = hdmi_set_infoframe,
- .set_hdmi_mode = hdmi_set_hdmi_mode,
- },
-};
+static void hdmi4_bridge_cleanup(struct omap_hdmi *hdmi)
+{
+ drm_bridge_remove(&hdmi->bridge);
+}
/* -----------------------------------------------------------------------------
* Audio Callbacks
@@ -666,19 +701,21 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi)
struct omap_dss_device *out = &hdmi->output;
int r;
+ hdmi4_bridge_init(hdmi);
+
out->dev = &hdmi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->ops = &hdmi_ops;
out->owner = THIS_MODULE;
- out->of_ports = BIT(0);
- out->ops_flags = OMAP_DSS_DEVICE_OP_EDID;
+ out->of_port = 0;
- r = omapdss_device_init_output(out);
- if (r < 0)
+ r = omapdss_device_init_output(out, &hdmi->bridge);
+ if (r < 0) {
+ hdmi4_bridge_cleanup(hdmi);
return r;
+ }
omapdss_device_register(out);
@@ -691,6 +728,8 @@ static void hdmi4_uninit_output(struct omap_hdmi *hdmi)
omapdss_device_unregister(out);
omapdss_device_cleanup_output(out);
+
+ hdmi4_bridge_cleanup(hdmi);
}
static int hdmi4_probe_of(struct omap_hdmi *hdmi)
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index ea5d5c228534..751985a2679a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -32,7 +32,7 @@ static inline void __iomem *hdmi_av_base(struct hdmi_core_data *core)
return core->base + HDMI_CORE_AV;
}
-static int hdmi_core_ddc_init(struct hdmi_core_data *core)
+int hdmi4_core_ddc_init(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
@@ -74,13 +74,11 @@ static int hdmi_core_ddc_init(struct hdmi_core_data *core)
return 0;
}
-static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
- u8 *pedid, int ext)
+int hdmi4_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len)
{
+ struct hdmi_core_data *core = data;
void __iomem *base = core->base;
u32 i;
- char checksum;
- u32 offset = 0;
/* HDMI_CORE_DDC_STATUS_IN_PROG */
if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
@@ -89,24 +87,21 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
return -ETIMEDOUT;
}
- if (ext % 2 != 0)
- offset = 0x80;
-
/* Load Segment Address Register */
- REG_FLD_MOD(base, HDMI_CORE_DDC_SEGM, ext / 2, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_DDC_SEGM, block / 2, 7, 0);
/* Load Slave Address Register */
REG_FLD_MOD(base, HDMI_CORE_DDC_ADDR, 0xA0 >> 1, 7, 1);
/* Load Offset Address Register */
- REG_FLD_MOD(base, HDMI_CORE_DDC_OFFSET, offset, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_DDC_OFFSET, block % 2 ? 0x80 : 0, 7, 0);
/* Load Byte Count */
- REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT1, 0x80, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT1, len, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT2, 0x0, 1, 0);
/* Set DDC_CMD */
- if (ext)
+ if (block)
REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x4, 3, 0);
else
REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x2, 3, 0);
@@ -122,7 +117,7 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
return -EIO;
}
- for (i = 0; i < 0x80; ++i) {
+ for (i = 0; i < len; ++i) {
int t;
/* IN_PROG */
@@ -141,48 +136,12 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
udelay(1);
}
- pedid[i] = REG_GET(base, HDMI_CORE_DDC_DATA, 7, 0);
- }
-
- checksum = 0;
- for (i = 0; i < 0x80; ++i)
- checksum += pedid[i];
-
- if (checksum != 0) {
- DSSERR("E-EDID checksum failed!!\n");
- return -EIO;
+ buf[i] = REG_GET(base, HDMI_CORE_DDC_DATA, 7, 0);
}
return 0;
}
-int hdmi4_read_edid(struct hdmi_core_data *core, u8 *edid, int len)
-{
- int r, l;
-
- if (len < 128)
- return -EINVAL;
-
- r = hdmi_core_ddc_init(core);
- if (r)
- return r;
-
- r = hdmi_core_ddc_edid(core, edid, 0);
- if (r)
- return r;
-
- l = 128;
-
- if (len >= 128 * 2 && edid[0x7e] > 0) {
- r = hdmi_core_ddc_edid(core, edid + 0x80, 1);
- if (r)
- return r;
- l += 128;
- }
-
- return l;
-}
-
static void hdmi_core_init(struct hdmi_core_video_config *video_cfg)
{
DSSDBG("Enter hdmi_core_init\n");
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
index 11c4b7ba1eee..dc64ae2aa300 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
@@ -249,7 +249,9 @@ struct hdmi_core_packet_enable_repeat {
u32 generic_pkt_repeat;
};
-int hdmi4_read_edid(struct hdmi_core_data *core, u8 *edid, int len);
+int hdmi4_core_ddc_init(struct hdmi_core_data *core);
+int hdmi4_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len);
+
void hdmi4_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg);
void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index d9463b332554..4d4c1fabd0a1 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -31,6 +31,9 @@
#include <linux/of_graph.h>
#include <sound/omap-hdmi-audio.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+
#include "omapdss.h"
#include "hdmi5_core.h"
#include "dss.h"
@@ -236,20 +239,6 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi)
hdmi_power_off_core(hdmi);
}
-static void hdmi_display_set_timings(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- mutex_lock(&hdmi->lock);
-
- drm_display_mode_to_videomode(mode, &hdmi->cfg.vm);
-
- dispc_set_tv_pclk(hdmi->dss->dispc, mode->clock * 1000);
-
- mutex_unlock(&hdmi->lock);
-}
-
static int hdmi_dump_regs(struct seq_file *s, void *p)
{
struct omap_hdmi *hdmi = s->private;
@@ -271,66 +260,138 @@ static int hdmi_dump_regs(struct seq_file *s, void *p)
return 0;
}
-static int read_edid(struct omap_hdmi *hdmi, u8 *buf, int len)
+static void hdmi_start_audio_stream(struct omap_hdmi *hd)
{
- int r;
- int idlemode;
+ REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
+ hdmi_wp_audio_enable(&hd->wp, true);
+ hdmi_wp_audio_core_req_enable(&hd->wp, true);
+}
- mutex_lock(&hdmi->lock);
+static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
+{
+ hdmi_wp_audio_core_req_enable(&hd->wp, false);
+ hdmi_wp_audio_enable(&hd->wp, false);
+ REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, hd->wp_idlemode, 3, 2);
+}
- r = hdmi_runtime_get(hdmi);
- BUG_ON(r);
+static int hdmi_core_enable(struct omap_hdmi *hdmi)
+{
+ int r = 0;
- idlemode = REG_GET(hdmi->wp.base, HDMI_WP_SYSCONFIG, 3, 2);
- /* No-idle mode */
- REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
+ DSSDBG("ENTER omapdss_hdmi_core_enable\n");
- r = hdmi5_read_edid(&hdmi->core, buf, len);
+ mutex_lock(&hdmi->lock);
- REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, idlemode, 3, 2);
+ r = hdmi_power_on_core(hdmi);
+ if (r) {
+ DSSERR("failed to power on device\n");
+ goto err0;
+ }
- hdmi_runtime_put(hdmi);
mutex_unlock(&hdmi->lock);
+ return 0;
+err0:
+ mutex_unlock(&hdmi->lock);
return r;
}
-static void hdmi_start_audio_stream(struct omap_hdmi *hd)
+static void hdmi_core_disable(struct omap_hdmi *hdmi)
{
- REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
- hdmi_wp_audio_enable(&hd->wp, true);
- hdmi_wp_audio_core_req_enable(&hd->wp, true);
+ DSSDBG("Enter omapdss_hdmi_core_disable\n");
+
+ mutex_lock(&hdmi->lock);
+
+ hdmi_power_off_core(hdmi);
+
+ mutex_unlock(&hdmi->lock);
}
-static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int hdmi5_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
- hdmi_wp_audio_core_req_enable(&hd->wp, false);
- hdmi_wp_audio_enable(&hd->wp, false);
- REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, hd->wp_idlemode, 3, 2);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
+ bridge, flags);
}
-static void hdmi_display_enable(struct omap_dss_device *dssdev)
+static void hdmi5_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
- unsigned long flags;
- int r;
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+ mutex_lock(&hdmi->lock);
- DSSDBG("ENTER hdmi_display_enable\n");
+ drm_display_mode_to_videomode(adjusted_mode, &hdmi->cfg.vm);
+
+ dispc_set_tv_pclk(hdmi->dss->dispc, adjusted_mode->clock * 1000);
+
+ mutex_unlock(&hdmi->lock);
+}
+
+static void hdmi5_bridge_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
+{
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+ struct drm_atomic_state *state = bridge_state->base.state;
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ struct drm_crtc_state *crtc_state;
+ unsigned long flags;
+ int ret;
+
+ /*
+ * None of these should fail, as the bridge can't be enabled without a
+ * valid CRTC to connector path with fully populated new states.
+ */
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ hdmi->cfg.hdmi_dvi_mode = connector->display_info.is_hdmi
+ ? HDMI_HDMI : HDMI_DVI;
+
+ if (connector->display_info.is_hdmi) {
+ const struct drm_display_mode *mode;
+ struct hdmi_avi_infoframe avi;
+
+ mode = &crtc_state->adjusted_mode;
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
+ mode);
+ if (ret == 0)
+ hdmi->cfg.infoframe = avi;
+ }
mutex_lock(&hdmi->lock);
- r = hdmi_power_on_full(hdmi);
- if (r) {
+ ret = hdmi_power_on_full(hdmi);
+ if (ret) {
DSSERR("failed to power on device\n");
goto done;
}
if (hdmi->audio_configured) {
- r = hdmi5_audio_config(&hdmi->core, &hdmi->wp,
- &hdmi->audio_config,
- hdmi->cfg.vm.pixelclock);
- if (r) {
- DSSERR("Error restoring audio configuration: %d", r);
+ ret = hdmi5_audio_config(&hdmi->core, &hdmi->wp,
+ &hdmi->audio_config,
+ hdmi->cfg.vm.pixelclock);
+ if (ret) {
+ DSSERR("Error restoring audio configuration: %d", ret);
hdmi->audio_abort_cb(&hdmi->pdev->dev);
hdmi->audio_configured = false;
}
@@ -346,13 +407,12 @@ done:
mutex_unlock(&hdmi->lock);
}
-static void hdmi_display_disable(struct omap_dss_device *dssdev)
+static void hdmi5_bridge_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
unsigned long flags;
- DSSDBG("Enter hdmi_display_disable\n");
-
mutex_lock(&hdmi->lock);
spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
@@ -365,109 +425,74 @@ static void hdmi_display_disable(struct omap_dss_device *dssdev)
mutex_unlock(&hdmi->lock);
}
-static int hdmi_core_enable(struct omap_hdmi *hdmi)
+static struct edid *hdmi5_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
- int r = 0;
-
- DSSDBG("ENTER omapdss_hdmi_core_enable\n");
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+ struct edid *edid;
+ bool need_enable;
+ int idlemode;
+ int r;
- mutex_lock(&hdmi->lock);
+ need_enable = hdmi->core_enabled == false;
- r = hdmi_power_on_core(hdmi);
- if (r) {
- DSSERR("failed to power on device\n");
- goto err0;
+ if (need_enable) {
+ r = hdmi_core_enable(hdmi);
+ if (r)
+ return NULL;
}
- mutex_unlock(&hdmi->lock);
- return 0;
-
-err0:
- mutex_unlock(&hdmi->lock);
- return r;
-}
-
-static void hdmi_core_disable(struct omap_hdmi *hdmi)
-{
- DSSDBG("Enter omapdss_hdmi_core_disable\n");
-
mutex_lock(&hdmi->lock);
+ r = hdmi_runtime_get(hdmi);
+ BUG_ON(r);
- hdmi_power_off_core(hdmi);
-
- mutex_unlock(&hdmi->lock);
-}
-
-static int hdmi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return omapdss_device_connect(dst->dss, dst, dst->next);
-}
+ idlemode = REG_GET(hdmi->wp.base, HDMI_WP_SYSCONFIG, 3, 2);
+ /* No-idle mode */
+ REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
-static void hdmi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
-}
+ hdmi5_core_ddc_init(&hdmi->core);
-static int hdmi_read_edid(struct omap_dss_device *dssdev,
- u8 *edid, int len)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
- bool need_enable;
- int r;
+ edid = drm_do_get_edid(connector, hdmi5_core_ddc_read, &hdmi->core);
- need_enable = hdmi->core_enabled == false;
+ hdmi5_core_ddc_uninit(&hdmi->core);
- if (need_enable) {
- r = hdmi_core_enable(hdmi);
- if (r)
- return r;
- }
+ REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, idlemode, 3, 2);
- r = read_edid(hdmi, edid, len);
+ hdmi_runtime_put(hdmi);
+ mutex_unlock(&hdmi->lock);
if (need_enable)
hdmi_core_disable(hdmi);
- return r;
+ return (struct edid *)edid;
}
-static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
- const struct hdmi_avi_infoframe *avi)
+static const struct drm_bridge_funcs hdmi5_bridge_funcs = {
+ .attach = hdmi5_bridge_attach,
+ .mode_set = hdmi5_bridge_mode_set,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_enable = hdmi5_bridge_enable,
+ .atomic_disable = hdmi5_bridge_disable,
+ .get_edid = hdmi5_bridge_get_edid,
+};
+
+static void hdmi5_bridge_init(struct omap_hdmi *hdmi)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ hdmi->bridge.funcs = &hdmi5_bridge_funcs;
+ hdmi->bridge.of_node = hdmi->pdev->dev.of_node;
+ hdmi->bridge.ops = DRM_BRIDGE_OP_EDID;
+ hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
- hdmi->cfg.infoframe = *avi;
- return 0;
+ drm_bridge_add(&hdmi->bridge);
}
-static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev,
- bool hdmi_mode)
+static void hdmi5_bridge_cleanup(struct omap_hdmi *hdmi)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- hdmi->cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI;
- return 0;
+ drm_bridge_remove(&hdmi->bridge);
}
-static const struct omap_dss_device_ops hdmi_ops = {
- .connect = hdmi_connect,
- .disconnect = hdmi_disconnect,
-
- .enable = hdmi_display_enable,
- .disable = hdmi_display_disable,
-
- .set_timings = hdmi_display_set_timings,
-
- .read_edid = hdmi_read_edid,
-
- .hdmi = {
- .set_infoframe = hdmi_set_infoframe,
- .set_hdmi_mode = hdmi_set_hdmi_mode,
- },
-};
-
/* -----------------------------------------------------------------------------
* Audio Callbacks
*/
@@ -650,19 +675,21 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi)
struct omap_dss_device *out = &hdmi->output;
int r;
+ hdmi5_bridge_init(hdmi);
+
out->dev = &hdmi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->ops = &hdmi_ops;
out->owner = THIS_MODULE;
- out->of_ports = BIT(0);
- out->ops_flags = OMAP_DSS_DEVICE_OP_EDID;
+ out->of_port = 0;
- r = omapdss_device_init_output(out);
- if (r < 0)
+ r = omapdss_device_init_output(out, &hdmi->bridge);
+ if (r < 0) {
+ hdmi5_bridge_cleanup(hdmi);
return r;
+ }
omapdss_device_register(out);
@@ -675,6 +702,8 @@ static void hdmi5_uninit_output(struct omap_hdmi *hdmi)
omapdss_device_unregister(out);
omapdss_device_cleanup_output(out);
+
+ hdmi5_bridge_cleanup(hdmi);
}
static int hdmi5_probe_of(struct omap_hdmi *hdmi)
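The enable path above walks the atomic state from the bridge back to its connector and CRTC before building the AVI infoframe. A generic sketch of that walk, with hypothetical example_* names; each lookup is guaranteed to succeed while the bridge is being enabled, which is why the driver only WARNs on failure:

static const struct drm_display_mode *
example_bridge_get_mode(struct drm_bridge *bridge,
			struct drm_bridge_state *bridge_state)
{
	struct drm_atomic_state *state = bridge_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;

	/* encoder -> new connector -> its state -> the CRTC's state */
	connector = drm_atomic_get_new_connector_for_encoder(state,
							     bridge->encoder);
	conn_state = drm_atomic_get_new_connector_state(state, connector);
	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);

	return &crtc_state->adjusted_mode;
}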
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index ff4d35c8771f..7dd587035160 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -23,7 +23,7 @@
#include "hdmi5_core.h"
-static void hdmi_core_ddc_init(struct hdmi_core_data *core)
+void hdmi5_core_ddc_init(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
const unsigned long long iclk = 266000000; /* DSS L3 ICLK */
@@ -102,7 +102,7 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core)
REG_FLD_MOD(base, HDMI_CORE_I2CM_INT, 0x0, 2, 2);
}
-static void hdmi_core_ddc_uninit(struct hdmi_core_data *core)
+void hdmi5_core_ddc_uninit(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
@@ -112,14 +112,14 @@ static void hdmi_core_ddc_uninit(struct hdmi_core_data *core)
REG_FLD_MOD(base, HDMI_CORE_I2CM_INT, 0x1, 2, 2);
}
-static int hdmi_core_ddc_edid(struct hdmi_core_data *core, u8 *pedid, u8 ext)
+int hdmi5_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len)
{
+ struct hdmi_core_data *core = data;
void __iomem *base = core->base;
u8 cur_addr;
- char checksum = 0;
const int retries = 1000;
- u8 seg_ptr = ext / 2;
- u8 edidbase = ((ext % 2) * 0x80);
+ u8 seg_ptr = block / 2;
+ u8 edidbase = ((block % 2) * EDID_LENGTH);
REG_FLD_MOD(base, HDMI_CORE_I2CM_SEGPTR, seg_ptr, 7, 0);
@@ -127,7 +127,7 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core, u8 *pedid, u8 ext)
* TODO: We use polling here, although we probably should use proper
* interrupts.
*/
- for (cur_addr = 0; cur_addr < 128; ++cur_addr) {
+ for (cur_addr = 0; cur_addr < len; ++cur_addr) {
int i;
/* clear ERROR and DONE */
@@ -164,45 +164,13 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core, u8 *pedid, u8 ext)
return -EIO;
}
- pedid[cur_addr] = REG_GET(base, HDMI_CORE_I2CM_DATAI, 7, 0);
- checksum += pedid[cur_addr];
+ buf[cur_addr] = REG_GET(base, HDMI_CORE_I2CM_DATAI, 7, 0);
}
return 0;
}
-int hdmi5_read_edid(struct hdmi_core_data *core, u8 *edid, int len)
-{
- int r, n, i;
- int max_ext_blocks = (len / 128) - 1;
-
- if (len < 128)
- return -EINVAL;
-
- hdmi_core_ddc_init(core);
-
- r = hdmi_core_ddc_edid(core, edid, 0);
- if (r)
- goto out;
-
- n = edid[0x7e];
-
- if (n > max_ext_blocks)
- n = max_ext_blocks;
-
- for (i = 1; i <= n; i++) {
- r = hdmi_core_ddc_edid(core, edid + i * EDID_LENGTH, i);
- if (r)
- goto out;
- }
-
-out:
- hdmi_core_ddc_uninit(core);
-
- return r ? r : len;
-}
-
void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s)
{
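Exporting the DDC helpers lets the bridge plug straight into drm_do_get_edid(), which reads block 0, takes the extension count from byte 0x7e, and then requests every extension block through the same callback. A sketch of the calling convention, with example_ddc_transfer() as a hypothetical stand-in for the hardware I2C engine:

#include <drm/drm_edid.h>

static int example_get_edid_block(void *data, u8 *buf, unsigned int block,
				  size_t len)
{
	u8 segment = block / 2;			/* E-DDC segment pointer */
	u8 offset = (block % 2) * EDID_LENGTH;	/* byte 0x00 or 0x80 */

	/* Hypothetical helper performing the actual DDC transfer. */
	return example_ddc_transfer(data, segment, offset, buf, len);
}

/* Typical call site, as in hdmi5_bridge_get_edid():
 *	edid = drm_do_get_edid(connector, example_get_edid_block, core);
 */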
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
index f10b8a283011..65eadefdb3f9 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
@@ -281,7 +281,10 @@ struct csc_table {
u16 c1, c2, c3, c4;
};
-int hdmi5_read_edid(struct hdmi_core_data *core, u8 *edid, int len);
+void hdmi5_core_ddc_init(struct hdmi_core_data *core);
+int hdmi5_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len);
+void hdmi5_core_ddc_uninit(struct hdmi_core_data *core);
+
void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s);
int hdmi5_core_handle_irqs(struct hdmi_core_data *core);
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
index 31502857f013..00372f4ce711 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
@@ -174,12 +174,7 @@ static const struct of_device_id omapdss_of_match[] __initconst = {
};
static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = {
- { .compatible = "composite-video-connector" },
- { .compatible = "hdmi-connector" },
{ .compatible = "panel-dsi-cm" },
- { .compatible = "svideo-connector" },
- { .compatible = "ti,opa362" },
- { .compatible = "ti,tpd12s015" },
{},
};
@@ -192,7 +187,7 @@ static int __init omapdss_boot_init(void)
dss = of_find_matching_node(NULL, omapdss_of_match);
if (dss == NULL || !of_device_is_available(dss))
- return 0;
+ goto put_node;
omapdss_walk_device(dss, true);
@@ -217,6 +212,8 @@ static int __init omapdss_boot_init(void)
kfree(n);
}
+put_node:
+ of_node_put(dss);
return 0;
}
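The new put_node label also fixes a reference leak: of_find_matching_node() returns its node with an elevated refcount, which must be dropped on every exit path. A minimal sketch of the idiom, assuming a hypothetical example_match_table; of_node_put(NULL) is a no-op, so the early exit needs no separate check:

static int __init example_boot_init(void)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, example_match_table);
	if (!np || !of_device_is_available(np))
		goto put_node;

	/* ... walk the matched subtree ... */

put_node:
	of_node_put(np);	/* safe even when np is NULL */
	return 0;
}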
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 79f6b195c7cf..ab19d4af8de7 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -285,13 +285,6 @@ struct omap_dss_writeback_info {
u8 pre_mult_alpha;
};
-struct omapdss_hdmi_ops {
- void (*lost_hotplug)(struct omap_dss_device *dssdev);
- int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
- int (*set_infoframe)(struct omap_dss_device *dssdev,
- const struct hdmi_avi_infoframe *avi);
-};
-
struct omapdss_dsi_ops {
void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes,
bool enter_ulps);
@@ -349,46 +342,23 @@ struct omap_dss_device_ops {
void (*disconnect)(struct omap_dss_device *dssdev,
struct omap_dss_device *dst);
- void (*pre_enable)(struct omap_dss_device *dssdev);
void (*enable)(struct omap_dss_device *dssdev);
void (*disable)(struct omap_dss_device *dssdev);
- void (*post_disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
struct drm_display_mode *mode);
- void (*set_timings)(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode);
-
- bool (*detect)(struct omap_dss_device *dssdev);
-
- void (*register_hpd_cb)(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
- enum drm_connector_status status),
- void *cb_data);
- void (*unregister_hpd_cb)(struct omap_dss_device *dssdev);
-
- int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
int (*get_modes)(struct omap_dss_device *dssdev,
struct drm_connector *connector);
- union {
- const struct omapdss_hdmi_ops hdmi;
- const struct omapdss_dsi_ops dsi;
- };
+ const struct omapdss_dsi_ops dsi;
};
/**
* enum omap_dss_device_ops_flag - Indicates which device ops are supported
- * @OMAP_DSS_DEVICE_OP_DETECT: The device supports output connection detection
- * @OMAP_DSS_DEVICE_OP_HPD: The device supports all hot-plug-related operations
- * @OMAP_DSS_DEVICE_OP_EDID: The device supports reading EDID
* @OMAP_DSS_DEVICE_OP_MODES: The device supports reading modes
*/
enum omap_dss_device_ops_flag {
- OMAP_DSS_DEVICE_OP_DETECT = BIT(0),
- OMAP_DSS_DEVICE_OP_HPD = BIT(1),
- OMAP_DSS_DEVICE_OP_EDID = BIT(2),
OMAP_DSS_DEVICE_OP_MODES = BIT(3),
};
@@ -400,6 +370,7 @@ struct omap_dss_device {
struct dss_device *dss;
struct omap_dss_device *next;
struct drm_bridge *bridge;
+ struct drm_bridge *next_bridge;
struct drm_panel *panel;
struct list_head list;
@@ -436,8 +407,8 @@ struct omap_dss_device {
/* output instance */
enum omap_dss_output_id id;
- /* bitmask of port numbers in DT */
- unsigned int of_ports;
+ /* port number in DT */
+ unsigned int of_port;
};
struct omap_dss_driver {
@@ -461,7 +432,6 @@ static inline bool omapdss_is_initialized(void)
}
void omapdss_display_init(struct omap_dss_device *dssdev);
-struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output);
int omapdss_display_get_modes(struct drm_connector *connector,
const struct videomode *vm);
@@ -475,10 +445,8 @@ int omapdss_device_connect(struct dss_device *dss,
struct omap_dss_device *dst);
void omapdss_device_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst);
-void omapdss_device_pre_enable(struct omap_dss_device *dssdev);
void omapdss_device_enable(struct omap_dss_device *dssdev);
void omapdss_device_disable(struct omap_dss_device *dssdev);
-void omapdss_device_post_disable(struct omap_dss_device *dssdev);
int omap_dss_get_num_overlay_managers(void);
@@ -487,7 +455,8 @@ int omap_dss_get_num_overlays(void);
#define for_each_dss_output(d) \
while ((d = omapdss_device_next_output(d)) != NULL)
struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from);
-int omapdss_device_init_output(struct omap_dss_device *out);
+int omapdss_device_init_output(struct omap_dss_device *out,
+ struct drm_bridge *local_bridge);
void omapdss_device_cleanup_output(struct omap_dss_device *out);
typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
@@ -502,9 +471,6 @@ static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
}
-struct omap_dss_device *
-omapdss_of_find_connected_device(struct device_node *node, unsigned int port);
-
enum dss_writeback_channel {
DSS_WB_LCD1_MGR = 0,
DSS_WB_LCD2_MGR = 1,
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 0693d34fca1b..ce21c798cca6 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -4,7 +4,6 @@
* Author: Archit Taneja <[email protected]>
*/
-#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -18,12 +17,14 @@
#include "dss.h"
#include "omapdss.h"
-int omapdss_device_init_output(struct omap_dss_device *out)
+int omapdss_device_init_output(struct omap_dss_device *out,
+ struct drm_bridge *local_bridge)
{
struct device_node *remote_node;
+ int ret;
remote_node = of_graph_get_remote_node(out->dev->of_node,
- ffs(out->of_ports) - 1, 0);
+ out->of_port, 0);
if (!remote_node) {
dev_dbg(out->dev, "failed to find video sink\n");
return 0;
@@ -39,17 +40,55 @@ int omapdss_device_init_output(struct omap_dss_device *out)
if (out->next && out->type != out->next->type) {
dev_err(out->dev, "output type and display type don't match\n");
- omapdss_device_put(out->next);
- out->next = NULL;
- return -EINVAL;
+ ret = -EINVAL;
+ goto error;
}
- return out->next || out->bridge || out->panel ? 0 : -EPROBE_DEFER;
+ if (out->panel) {
+ struct drm_bridge *bridge;
+
+ bridge = drm_panel_bridge_add(out->panel);
+ if (IS_ERR(bridge)) {
+ dev_err(out->dev,
+ "unable to create panel bridge (%ld)\n",
+ PTR_ERR(bridge));
+ ret = PTR_ERR(bridge);
+ goto error;
+ }
+
+ out->bridge = bridge;
+ }
+
+ if (local_bridge) {
+ if (!out->bridge) {
+ ret = -EPROBE_DEFER;
+ goto error;
+ }
+
+ out->next_bridge = out->bridge;
+ out->bridge = local_bridge;
+ }
+
+ if (!out->next && !out->bridge) {
+ ret = -EPROBE_DEFER;
+ goto error;
+ }
+
+ return 0;
+
+error:
+ omapdss_device_cleanup_output(out);
+ out->next = NULL;
+ return ret;
}
EXPORT_SYMBOL(omapdss_device_init_output);
void omapdss_device_cleanup_output(struct omap_dss_device *out)
{
+ if (out->bridge && out->panel)
+ drm_panel_bridge_remove(out->next_bridge ?
+ out->next_bridge : out->bridge);
+
if (out->next)
omapdss_device_put(out->next);
}
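After omapdss_device_init_output() succeeds, the output exposes its pipeline as a bridge chain: the caller's local bridge sits at out->bridge, with the external bridge, or the panel wrapped by drm_panel_bridge_add(), behind it at out->next_bridge. A sketch of how a consumer attaches the whole chain through its head, assuming an already initialised encoder:

static int example_attach_output(struct drm_encoder *encoder,
				 struct omap_dss_device *out)
{
	/*
	 * Attaching the head bridge is enough: its .attach handler
	 * chains to out->next_bridge on the same encoder.
	 */
	return drm_bridge_attach(encoder, out->bridge, NULL,
				 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}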
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 3b447c01fa2a..417a8740ad0a 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -6,17 +6,19 @@
#define DSS_SUBSYS_NAME "SDI"
-#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/regulator/consumer.h>
#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/string.h>
-#include <linux/of.h>
-#include "omapdss.h"
+#include <drm/drm_bridge.h>
+
#include "dss.h"
+#include "omapdss.h"
struct sdi_device {
struct platform_device *pdev;
@@ -30,9 +32,11 @@ struct sdi_device {
int datapairs;
struct omap_dss_device output;
+ struct drm_bridge bridge;
};
-#define dssdev_to_sdi(dssdev) container_of(dssdev, struct sdi_device, output)
+#define drm_bridge_to_sdi(bridge) \
+ container_of(bridge, struct sdi_device, bridge)
struct sdi_clk_calc_ctx {
struct sdi_device *sdi;
@@ -118,9 +122,82 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi)
dss_mgr_set_lcd_config(&sdi->output, &sdi->mgr_config);
}
-static void sdi_display_enable(struct omap_dss_device *dssdev)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int sdi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ return drm_bridge_attach(bridge->encoder, sdi->output.next_bridge,
+ bridge, flags);
+}
+
+static enum drm_mode_status
+sdi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+ unsigned long pixelclock = mode->clock * 1000;
+ struct dispc_clock_info dispc_cinfo;
+ unsigned long fck;
+ int ret;
+
+ if (pixelclock == 0)
+ return MODE_NOCLOCK;
+
+ ret = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo);
+ if (ret < 0)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+static bool sdi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+ unsigned long pixelclock = mode->clock * 1000;
+ struct dispc_clock_info dispc_cinfo;
+ unsigned long fck;
+ unsigned long pck;
+ int ret;
+
+ ret = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo);
+ if (ret < 0)
+ return false;
+
+ pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
+
+ if (pck != pixelclock)
+ dev_dbg(&sdi->pdev->dev,
+ "pixel clock adjusted from %lu Hz to %lu Hz\n",
+ pixelclock, pck);
+
+ adjusted_mode->clock = pck / 1000;
+
+ return true;
+}
+
+static void sdi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+
+ sdi->pixelclock = adjusted_mode->clock * 1000;
+}
+
+static void sdi_bridge_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
struct dispc_clock_info dispc_cinfo;
unsigned long fck;
int r;
@@ -181,9 +258,10 @@ err_get_dispc:
regulator_disable(sdi->vdds_sdi_reg);
}
-static void sdi_display_disable(struct omap_dss_device *dssdev)
+static void sdi_bridge_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
dss_mgr_disable(&sdi->output);
@@ -194,86 +272,56 @@ static void sdi_display_disable(struct omap_dss_device *dssdev)
regulator_disable(sdi->vdds_sdi_reg);
}
-static void sdi_set_timings(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode)
-{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
-
- sdi->pixelclock = mode->clock * 1000;
-}
+static const struct drm_bridge_funcs sdi_bridge_funcs = {
+ .attach = sdi_bridge_attach,
+ .mode_valid = sdi_bridge_mode_valid,
+ .mode_fixup = sdi_bridge_mode_fixup,
+ .mode_set = sdi_bridge_mode_set,
+ .atomic_enable = sdi_bridge_enable,
+ .atomic_disable = sdi_bridge_disable,
+};
-static int sdi_check_timings(struct omap_dss_device *dssdev,
- struct drm_display_mode *mode)
+static void sdi_bridge_init(struct sdi_device *sdi)
{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
- struct dispc_clock_info dispc_cinfo;
- unsigned long pixelclock = mode->clock * 1000;
- unsigned long fck;
- unsigned long pck;
- int r;
+ sdi->bridge.funcs = &sdi_bridge_funcs;
+ sdi->bridge.of_node = sdi->pdev->dev.of_node;
+ sdi->bridge.type = DRM_MODE_CONNECTOR_LVDS;
- if (pixelclock == 0)
- return -EINVAL;
-
- r = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo);
- if (r)
- return r;
-
- pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
-
- if (pck != pixelclock) {
- DSSWARN("Pixel clock adjusted from %lu Hz to %lu Hz\n",
- pixelclock, pck);
-
- mode->clock = pck / 1000;
- }
-
- return 0;
+ drm_bridge_add(&sdi->bridge);
}
-static int sdi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
+static void sdi_bridge_cleanup(struct sdi_device *sdi)
{
- return omapdss_device_connect(dst->dss, dst, dst->next);
+ drm_bridge_remove(&sdi->bridge);
}
-static void sdi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
-}
-
-static const struct omap_dss_device_ops sdi_ops = {
- .connect = sdi_connect,
- .disconnect = sdi_disconnect,
-
- .enable = sdi_display_enable,
- .disable = sdi_display_disable,
-
- .check_timings = sdi_check_timings,
- .set_timings = sdi_set_timings,
-};
+/* -----------------------------------------------------------------------------
+ * Initialisation and Cleanup
+ */
static int sdi_init_output(struct sdi_device *sdi)
{
struct omap_dss_device *out = &sdi->output;
int r;
+ sdi_bridge_init(sdi);
+
out->dev = &sdi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_SDI;
out->type = OMAP_DISPLAY_TYPE_SDI;
out->name = "sdi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
/* We have SDI only on OMAP3, where it's on port 1 */
- out->of_ports = BIT(1);
- out->ops = &sdi_ops;
+ out->of_port = 1;
out->owner = THIS_MODULE;
out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE /* 15.5.9.1.2 */
| DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE;
- r = omapdss_device_init_output(out);
- if (r < 0)
+ r = omapdss_device_init_output(out, &sdi->bridge);
+ if (r < 0) {
+ sdi_bridge_cleanup(sdi);
return r;
+ }
omapdss_device_register(out);
@@ -284,6 +332,8 @@ static void sdi_uninit_output(struct sdi_device *sdi)
{
omapdss_device_unregister(&sdi->output);
omapdss_device_cleanup_output(&sdi->output);
+
+ sdi_bridge_cleanup(sdi);
}
int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
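sdi_bridge_mode_fixup() shows the clock-snapping idiom: instead of rejecting a mode whose pixel clock the dividers cannot produce exactly, the bridge recomputes the achievable rate and writes it back into the adjusted mode. A compact sketch, with example_calc_clock() as a hypothetical divider search:

static bool example_mode_fixup(struct drm_bridge *bridge,
			       const struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted_mode)
{
	unsigned long want = mode->clock * 1000;	/* kHz -> Hz */
	unsigned long got;

	if (example_calc_clock(bridge, want, &got) < 0)
		return false;			/* no usable divider */

	adjusted_mode->clock = got / 1000;	/* Hz -> kHz */
	return true;
}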
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 596a297d5813..766553bb2f87 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -13,7 +13,6 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/string.h>
@@ -26,6 +25,8 @@
#include <linux/component.h>
#include <linux/sys_soc.h>
+#include <drm/drm_bridge.h>
+
#include "omapdss.h"
#include "dss.h"
@@ -289,7 +290,6 @@ static const struct drm_display_mode omap_dss_ntsc_mode = {
struct venc_device {
struct platform_device *pdev;
void __iomem *base;
- struct mutex venc_lock;
struct regulator *vdda_dac_reg;
struct dss_device *dss;
@@ -303,9 +303,10 @@ struct venc_device {
bool requires_tv_dac_clk;
struct omap_dss_device output;
+ struct drm_bridge bridge;
};
-#define dssdev_to_venc(dssdev) container_of(dssdev, struct venc_device, output)
+#define drm_bridge_to_venc(b) container_of(b, struct venc_device, bridge)
static inline void venc_write_reg(struct venc_device *venc, int idx, u32 val)
{
@@ -477,56 +478,6 @@ static void venc_power_off(struct venc_device *venc)
venc_runtime_put(venc);
}
-static void venc_display_enable(struct omap_dss_device *dssdev)
-{
- struct venc_device *venc = dssdev_to_venc(dssdev);
-
- DSSDBG("venc_display_enable\n");
-
- mutex_lock(&venc->venc_lock);
-
- venc_power_on(venc);
-
- mutex_unlock(&venc->venc_lock);
-}
-
-static void venc_display_disable(struct omap_dss_device *dssdev)
-{
- struct venc_device *venc = dssdev_to_venc(dssdev);
-
- DSSDBG("venc_display_disable\n");
-
- mutex_lock(&venc->venc_lock);
-
- venc_power_off(venc);
-
- mutex_unlock(&venc->venc_lock);
-}
-
-static int venc_get_modes(struct omap_dss_device *dssdev,
- struct drm_connector *connector)
-{
- static const struct drm_display_mode *modes[] = {
- &omap_dss_pal_mode,
- &omap_dss_ntsc_mode,
- };
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(modes); ++i) {
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, modes[i]);
- if (!mode)
- return i;
-
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_set_name(mode);
- drm_mode_probed_add(connector, mode);
- }
-
- return ARRAY_SIZE(modes);
-}
-
static enum venc_videomode venc_get_videomode(const struct drm_display_mode *mode)
{
if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
@@ -545,57 +496,6 @@ static enum venc_videomode venc_get_videomode(const struct drm_display_mode *mode)
return VENC_MODE_UNKNOWN;
}
-static void venc_set_timings(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode)
-{
- struct venc_device *venc = dssdev_to_venc(dssdev);
- enum venc_videomode venc_mode = venc_get_videomode(mode);
-
- DSSDBG("venc_set_timings\n");
-
- mutex_lock(&venc->venc_lock);
-
- switch (venc_mode) {
- default:
- WARN_ON_ONCE(1);
- /* Fall-through */
- case VENC_MODE_PAL:
- venc->config = &venc_config_pal_trm;
- break;
-
- case VENC_MODE_NTSC:
- venc->config = &venc_config_ntsc_trm;
- break;
- }
-
- dispc_set_tv_pclk(venc->dss->dispc, 13500000);
-
- mutex_unlock(&venc->venc_lock);
-}
-
-static int venc_check_timings(struct omap_dss_device *dssdev,
- struct drm_display_mode *mode)
-{
- DSSDBG("venc_check_timings\n");
-
- switch (venc_get_videomode(mode)) {
- case VENC_MODE_PAL:
- drm_mode_copy(mode, &omap_dss_pal_mode);
- break;
-
- case VENC_MODE_NTSC:
- drm_mode_copy(mode, &omap_dss_ntsc_mode);
- break;
-
- default:
- return -EINVAL;
- }
-
- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- drm_mode_set_name(mode);
- return 0;
-}
-
static int venc_dump_regs(struct seq_file *s, void *p)
{
struct venc_device *venc = s->private;
@@ -673,31 +573,149 @@ static int venc_get_clocks(struct venc_device *venc)
return 0;
}
-static int venc_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int venc_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct venc_device *venc = drm_bridge_to_venc(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ return drm_bridge_attach(bridge->encoder, venc->output.next_bridge,
+ bridge, flags);
+}
+
+static enum drm_mode_status
+venc_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ switch (venc_get_videomode(mode)) {
+ case VENC_MODE_PAL:
+ case VENC_MODE_NTSC:
+ return MODE_OK;
+
+ default:
+ return MODE_BAD;
+ }
+}
+
+static bool venc_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ const struct drm_display_mode *venc_mode;
+
+ switch (venc_get_videomode(adjusted_mode)) {
+ case VENC_MODE_PAL:
+ venc_mode = &omap_dss_pal_mode;
+ break;
+
+ case VENC_MODE_NTSC:
+ venc_mode = &omap_dss_ntsc_mode;
+ break;
+
+ default:
+ return false;
+ }
+
+ drm_mode_copy(adjusted_mode, venc_mode);
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+ drm_mode_set_name(adjusted_mode);
+
+ return true;
+}
+
+static void venc_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct venc_device *venc = drm_bridge_to_venc(bridge);
+ enum venc_videomode venc_mode = venc_get_videomode(adjusted_mode);
+
+ switch (venc_mode) {
+ default:
+ WARN_ON_ONCE(1);
+ /* Fall-through */
+ case VENC_MODE_PAL:
+ venc->config = &venc_config_pal_trm;
+ break;
+
+ case VENC_MODE_NTSC:
+ venc->config = &venc_config_ntsc_trm;
+ break;
+ }
+
+ dispc_set_tv_pclk(venc->dss->dispc, 13500000);
+}
+
+static void venc_bridge_enable(struct drm_bridge *bridge)
{
- return omapdss_device_connect(dst->dss, dst, dst->next);
+ struct venc_device *venc = drm_bridge_to_venc(bridge);
+
+ venc_power_on(venc);
}
-static void venc_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
+static void venc_bridge_disable(struct drm_bridge *bridge)
{
- omapdss_device_disconnect(dst, dst->next);
+ struct venc_device *venc = drm_bridge_to_venc(bridge);
+
+ venc_power_off(venc);
}
-static const struct omap_dss_device_ops venc_ops = {
- .connect = venc_connect,
- .disconnect = venc_disconnect,
+static int venc_bridge_get_modes(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ static const struct drm_display_mode *modes[] = {
+ &omap_dss_pal_mode,
+ &omap_dss_ntsc_mode,
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(modes); ++i) {
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, modes[i]);
+ if (!mode)
+ return i;
- .enable = venc_display_enable,
- .disable = venc_display_disable,
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+ }
- .check_timings = venc_check_timings,
- .set_timings = venc_set_timings,
+ return ARRAY_SIZE(modes);
+}
- .get_modes = venc_get_modes,
+static const struct drm_bridge_funcs venc_bridge_funcs = {
+ .attach = venc_bridge_attach,
+ .mode_valid = venc_bridge_mode_valid,
+ .mode_fixup = venc_bridge_mode_fixup,
+ .mode_set = venc_bridge_mode_set,
+ .enable = venc_bridge_enable,
+ .disable = venc_bridge_disable,
+ .get_modes = venc_bridge_get_modes,
};
+static void venc_bridge_init(struct venc_device *venc)
+{
+ venc->bridge.funcs = &venc_bridge_funcs;
+ venc->bridge.of_node = venc->pdev->dev.of_node;
+ venc->bridge.ops = DRM_BRIDGE_OP_MODES;
+ venc->bridge.type = DRM_MODE_CONNECTOR_SVIDEO;
+ venc->bridge.interlace_allowed = true;
+
+ drm_bridge_add(&venc->bridge);
+}
+
+static void venc_bridge_cleanup(struct venc_device *venc)
+{
+ drm_bridge_remove(&venc->bridge);
+}
+
/* -----------------------------------------------------------------------------
* Component Bind & Unbind
*/
@@ -747,19 +765,22 @@ static int venc_init_output(struct venc_device *venc)
struct omap_dss_device *out = &venc->output;
int r;
+ venc_bridge_init(venc);
+
out->dev = &venc->pdev->dev;
out->id = OMAP_DSS_OUTPUT_VENC;
out->type = OMAP_DISPLAY_TYPE_VENC;
out->name = "venc.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->ops = &venc_ops;
out->owner = THIS_MODULE;
- out->of_ports = BIT(0);
+ out->of_port = 0;
out->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
- r = omapdss_device_init_output(out);
- if (r < 0)
+ r = omapdss_device_init_output(out, &venc->bridge);
+ if (r < 0) {
+ venc_bridge_cleanup(venc);
return r;
+ }
omapdss_device_register(out);
@@ -770,6 +791,8 @@ static void venc_uninit_output(struct venc_device *venc)
{
omapdss_device_unregister(&venc->output);
omapdss_device_cleanup_output(&venc->output);
+
+ venc_bridge_cleanup(venc);
}
static int venc_probe_of(struct venc_device *venc)
@@ -839,8 +862,6 @@ static int venc_probe(struct platform_device *pdev)
if (soc_device_match(venc_soc_devices))
venc->requires_tv_dac_clk = true;
- mutex_init(&venc->venc_lock);
-
venc->config = &venc_config_pal_trm;
venc_mem = platform_get_resource(venc->pdev, IORESOURCE_MEM, 0);
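Setting DRM_BRIDGE_OP_MODES in venc_bridge_init() is what wires the fixed PAL/NTSC list into the connector: drm_bridge_connector_init() scans the bridge chain for these ops flags and routes the connector's .get_modes to whichever bridge advertises modes. A sketch of the registration side, with generic example_* names:

static void example_bridge_register(struct example_device *edev)
{
	edev->bridge.funcs = &example_bridge_funcs;
	edev->bridge.of_node = edev->dev->of_node;
	edev->bridge.ops = DRM_BRIDGE_OP_MODES;	/* we provide fixed modes */
	edev->bridge.type = DRM_MODE_CONNECTOR_SVIDEO;
	edev->bridge.interlace_allowed = true;	/* PAL/NTSC are interlaced */

	drm_bridge_add(&edev->bridge);
}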
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 94cded387174..528764566b17 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -6,7 +6,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include "omap_drv.h"
@@ -20,124 +19,12 @@
struct omap_connector {
struct drm_connector base;
struct omap_dss_device *output;
- struct omap_dss_device *hpd;
- bool hdmi_mode;
};
-static void omap_connector_hpd_notify(struct drm_connector *connector,
- enum drm_connector_status status)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev;
-
- if (status != connector_status_disconnected)
- return;
-
- /*
- * Notify all devices in the pipeline of disconnection. This is required
- * to let the HDMI encoders reset their internal state related to
- * connection status, such as the CEC address.
- */
- for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) {
- if (dssdev->ops && dssdev->ops->hdmi.lost_hotplug)
- dssdev->ops->hdmi.lost_hotplug(dssdev);
- }
-}
-
-static void omap_connector_hpd_cb(void *cb_data,
- enum drm_connector_status status)
-{
- struct omap_connector *omap_connector = cb_data;
- struct drm_connector *connector = &omap_connector->base;
- struct drm_device *dev = connector->dev;
- enum drm_connector_status old_status;
-
- mutex_lock(&dev->mode_config.mutex);
- old_status = connector->status;
- connector->status = status;
- mutex_unlock(&dev->mode_config.mutex);
-
- if (old_status == status)
- return;
-
- omap_connector_hpd_notify(connector, status);
-
- drm_kms_helper_hotplug_event(dev);
-}
-
-void omap_connector_enable_hpd(struct drm_connector *connector)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *hpd = omap_connector->hpd;
-
- if (hpd)
- hpd->ops->register_hpd_cb(hpd, omap_connector_hpd_cb,
- omap_connector);
-}
-
-void omap_connector_disable_hpd(struct drm_connector *connector)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *hpd = omap_connector->hpd;
-
- if (hpd)
- hpd->ops->unregister_hpd_cb(hpd);
-}
-
-bool omap_connector_get_hdmi_mode(struct drm_connector *connector)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
-
- return omap_connector->hdmi_mode;
-}
-
-static struct omap_dss_device *
-omap_connector_find_device(struct drm_connector *connector,
- enum omap_dss_device_ops_flag op)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev = NULL;
- struct omap_dss_device *d;
-
- for (d = omap_connector->output; d; d = d->next) {
- if (d->ops_flags & op)
- dssdev = d;
- }
-
- return dssdev;
-}
-
static enum drm_connector_status omap_connector_detect(
struct drm_connector *connector, bool force)
{
- struct omap_dss_device *dssdev;
- enum drm_connector_status status;
-
- dssdev = omap_connector_find_device(connector,
- OMAP_DSS_DEVICE_OP_DETECT);
-
- if (dssdev) {
- status = dssdev->ops->detect(dssdev)
- ? connector_status_connected
- : connector_status_disconnected;
-
- omap_connector_hpd_notify(connector, status);
- } else {
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_DPI:
- case DRM_MODE_CONNECTOR_LVDS:
- case DRM_MODE_CONNECTOR_DSI:
- status = connector_status_connected;
- break;
- default:
- status = connector_status_unknown;
- break;
- }
- }
-
- VERB("%s: %d (force=%d)", connector->name, status, force);
-
- return status;
+ return connector_status_connected;
}
static void omap_connector_destroy(struct drm_connector *connector)
@@ -146,14 +33,6 @@ static void omap_connector_destroy(struct drm_connector *connector)
DBG("%s", connector->name);
- if (omap_connector->hpd) {
- struct omap_dss_device *hpd = omap_connector->hpd;
-
- hpd->ops->unregister_hpd_cb(hpd);
- omapdss_device_put(hpd);
- omap_connector->hpd = NULL;
- }
-
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
@@ -162,81 +41,27 @@ static void omap_connector_destroy(struct drm_connector *connector)
kfree(omap_connector);
}
-#define MAX_EDID 512
-
-static int omap_connector_get_modes_edid(struct drm_connector *connector,
- struct omap_dss_device *dssdev)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- enum drm_connector_status status;
- void *edid;
- int n;
-
- status = omap_connector_detect(connector, false);
- if (status != connector_status_connected)
- goto no_edid;
-
- edid = kzalloc(MAX_EDID, GFP_KERNEL);
- if (!edid)
- goto no_edid;
-
- if (dssdev->ops->read_edid(dssdev, edid, MAX_EDID) <= 0 ||
- !drm_edid_is_valid(edid)) {
- kfree(edid);
- goto no_edid;
- }
-
- drm_connector_update_edid_property(connector, edid);
- n = drm_add_edid_modes(connector, edid);
-
- omap_connector->hdmi_mode = drm_detect_hdmi_monitor(edid);
-
- kfree(edid);
- return n;
-
-no_edid:
- drm_connector_update_edid_property(connector, NULL);
- return 0;
-}
-
static int omap_connector_get_modes(struct drm_connector *connector)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev;
+ struct omap_dss_device *dssdev = NULL;
+ struct omap_dss_device *d;
DBG("%s", connector->name);
/*
- * If display exposes EDID, then we parse that in the normal way to
- * build table of supported modes.
+ * If the display pipeline reports modes (e.g. with a fixed resolution
+ * panel or an analog TV output), query it.
*/
- dssdev = omap_connector_find_device(connector,
- OMAP_DSS_DEVICE_OP_EDID);
- if (dssdev)
- return omap_connector_get_modes_edid(connector, dssdev);
+ for (d = omap_connector->output; d; d = d->next) {
+ if (d->ops_flags & OMAP_DSS_DEVICE_OP_MODES)
+ dssdev = d;
+ }
- /*
- * Otherwise if the display pipeline reports modes (e.g. with a fixed
- * resolution panel or an analog TV output), query it.
- */
- dssdev = omap_connector_find_device(connector,
- OMAP_DSS_DEVICE_OP_MODES);
if (dssdev)
return dssdev->ops->get_modes(dssdev, connector);
- /*
- * Otherwise if the display pipeline uses a drm_panel, we delegate the
- * operation to the panel API.
- */
- if (omap_connector->output->panel)
- return drm_panel_get_modes(omap_connector->output->panel,
- connector);
-
- /*
- * We can't retrieve modes, which can happen for instance for a DVI or
- * VGA output with the DDC bus unconnected. The KMS core will add the
- * default modes.
- */
+ /* We can't retrieve modes. The KMS core will add the default modes. */
return 0;
}
@@ -249,7 +74,7 @@ enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
drm_mode_copy(adjusted_mode, mode);
for (; dssdev; dssdev = dssdev->next) {
- if (!dssdev->ops->check_timings)
+ if (!dssdev->ops || !dssdev->ops->check_timings)
continue;
ret = dssdev->ops->check_timings(dssdev, adjusted_mode);
@@ -298,35 +123,6 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
.mode_valid = omap_connector_mode_valid,
};
-static int omap_connector_get_type(struct omap_dss_device *output)
-{
- struct omap_dss_device *display;
- enum omap_display_type type;
-
- display = omapdss_display_get(output);
- type = display->type;
- omapdss_device_put(display);
-
- switch (type) {
- case OMAP_DISPLAY_TYPE_HDMI:
- return DRM_MODE_CONNECTOR_HDMIA;
- case OMAP_DISPLAY_TYPE_DVI:
- return DRM_MODE_CONNECTOR_DVID;
- case OMAP_DISPLAY_TYPE_DSI:
- return DRM_MODE_CONNECTOR_DSI;
- case OMAP_DISPLAY_TYPE_DPI:
- case OMAP_DISPLAY_TYPE_DBI:
- return DRM_MODE_CONNECTOR_DPI;
- case OMAP_DISPLAY_TYPE_VENC:
- /* TODO: This could also be composite */
- return DRM_MODE_CONNECTOR_SVIDEO;
- case OMAP_DISPLAY_TYPE_SDI:
- return DRM_MODE_CONNECTOR_LVDS;
- default:
- return DRM_MODE_CONNECTOR_Unknown;
- }
-}
-
/* initialize connector */
struct drm_connector *omap_connector_init(struct drm_device *dev,
struct omap_dss_device *output,
@@ -334,7 +130,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
{
struct drm_connector *connector = NULL;
struct omap_connector *omap_connector;
- struct omap_dss_device *dssdev;
DBG("%s", output->name);
@@ -349,27 +144,9 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
connector->doublescan_allowed = 0;
drm_connector_init(dev, connector, &omap_connector_funcs,
- omap_connector_get_type(output));
+ DRM_MODE_CONNECTOR_DSI);
drm_connector_helper_add(connector, &omap_connector_helper_funcs);
- /*
- * Initialize connector status handling. First try to find a device that
- * supports hot-plug reporting. If it fails, fall back to a device that
- * supports polling. If that fails too, we don't support hot-plug
- * detection at all.
- */
- dssdev = omap_connector_find_device(connector, OMAP_DSS_DEVICE_OP_HPD);
- if (dssdev) {
- omap_connector->hpd = omapdss_device_get(dssdev);
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- } else {
- dssdev = omap_connector_find_device(connector,
- OMAP_DSS_DEVICE_OP_DETECT);
- if (dssdev)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
- }
-
return connector;
fail:
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h
index 13607bda33d8..0ecd4f1655b7 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.h
+++ b/drivers/gpu/drm/omapdrm/omap_connector.h
@@ -21,9 +21,6 @@ struct omap_dss_device;
struct drm_connector *omap_connector_init(struct drm_device *dev,
struct omap_dss_device *output,
struct drm_encoder *encoder);
-bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
-void omap_connector_enable_hpd(struct drm_connector *connector);
-void omap_connector_disable_hpd(struct drm_connector *connector);
enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 3c5ddbf30e97..fce7e944a280 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -831,7 +831,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
* OMAP_DSS_CHANNEL_DIGIT. X server assumes 256 element gamma
* tables so lets use that. Size of HW gamma table can be
* extracted with dispc_mgr_gamma_size(). If it returns 0
- * gamma table is not supprted.
+ * gamma table is not supported.
*/
if (priv->dispc_ops->mgr_gamma_size(priv->dispc, channel)) {
unsigned int gamma_lut_size = 256;
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 252f5ebb1acc..42ec51bb7b1b 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -82,12 +82,11 @@ static const u32 reg[][4] = {
static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst)
{
- struct dma_device *dma_dev = dmm->wa_dma_chan->device;
struct dma_async_tx_descriptor *tx;
enum dma_status status;
dma_cookie_t cookie;
- tx = dma_dev->device_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
+ tx = dmaengine_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
if (!tx) {
dev_err(dmm->dev, "Failed to prepare DMA memcpy\n");
return -EIO;
@@ -99,7 +98,6 @@ static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst)
return -EIO;
}
- dma_async_issue_pending(dmm->wa_dma_chan);
status = dma_sync_wait(dmm->wa_dma_chan, cookie);
if (status != DMA_COMPLETE)
dev_err(dmm->dev, "i878 wa DMA copy failure\n");
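The tiler workaround now uses the dmaengine wrapper API. Dropping the explicit dma_async_issue_pending() call is safe because dma_sync_wait() issues pending descriptors itself before polling for completion. A sketch of the resulting idiom for a one-off synchronous copy:

#include <linux/dmaengine.h>

static int example_dma_copy(struct dma_chan *chan, dma_addr_t dst,
			    dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx)
		return -EIO;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	/* Issues pending work, then busy-waits on the cookie. */
	return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
}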
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index d2750f60f519..cdafd7ef1c32 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -12,6 +12,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
@@ -134,9 +135,6 @@ static void omap_disconnect_pipelines(struct drm_device *ddev)
for (i = 0; i < priv->num_pipes; i++) {
struct omap_drm_pipeline *pipe = &priv->pipes[i];
- if (pipe->output->panel)
- drm_panel_detach(pipe->output->panel);
-
omapdss_device_disconnect(NULL, pipe->output);
omapdss_device_put(pipe->output);
@@ -209,11 +207,12 @@ static int omap_display_id(struct omap_dss_device *output)
struct device_node *node = NULL;
if (output->next) {
- struct omap_dss_device *display;
+ struct omap_dss_device *display = output;
+
+ while (display->next)
+ display = display->next;
- display = omapdss_display_get(output);
node = display->dev->of_node;
- omapdss_device_put(display);
} else if (output->bridge) {
struct drm_bridge *bridge = output->bridge;
@@ -221,8 +220,6 @@ static int omap_display_id(struct omap_dss_device *output)
bridge = drm_bridge_get_next_bridge(bridge);
node = bridge->of_node;
- } else if (output->panel) {
- node = output->panel->dev->of_node;
}
return node ? of_alias_get_id(node, "display") : -ENODEV;
@@ -297,9 +294,14 @@ static int omap_modeset_init(struct drm_device *dev)
if (pipe->output->bridge) {
ret = drm_bridge_attach(pipe->encoder,
- pipe->output->bridge, NULL);
- if (ret < 0)
+ pipe->output->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret < 0) {
+ dev_err(priv->dev,
+ "unable to attach bridge %pOF\n",
+ pipe->output->bridge->of_node);
return ret;
+ }
}
id = omap_display_id(pipe->output);
@@ -330,20 +332,28 @@ static int omap_modeset_init(struct drm_device *dev)
struct drm_encoder *encoder = pipe->encoder;
struct drm_crtc *crtc;
- if (!pipe->output->bridge) {
+ if (pipe->output->next) {
pipe->connector = omap_connector_init(dev, pipe->output,
encoder);
if (!pipe->connector)
return -ENOMEM;
+ } else {
+ pipe->connector = drm_bridge_connector_init(dev, encoder);
+ if (IS_ERR(pipe->connector)) {
+ dev_err(priv->dev,
+ "unable to create bridge connector for %s\n",
+ pipe->output->name);
+ return PTR_ERR(pipe->connector);
+ }
+ }
- drm_connector_attach_encoder(pipe->connector, encoder);
+ drm_connector_attach_encoder(pipe->connector, encoder);
- if (pipe->output->panel) {
- ret = drm_panel_attach(pipe->output->panel,
- pipe->connector);
- if (ret < 0)
- return ret;
- }
+ if (pipe->output->panel) {
+ ret = drm_panel_attach(pipe->output->panel,
+ pipe->connector);
+ if (ret < 0)
+ return ret;
}
crtc = omap_crtc_init(dev, pipe, priv->planes[i]);
@@ -382,6 +392,23 @@ static int omap_modeset_init(struct drm_device *dev)
return 0;
}
+static void omap_modeset_fini(struct drm_device *ddev)
+{
+ struct omap_drm_private *priv = ddev->dev_private;
+ unsigned int i;
+
+ omap_drm_irq_uninstall(ddev);
+
+ for (i = 0; i < priv->num_pipes; i++) {
+ struct omap_drm_pipeline *pipe = &priv->pipes[i];
+
+ if (pipe->output->panel)
+ drm_panel_detach(pipe->output->panel);
+ }
+
+ drm_mode_config_cleanup(ddev);
+}
+
/*
* Enable the HPD in external components if supported
*/
@@ -391,8 +418,13 @@ static void omap_modeset_enable_external_hpd(struct drm_device *ddev)
unsigned int i;
for (i = 0; i < priv->num_pipes; i++) {
- if (priv->pipes[i].connector)
- omap_connector_enable_hpd(priv->pipes[i].connector);
+ struct drm_connector *connector = priv->pipes[i].connector;
+
+ if (!connector)
+ continue;
+
+ if (priv->pipes[i].output->bridge)
+ drm_bridge_connector_enable_hpd(connector);
}
}
@@ -405,8 +437,13 @@ static void omap_modeset_disable_external_hpd(struct drm_device *ddev)
unsigned int i;
for (i = 0; i < priv->num_pipes; i++) {
- if (priv->pipes[i].connector)
- omap_connector_disable_hpd(priv->pipes[i].connector);
+ struct drm_connector *connector = priv->pipes[i].connector;
+
+ if (!connector)
+ continue;
+
+ if (priv->pipes[i].output->bridge)
+ drm_bridge_connector_disable_hpd(connector);
}
}
@@ -629,8 +666,7 @@ err_cleanup_helpers:
omap_fbdev_fini(ddev);
err_cleanup_modeset:
- drm_mode_config_cleanup(ddev);
- omap_drm_irq_uninstall(ddev);
+ omap_modeset_fini(ddev);
err_gem_deinit:
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
@@ -655,9 +691,7 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
drm_atomic_helper_shutdown(ddev);
- drm_mode_config_cleanup(ddev);
-
- omap_drm_irq_uninstall(ddev);
+ omap_modeset_fini(ddev);
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
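With all non-DSI outputs converted to bridges, connector creation collapses into the generic helper. A sketch of the per-pipeline flow, assuming the encoder's bridge chain was already attached with DRM_BRIDGE_ATTACH_NO_CONNECTOR:

static int example_init_connector(struct drm_device *ddev,
				  struct drm_encoder *encoder)
{
	struct drm_connector *connector;

	/* Builds the connector from the ops advertised by the bridges. */
	connector = drm_bridge_connector_init(ddev, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	return drm_connector_attach_encoder(connector, encoder);
}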
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 4f2165a37795..ae4b867a67a3 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -10,7 +10,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_edid.h>
-#include <drm/drm_panel.h>
#include "omap_drv.h"
@@ -70,30 +69,6 @@ static void omap_encoder_update_videomode_flags(struct videomode *vm,
}
}
-static void omap_encoder_hdmi_mode_set(struct drm_connector *connector,
- struct drm_encoder *encoder,
- struct drm_display_mode *adjusted_mode)
-{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->output;
- bool hdmi_mode;
-
- hdmi_mode = omap_connector_get_hdmi_mode(connector);
-
- if (dssdev->ops->hdmi.set_hdmi_mode)
- dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
-
- if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
- struct hdmi_avi_infoframe avi;
- int r;
-
- r = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
- adjusted_mode);
- if (r == 0)
- dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
- }
-}
-
static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -138,17 +113,8 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
bus_flags = connector->display_info.bus_flags;
omap_encoder_update_videomode_flags(&vm, bus_flags);
- /* Set timings for all devices in the display pipeline. */
+ /* Set timings for the dss manager. */
dss_mgr_set_timings(output, &vm);
-
- for (dssdev = output; dssdev; dssdev = dssdev->next) {
- if (dssdev->ops->set_timings)
- dssdev->ops->set_timings(dssdev, adjusted_mode);
- }
-
- /* Set the HDMI mode and HDMI infoframe if applicable. */
- if (output->type == OMAP_DISPLAY_TYPE_HDMI)
- omap_encoder_hdmi_mode_set(connector, encoder, adjusted_mode);
}
static void omap_encoder_disable(struct drm_encoder *encoder)
@@ -159,33 +125,12 @@ static void omap_encoder_disable(struct drm_encoder *encoder)
dev_dbg(dev->dev, "disable(%s)\n", dssdev->name);
- /* Disable the panel if present. */
- if (dssdev->panel) {
- drm_panel_disable(dssdev->panel);
- drm_panel_unprepare(dssdev->panel);
- }
-
/*
* Disable the chain of external devices, starting at the one at the
- * internal encoder's output.
+ * internal encoder's output. This is used for DSI outputs only, as
+ * dssdev->next is NULL for all other outputs.
*/
omapdss_device_disable(dssdev->next);
-
- /*
- * Disable the internal encoder. This will disable the DSS output. The
- * DSI is treated as an exception as DSI pipelines still use the legacy
- * flow where the pipeline output controls the encoder.
- */
- if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) {
- dssdev->ops->disable(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
- }
-
- /*
- * Perform the post-disable operations on the chain of external devices
- * to complete the display pipeline disable.
- */
- omapdss_device_post_disable(dssdev->next);
}
static void omap_encoder_enable(struct drm_encoder *encoder)
@@ -196,30 +141,12 @@ static void omap_encoder_enable(struct drm_encoder *encoder)
dev_dbg(dev->dev, "enable(%s)\n", dssdev->name);
- /* Prepare the chain of external devices for pipeline enable. */
- omapdss_device_pre_enable(dssdev->next);
-
- /*
- * Enable the internal encoder. This will enable the DSS output. The
- * DSI is treated as an exception as DSI pipelines still use the legacy
- * flow where the pipeline output controls the encoder.
- */
- if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) {
- dssdev->ops->enable(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
- }
-
/*
* Enable the chain of external devices, starting at the one at the
- * internal encoder's output.
+ * internal encoder's output. This is used for DSI outputs only, as
+ * dssdev->next is NULL for all other outputs.
*/
omapdss_device_enable(dssdev->next);
-
- /* Enable the panel if present. */
- if (dssdev->panel) {
- drm_panel_prepare(dssdev->panel);
- drm_panel_enable(dssdev->panel);
- }
}
static int omap_encoder_atomic_check(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index b06e5cbfd03a..09a84919ef73 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -242,14 +242,10 @@ void omap_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper, priv->num_pipes);
+ ret = drm_fb_helper_init(dev, helper);
if (ret)
goto fail;
- ret = drm_fb_helper_single_add_all_connectors(helper);
- if (ret)
- goto fini;
-
ret = drm_fb_helper_initial_config(helper, 32);
if (ret)
goto fini;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index ae44ac2ec106..a1723c1b5fbf 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -29,6 +29,15 @@ config DRM_PANEL_BOE_HIMAX8279D
24 bit RGB per pixel. It provides a MIPI DSI interface to
the host and has a built-in LED backlight.
+config DRM_PANEL_BOE_TV101WUM_NL6
+ tristate "BOE TV101WUM and AUO KD101N80 45NA 1200x1920 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the BOE TV101WUM and
+ AUO KD101N80 45NA WUXGA DSI video mode panels.
+
config DRM_PANEL_LVDS
tristate "Generic LVDS panel driver"
depends on OF
@@ -50,6 +59,25 @@ config DRM_PANEL_SIMPLE
that it can be automatically turned off when the panel goes into a
low power state.
+config DRM_PANEL_ELIDA_KD35T133
+ tristate "Elida KD35T133 panel driver"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Elida
+ KD35T133 controller for 320x480 LCD panels with MIPI-DSI
+ system interfaces.
+
+config DRM_PANEL_FEIXIN_K101_IM2BA02
+ tristate "Feixin K101 IM2BA02 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Feixin K101 IM2BA02
+ 4-lane 800x1280 MIPI DSI panel.
+
config DRM_PANEL_FEIYANG_FY07024DI26A30D
tristate "Feiyang FY07024DI26A30-D MIPI-DSI LCD panel"
depends on OF
@@ -149,6 +177,16 @@ config DRM_PANEL_NEC_NL8048HL11
panel (found on the Zoom2/3/3630 SDP boards). To compile this driver
as a module, choose M here.
+config DRM_PANEL_NOVATEK_NT35510
+ tristate "Novatek NT35510 RGB panel driver"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the panels built
+ around the Novatek NT35510 display controller, such as some
+ Hydis panels.
+
config DRM_PANEL_NOVATEK_NT39016
tristate "Novatek NT39016 RGB/SPI panel"
depends on OF && SPI
@@ -275,6 +313,12 @@ config DRM_PANEL_SAMSUNG_S6E63M0
Say Y here if you want to enable support for Samsung S6E63M0
AMOLED LCD panel.
+config DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01
+ tristate "Samsung AMS452EF01 panel with S6E88A0 DSI video mode controller"
+ depends on OF
+ select DRM_MIPI_DSI
+ select VIDEOMODE_HELPERS
+
config DRM_PANEL_SAMSUNG_S6E8AA0
tristate "Samsung S6E8AA0 DSI video mode panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 7c4d3c581fd4..96a883cd6630 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,8 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
+obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
+obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
+obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o
obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
@@ -13,6 +16,7 @@ obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
+obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35510) += panel-novatek-nt35510.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
@@ -28,6 +32,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01) += panel-samsung-s6e88a0-ams452ef01.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
new file mode 100644
index 000000000000..48a164257d18
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -0,0 +1,854 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Jitao Shi <[email protected]>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct panel_desc {
+ const struct drm_display_mode *modes;
+ unsigned int bpc;
+
+ /**
+ * @width_mm: width of the panel's active display area
+ * @height_mm: height of the panel's active display area
+ */
+ struct {
+ unsigned int width_mm;
+ unsigned int height_mm;
+ } size;
+
+ unsigned long mode_flags;
+ enum mipi_dsi_pixel_format format;
+ const struct panel_init_cmd *init_cmds;
+ unsigned int lanes;
+ bool discharge_on_disable;
+};
+
+struct boe_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *dsi;
+
+ const struct panel_desc *desc;
+
+ struct regulator *pp1800;
+ struct regulator *avee;
+ struct regulator *avdd;
+ struct gpio_desc *enable_gpio;
+
+ bool prepared;
+};
+
+enum dsi_cmd_type {
+ INIT_DCS_CMD,
+ DELAY_CMD,
+};
+
+struct panel_init_cmd {
+ enum dsi_cmd_type type;
+ size_t len;
+ const char *data;
+};
+
+#define _INIT_DCS_CMD(...) { \
+ .type = INIT_DCS_CMD, \
+ .len = sizeof((char[]){__VA_ARGS__}), \
+ .data = (char[]){__VA_ARGS__} }
+
+#define _INIT_DELAY_CMD(...) { \
+ .type = DELAY_CMD,\
+ .len = sizeof((char[]){__VA_ARGS__}), \
+ .data = (char[]){__VA_ARGS__} }
+
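+/*
+ * Example: _INIT_DCS_CMD(0xB0, 0x05) expands to { .type = INIT_DCS_CMD,
+ * .len = 2, .data = (char[]){ 0xB0, 0x05 } }, i.e. DCS command 0xB0 with
+ * a single parameter byte. _INIT_DELAY_CMD(24) likewise yields a
+ * DELAY_CMD entry whose data[0] = 24 is later consumed as a millisecond
+ * delay.
+ */
+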
+static const struct panel_init_cmd boe_init_cmd[] = {
+ _INIT_DELAY_CMD(24),
+ _INIT_DCS_CMD(0xB0, 0x05),
+ _INIT_DCS_CMD(0xB1, 0xE5),
+ _INIT_DCS_CMD(0xB3, 0x52),
+ _INIT_DCS_CMD(0xB0, 0x00),
+ _INIT_DCS_CMD(0xB3, 0x88),
+ _INIT_DCS_CMD(0xB0, 0x04),
+ _INIT_DCS_CMD(0xB8, 0x00),
+ _INIT_DCS_CMD(0xB0, 0x00),
+ _INIT_DCS_CMD(0xB6, 0x03),
+ _INIT_DCS_CMD(0xBA, 0x8B),
+ _INIT_DCS_CMD(0xBF, 0x1A),
+ _INIT_DCS_CMD(0xC0, 0x0F),
+ _INIT_DCS_CMD(0xC2, 0x0C),
+ _INIT_DCS_CMD(0xC3, 0x02),
+ _INIT_DCS_CMD(0xC4, 0x0C),
+ _INIT_DCS_CMD(0xC5, 0x02),
+ _INIT_DCS_CMD(0xB0, 0x01),
+ _INIT_DCS_CMD(0xE0, 0x26),
+ _INIT_DCS_CMD(0xE1, 0x26),
+ _INIT_DCS_CMD(0xDC, 0x00),
+ _INIT_DCS_CMD(0xDD, 0x00),
+ _INIT_DCS_CMD(0xCC, 0x26),
+ _INIT_DCS_CMD(0xCD, 0x26),
+ _INIT_DCS_CMD(0xC8, 0x00),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xD2, 0x03),
+ _INIT_DCS_CMD(0xD3, 0x03),
+ _INIT_DCS_CMD(0xE6, 0x04),
+ _INIT_DCS_CMD(0xE7, 0x04),
+ _INIT_DCS_CMD(0xC4, 0x09),
+ _INIT_DCS_CMD(0xC5, 0x09),
+ _INIT_DCS_CMD(0xD8, 0x0A),
+ _INIT_DCS_CMD(0xD9, 0x0A),
+ _INIT_DCS_CMD(0xC2, 0x0B),
+ _INIT_DCS_CMD(0xC3, 0x0B),
+ _INIT_DCS_CMD(0xD6, 0x0C),
+ _INIT_DCS_CMD(0xD7, 0x0C),
+ _INIT_DCS_CMD(0xC0, 0x05),
+ _INIT_DCS_CMD(0xC1, 0x05),
+ _INIT_DCS_CMD(0xD4, 0x06),
+ _INIT_DCS_CMD(0xD5, 0x06),
+ _INIT_DCS_CMD(0xCA, 0x07),
+ _INIT_DCS_CMD(0xCB, 0x07),
+ _INIT_DCS_CMD(0xDE, 0x08),
+ _INIT_DCS_CMD(0xDF, 0x08),
+ _INIT_DCS_CMD(0xB0, 0x02),
+ _INIT_DCS_CMD(0xC0, 0x00),
+ _INIT_DCS_CMD(0xC1, 0x0D),
+ _INIT_DCS_CMD(0xC2, 0x17),
+ _INIT_DCS_CMD(0xC3, 0x26),
+ _INIT_DCS_CMD(0xC4, 0x31),
+ _INIT_DCS_CMD(0xC5, 0x1C),
+ _INIT_DCS_CMD(0xC6, 0x2C),
+ _INIT_DCS_CMD(0xC7, 0x33),
+ _INIT_DCS_CMD(0xC8, 0x31),
+ _INIT_DCS_CMD(0xC9, 0x37),
+ _INIT_DCS_CMD(0xCA, 0x37),
+ _INIT_DCS_CMD(0xCB, 0x37),
+ _INIT_DCS_CMD(0xCC, 0x39),
+ _INIT_DCS_CMD(0xCD, 0x2E),
+ _INIT_DCS_CMD(0xCE, 0x2F),
+ _INIT_DCS_CMD(0xCF, 0x2F),
+ _INIT_DCS_CMD(0xD0, 0x07),
+ _INIT_DCS_CMD(0xD2, 0x00),
+ _INIT_DCS_CMD(0xD3, 0x0D),
+ _INIT_DCS_CMD(0xD4, 0x17),
+ _INIT_DCS_CMD(0xD5, 0x26),
+ _INIT_DCS_CMD(0xD6, 0x31),
+ _INIT_DCS_CMD(0xD7, 0x3F),
+ _INIT_DCS_CMD(0xD8, 0x3F),
+ _INIT_DCS_CMD(0xD9, 0x3F),
+ _INIT_DCS_CMD(0xDA, 0x3F),
+ _INIT_DCS_CMD(0xDB, 0x37),
+ _INIT_DCS_CMD(0xDC, 0x37),
+ _INIT_DCS_CMD(0xDD, 0x37),
+ _INIT_DCS_CMD(0xDE, 0x39),
+ _INIT_DCS_CMD(0xDF, 0x2E),
+ _INIT_DCS_CMD(0xE0, 0x2F),
+ _INIT_DCS_CMD(0xE1, 0x2F),
+ _INIT_DCS_CMD(0xE2, 0x07),
+ _INIT_DCS_CMD(0xB0, 0x03),
+ _INIT_DCS_CMD(0xC8, 0x0B),
+ _INIT_DCS_CMD(0xC9, 0x07),
+ _INIT_DCS_CMD(0xC3, 0x00),
+ _INIT_DCS_CMD(0xE7, 0x00),
+ _INIT_DCS_CMD(0xC5, 0x2A),
+ _INIT_DCS_CMD(0xDE, 0x2A),
+ _INIT_DCS_CMD(0xCA, 0x43),
+ _INIT_DCS_CMD(0xC9, 0x07),
+ _INIT_DCS_CMD(0xE4, 0xC0),
+ _INIT_DCS_CMD(0xE5, 0x0D),
+ _INIT_DCS_CMD(0xCB, 0x00),
+ _INIT_DCS_CMD(0xB0, 0x06),
+ _INIT_DCS_CMD(0xB8, 0xA5),
+ _INIT_DCS_CMD(0xC0, 0xA5),
+ _INIT_DCS_CMD(0xC7, 0x0F),
+ _INIT_DCS_CMD(0xD5, 0x32),
+ _INIT_DCS_CMD(0xB8, 0x00),
+ _INIT_DCS_CMD(0xC0, 0x00),
+ _INIT_DCS_CMD(0xBC, 0x00),
+ _INIT_DCS_CMD(0xB0, 0x07),
+ _INIT_DCS_CMD(0xB1, 0x00),
+ _INIT_DCS_CMD(0xB2, 0x02),
+ _INIT_DCS_CMD(0xB3, 0x0F),
+ _INIT_DCS_CMD(0xB4, 0x25),
+ _INIT_DCS_CMD(0xB5, 0x39),
+ _INIT_DCS_CMD(0xB6, 0x4E),
+ _INIT_DCS_CMD(0xB7, 0x72),
+ _INIT_DCS_CMD(0xB8, 0x97),
+ _INIT_DCS_CMD(0xB9, 0xDC),
+ _INIT_DCS_CMD(0xBA, 0x22),
+ _INIT_DCS_CMD(0xBB, 0xA4),
+ _INIT_DCS_CMD(0xBC, 0x2B),
+ _INIT_DCS_CMD(0xBD, 0x2F),
+ _INIT_DCS_CMD(0xBE, 0xA9),
+ _INIT_DCS_CMD(0xBF, 0x25),
+ _INIT_DCS_CMD(0xC0, 0x61),
+ _INIT_DCS_CMD(0xC1, 0x97),
+ _INIT_DCS_CMD(0xC2, 0xB2),
+ _INIT_DCS_CMD(0xC3, 0xCD),
+ _INIT_DCS_CMD(0xC4, 0xD9),
+ _INIT_DCS_CMD(0xC5, 0xE7),
+ _INIT_DCS_CMD(0xC6, 0xF4),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x08),
+ _INIT_DCS_CMD(0xB1, 0x04),
+ _INIT_DCS_CMD(0xB2, 0x05),
+ _INIT_DCS_CMD(0xB3, 0x11),
+ _INIT_DCS_CMD(0xB4, 0x24),
+ _INIT_DCS_CMD(0xB5, 0x39),
+ _INIT_DCS_CMD(0xB6, 0x4F),
+ _INIT_DCS_CMD(0xB7, 0x72),
+ _INIT_DCS_CMD(0xB8, 0x98),
+ _INIT_DCS_CMD(0xB9, 0xDC),
+ _INIT_DCS_CMD(0xBA, 0x23),
+ _INIT_DCS_CMD(0xBB, 0xA6),
+ _INIT_DCS_CMD(0xBC, 0x2C),
+ _INIT_DCS_CMD(0xBD, 0x30),
+ _INIT_DCS_CMD(0xBE, 0xAA),
+ _INIT_DCS_CMD(0xBF, 0x26),
+ _INIT_DCS_CMD(0xC0, 0x62),
+ _INIT_DCS_CMD(0xC1, 0x9B),
+ _INIT_DCS_CMD(0xC2, 0xB5),
+ _INIT_DCS_CMD(0xC3, 0xCF),
+ _INIT_DCS_CMD(0xC4, 0xDB),
+ _INIT_DCS_CMD(0xC5, 0xE8),
+ _INIT_DCS_CMD(0xC6, 0xF5),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x09),
+ _INIT_DCS_CMD(0xB1, 0x04),
+ _INIT_DCS_CMD(0xB2, 0x02),
+ _INIT_DCS_CMD(0xB3, 0x16),
+ _INIT_DCS_CMD(0xB4, 0x24),
+ _INIT_DCS_CMD(0xB5, 0x3B),
+ _INIT_DCS_CMD(0xB6, 0x4F),
+ _INIT_DCS_CMD(0xB7, 0x73),
+ _INIT_DCS_CMD(0xB8, 0x99),
+ _INIT_DCS_CMD(0xB9, 0xE0),
+ _INIT_DCS_CMD(0xBA, 0x26),
+ _INIT_DCS_CMD(0xBB, 0xAD),
+ _INIT_DCS_CMD(0xBC, 0x36),
+ _INIT_DCS_CMD(0xBD, 0x3A),
+ _INIT_DCS_CMD(0xBE, 0xAE),
+ _INIT_DCS_CMD(0xBF, 0x2A),
+ _INIT_DCS_CMD(0xC0, 0x66),
+ _INIT_DCS_CMD(0xC1, 0x9E),
+ _INIT_DCS_CMD(0xC2, 0xB8),
+ _INIT_DCS_CMD(0xC3, 0xD1),
+ _INIT_DCS_CMD(0xC4, 0xDD),
+ _INIT_DCS_CMD(0xC5, 0xE9),
+ _INIT_DCS_CMD(0xC6, 0xF6),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x0A),
+ _INIT_DCS_CMD(0xB1, 0x00),
+ _INIT_DCS_CMD(0xB2, 0x02),
+ _INIT_DCS_CMD(0xB3, 0x0F),
+ _INIT_DCS_CMD(0xB4, 0x25),
+ _INIT_DCS_CMD(0xB5, 0x39),
+ _INIT_DCS_CMD(0xB6, 0x4E),
+ _INIT_DCS_CMD(0xB7, 0x72),
+ _INIT_DCS_CMD(0xB8, 0x97),
+ _INIT_DCS_CMD(0xB9, 0xDC),
+ _INIT_DCS_CMD(0xBA, 0x22),
+ _INIT_DCS_CMD(0xBB, 0xA4),
+ _INIT_DCS_CMD(0xBC, 0x2B),
+ _INIT_DCS_CMD(0xBD, 0x2F),
+ _INIT_DCS_CMD(0xBE, 0xA9),
+ _INIT_DCS_CMD(0xBF, 0x25),
+ _INIT_DCS_CMD(0xC0, 0x61),
+ _INIT_DCS_CMD(0xC1, 0x97),
+ _INIT_DCS_CMD(0xC2, 0xB2),
+ _INIT_DCS_CMD(0xC3, 0xCD),
+ _INIT_DCS_CMD(0xC4, 0xD9),
+ _INIT_DCS_CMD(0xC5, 0xE7),
+ _INIT_DCS_CMD(0xC6, 0xF4),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x0B),
+ _INIT_DCS_CMD(0xB1, 0x04),
+ _INIT_DCS_CMD(0xB2, 0x05),
+ _INIT_DCS_CMD(0xB3, 0x11),
+ _INIT_DCS_CMD(0xB4, 0x24),
+ _INIT_DCS_CMD(0xB5, 0x39),
+ _INIT_DCS_CMD(0xB6, 0x4F),
+ _INIT_DCS_CMD(0xB7, 0x72),
+ _INIT_DCS_CMD(0xB8, 0x98),
+ _INIT_DCS_CMD(0xB9, 0xDC),
+ _INIT_DCS_CMD(0xBA, 0x23),
+ _INIT_DCS_CMD(0xBB, 0xA6),
+ _INIT_DCS_CMD(0xBC, 0x2C),
+ _INIT_DCS_CMD(0xBD, 0x30),
+ _INIT_DCS_CMD(0xBE, 0xAA),
+ _INIT_DCS_CMD(0xBF, 0x26),
+ _INIT_DCS_CMD(0xC0, 0x62),
+ _INIT_DCS_CMD(0xC1, 0x9B),
+ _INIT_DCS_CMD(0xC2, 0xB5),
+ _INIT_DCS_CMD(0xC3, 0xCF),
+ _INIT_DCS_CMD(0xC4, 0xDB),
+ _INIT_DCS_CMD(0xC5, 0xE8),
+ _INIT_DCS_CMD(0xC6, 0xF5),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x0C),
+ _INIT_DCS_CMD(0xB1, 0x04),
+ _INIT_DCS_CMD(0xB2, 0x02),
+ _INIT_DCS_CMD(0xB3, 0x16),
+ _INIT_DCS_CMD(0xB4, 0x24),
+ _INIT_DCS_CMD(0xB5, 0x3B),
+ _INIT_DCS_CMD(0xB6, 0x4F),
+ _INIT_DCS_CMD(0xB7, 0x73),
+ _INIT_DCS_CMD(0xB8, 0x99),
+ _INIT_DCS_CMD(0xB9, 0xE0),
+ _INIT_DCS_CMD(0xBA, 0x26),
+ _INIT_DCS_CMD(0xBB, 0xAD),
+ _INIT_DCS_CMD(0xBC, 0x36),
+ _INIT_DCS_CMD(0xBD, 0x3A),
+ _INIT_DCS_CMD(0xBE, 0xAE),
+ _INIT_DCS_CMD(0xBF, 0x2A),
+ _INIT_DCS_CMD(0xC0, 0x66),
+ _INIT_DCS_CMD(0xC1, 0x9E),
+ _INIT_DCS_CMD(0xC2, 0xB8),
+ _INIT_DCS_CMD(0xC3, 0xD1),
+ _INIT_DCS_CMD(0xC4, 0xDD),
+ _INIT_DCS_CMD(0xC5, 0xE9),
+ _INIT_DCS_CMD(0xC6, 0xF6),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x00),
+ _INIT_DCS_CMD(0xB3, 0x08),
+ _INIT_DCS_CMD(0xB0, 0x04),
+ _INIT_DCS_CMD(0xB8, 0x68),
+ _INIT_DELAY_CMD(150),
+ {},
+};
+
+static const struct panel_init_cmd auo_kd101n80_45na_init_cmd[] = {
+ _INIT_DELAY_CMD(24),
+ _INIT_DCS_CMD(0x11),
+ _INIT_DELAY_CMD(120),
+ _INIT_DCS_CMD(0x29),
+ _INIT_DELAY_CMD(120),
+ {},
+};
+
+static const struct panel_init_cmd auo_b101uan08_3_init_cmd[] = {
+ _INIT_DELAY_CMD(24),
+ _INIT_DCS_CMD(0xB0, 0x01),
+ _INIT_DCS_CMD(0xC0, 0x48),
+ _INIT_DCS_CMD(0xC1, 0x48),
+ _INIT_DCS_CMD(0xC2, 0x47),
+ _INIT_DCS_CMD(0xC3, 0x47),
+ _INIT_DCS_CMD(0xC4, 0x46),
+ _INIT_DCS_CMD(0xC5, 0x46),
+ _INIT_DCS_CMD(0xC6, 0x45),
+ _INIT_DCS_CMD(0xC7, 0x45),
+ _INIT_DCS_CMD(0xC8, 0x64),
+ _INIT_DCS_CMD(0xC9, 0x64),
+ _INIT_DCS_CMD(0xCA, 0x4F),
+ _INIT_DCS_CMD(0xCB, 0x4F),
+ _INIT_DCS_CMD(0xCC, 0x40),
+ _INIT_DCS_CMD(0xCD, 0x40),
+ _INIT_DCS_CMD(0xCE, 0x66),
+ _INIT_DCS_CMD(0xCF, 0x66),
+ _INIT_DCS_CMD(0xD0, 0x4F),
+ _INIT_DCS_CMD(0xD1, 0x4F),
+ _INIT_DCS_CMD(0xD2, 0x41),
+ _INIT_DCS_CMD(0xD3, 0x41),
+ _INIT_DCS_CMD(0xD4, 0x48),
+ _INIT_DCS_CMD(0xD5, 0x48),
+ _INIT_DCS_CMD(0xD6, 0x47),
+ _INIT_DCS_CMD(0xD7, 0x47),
+ _INIT_DCS_CMD(0xD8, 0x46),
+ _INIT_DCS_CMD(0xD9, 0x46),
+ _INIT_DCS_CMD(0xDA, 0x45),
+ _INIT_DCS_CMD(0xDB, 0x45),
+ _INIT_DCS_CMD(0xDC, 0x64),
+ _INIT_DCS_CMD(0xDD, 0x64),
+ _INIT_DCS_CMD(0xDE, 0x4F),
+ _INIT_DCS_CMD(0xDF, 0x4F),
+ _INIT_DCS_CMD(0xE0, 0x40),
+ _INIT_DCS_CMD(0xE1, 0x40),
+ _INIT_DCS_CMD(0xE2, 0x66),
+ _INIT_DCS_CMD(0xE3, 0x66),
+ _INIT_DCS_CMD(0xE4, 0x4F),
+ _INIT_DCS_CMD(0xE5, 0x4F),
+ _INIT_DCS_CMD(0xE6, 0x41),
+ _INIT_DCS_CMD(0xE7, 0x41),
+ _INIT_DELAY_CMD(150),
+ {},
+};
+
+static inline struct boe_panel *to_boe_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct boe_panel, base);
+}
+
+static int boe_panel_init_dcs_cmd(struct boe_panel *boe)
+{
+ struct mipi_dsi_device *dsi = boe->dsi;
+ struct drm_panel *panel = &boe->base;
+ int i, err = 0;
+
+ if (boe->desc->init_cmds) {
+ const struct panel_init_cmd *init_cmds = boe->desc->init_cmds;
+
+ for (i = 0; init_cmds[i].len != 0; i++) {
+ const struct panel_init_cmd *cmd = &init_cmds[i];
+
+ switch (cmd->type) {
+ case DELAY_CMD:
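+ /* For DELAY_CMD entries, data[0] holds the delay time in milliseconds */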
+ msleep(cmd->data[0]);
+ err = 0;
+ break;
+
+ case INIT_DCS_CMD:
+ err = mipi_dsi_dcs_write(dsi, cmd->data[0],
+ cmd->len <= 1 ? NULL :
+ &cmd->data[1],
+ cmd->len - 1);
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (err < 0) {
+ dev_err(panel->dev,
+ "failed to write command %u\n", i);
+ return err;
+ }
+ }
+ }
+ return 0;
+}
+
+static int boe_panel_enter_sleep_mode(struct boe_panel *boe)
+{
+ struct mipi_dsi_device *dsi = boe->dsi;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int boe_panel_unprepare(struct drm_panel *panel)
+{
+ struct boe_panel *boe = to_boe_panel(panel);
+ int ret;
+
+ if (!boe->prepared)
+ return 0;
+
+ ret = boe_panel_enter_sleep_mode(boe);
+ if (ret < 0) {
+ dev_err(panel->dev, "failed to set panel off: %d\n", ret);
+ return ret;
+ }
+
+ msleep(150);
+
+ if (boe->desc->discharge_on_disable) {
+ regulator_disable(boe->avee);
+ regulator_disable(boe->avdd);
+ usleep_range(5000, 7000);
+ gpiod_set_value(boe->enable_gpio, 0);
+ usleep_range(5000, 7000);
+ regulator_disable(boe->pp1800);
+ } else {
+ gpiod_set_value(boe->enable_gpio, 0);
+ usleep_range(500, 1000);
+ regulator_disable(boe->avee);
+ regulator_disable(boe->avdd);
+ usleep_range(5000, 7000);
+ regulator_disable(boe->pp1800);
+ }
+
+ boe->prepared = false;
+
+ return 0;
+}
+
+static int boe_panel_prepare(struct drm_panel *panel)
+{
+ struct boe_panel *boe = to_boe_panel(panel);
+ int ret;
+
+ if (boe->prepared)
+ return 0;
+
+ gpiod_set_value(boe->enable_gpio, 0);
+ usleep_range(1000, 1500);
+
+ ret = regulator_enable(boe->pp1800);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(3000, 5000);
+
+ ret = regulator_enable(boe->avdd);
+ if (ret < 0)
+ goto poweroff1v8;
+ ret = regulator_enable(boe->avee);
+ if (ret < 0)
+ goto poweroffavdd;
+
+ usleep_range(5000, 10000);
+
+ gpiod_set_value(boe->enable_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value(boe->enable_gpio, 0);
+ usleep_range(1000, 2000);
+ gpiod_set_value(boe->enable_gpio, 1);
+ usleep_range(6000, 10000);
+
+ ret = boe_panel_init_dcs_cmd(boe);
+ if (ret < 0) {
+ dev_err(panel->dev, "failed to init panel: %d\n", ret);
+ goto poweroff;
+ }
+
+ boe->prepared = true;
+
+ return 0;
+
+poweroff:
+ regulator_disable(boe->avee);
+poweroffavdd:
+ regulator_disable(boe->avdd);
+poweroff1v8:
+ usleep_range(5000, 7000);
+ regulator_disable(boe->pp1800);
+ gpiod_set_value(boe->enable_gpio, 0);
+
+ return ret;
+}
+
+static int boe_panel_enable(struct drm_panel *panel)
+{
+ msleep(130);
+ return 0;
+}
+
+static const struct drm_display_mode boe_tv101wum_nl6_default_mode = {
+ .clock = 159425,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 100,
+ .hsync_end = 1200 + 100 + 40,
+ .htotal = 1200 + 100 + 40 + 24,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 10,
+ .vsync_end = 1920 + 10 + 14,
+ .vtotal = 1920 + 10 + 14 + 4,
+ .vrefresh = 60,
+};
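+
+/*
+ * Quick sanity check on the timings above: 159425 kHz divided by the
+ * 1364 x 1948 total pixel count works out to roughly 60 Hz, matching
+ * .vrefresh = 60.
+ */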
+
+static const struct panel_desc boe_tv101wum_nl6_desc = {
+ .modes = &boe_tv101wum_nl6_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 135,
+ .height_mm = 216,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = boe_init_cmd,
+ .discharge_on_disable = false,
+};
+
+static const struct drm_display_mode auo_kd101n80_45na_default_mode = {
+ .clock = 157000,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 80,
+ .hsync_end = 1200 + 80 + 24,
+ .htotal = 1200 + 80 + 24 + 36,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 16,
+ .vsync_end = 1920 + 16 + 4,
+ .vtotal = 1920 + 16 + 4 + 16,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_kd101n80_45na_desc = {
+ .modes = &auo_kd101n80_45na_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 135,
+ .height_mm = 216,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = auo_kd101n80_45na_init_cmd,
+ .discharge_on_disable = true,
+};
+
+static const struct drm_display_mode boe_tv101wum_n53_default_mode = {
+ .clock = 159916,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 80,
+ .hsync_end = 1200 + 80 + 24,
+ .htotal = 1200 + 80 + 24 + 60,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 20,
+ .vsync_end = 1920 + 20 + 4,
+ .vtotal = 1920 + 20 + 4 + 10,
+ .vrefresh = 60,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc boe_tv101wum_n53_desc = {
+ .modes = &boe_tv101wum_n53_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 135,
+ .height_mm = 216,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = boe_init_cmd,
+};
+
+static const struct drm_display_mode auo_b101uan08_3_default_mode = {
+ .clock = 159667,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 60,
+ .hsync_end = 1200 + 60 + 4,
+ .htotal = 1200 + 60 + 4 + 80,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 34,
+ .vsync_end = 1920 + 34 + 2,
+ .vtotal = 1920 + 34 + 2 + 24,
+ .vrefresh = 60,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc auo_b101uan08_3_desc = {
+ .modes = &auo_b101uan08_3_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 135,
+ .height_mm = 216,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = auo_b101uan08_3_init_cmd,
+};
+
+static int boe_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct boe_panel *boe = to_boe_panel(panel);
+ const struct drm_display_mode *m = boe->desc->modes;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, m);
+ if (!mode) {
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ m->hdisplay, m->vdisplay, m->vrefresh);
+ return -ENOMEM;
+ }
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = boe->desc->size.width_mm;
+ connector->display_info.height_mm = boe->desc->size.height_mm;
+ connector->display_info.bpc = boe->desc->bpc;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs boe_panel_funcs = {
+ .unprepare = boe_panel_unprepare,
+ .prepare = boe_panel_prepare,
+ .enable = boe_panel_enable,
+ .get_modes = boe_panel_get_modes,
+};
+
+static int boe_panel_add(struct boe_panel *boe)
+{
+ struct device *dev = &boe->dsi->dev;
+ int err;
+
+ boe->avdd = devm_regulator_get(dev, "avdd");
+ if (IS_ERR(boe->avdd))
+ return PTR_ERR(boe->avdd);
+
+ boe->avee = devm_regulator_get(dev, "avee");
+ if (IS_ERR(boe->avee))
+ return PTR_ERR(boe->avee);
+
+ boe->pp1800 = devm_regulator_get(dev, "pp1800");
+ if (IS_ERR(boe->pp1800))
+ return PTR_ERR(boe->pp1800);
+
+ boe->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(boe->enable_gpio)) {
+ dev_err(dev, "cannot get reset-gpios %ld\n",
+ PTR_ERR(boe->enable_gpio));
+ return PTR_ERR(boe->enable_gpio);
+ }
+
+ gpiod_set_value(boe->enable_gpio, 0);
+
+ drm_panel_init(&boe->base, dev, &boe_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ err = drm_panel_of_backlight(&boe->base);
+ if (err)
+ return err;
+
+ boe->base.funcs = &boe_panel_funcs;
+ boe->base.dev = &boe->dsi->dev;
+
+ return drm_panel_add(&boe->base);
+}
+
+static int boe_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct boe_panel *boe;
+ int ret;
+ const struct panel_desc *desc;
+
+ boe = devm_kzalloc(&dsi->dev, sizeof(*boe), GFP_KERNEL);
+ if (!boe)
+ return -ENOMEM;
+
+ desc = of_device_get_match_data(&dsi->dev);
+ dsi->lanes = desc->lanes;
+ dsi->format = desc->format;
+ dsi->mode_flags = desc->mode_flags;
+ boe->desc = desc;
+ boe->dsi = dsi;
+ ret = boe_panel_add(boe);
+ if (ret < 0)
+ return ret;
+
+ mipi_dsi_set_drvdata(dsi, boe);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret)
+ drm_panel_remove(&boe->base);
+
+ return ret;
+}
+
+static void boe_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct boe_panel *boe = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_disable(&boe->base);
+ drm_panel_unprepare(&boe->base);
+}
+
+static int boe_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct boe_panel *boe = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ boe_panel_shutdown(dsi);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
+
+ if (boe->base.dev)
+ drm_panel_remove(&boe->base);
+
+ return 0;
+}
+
+static const struct of_device_id boe_of_match[] = {
+ { .compatible = "boe,tv101wum-nl6",
+ .data = &boe_tv101wum_nl6_desc
+ },
+ { .compatible = "auo,kd101n80-45na",
+ .data = &auo_kd101n80_45na_desc
+ },
+ { .compatible = "boe,tv101wum-n53",
+ .data = &boe_tv101wum_n53_desc
+ },
+ { .compatible = "auo,b101uan08.3",
+ .data = &auo_b101uan08_3_desc
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, boe_of_match);
+
+static struct mipi_dsi_driver boe_panel_driver = {
+ .driver = {
+ .name = "panel-boe-tv101wum-nl6",
+ .of_match_table = boe_of_match,
+ },
+ .probe = boe_panel_probe,
+ .remove = boe_panel_remove,
+ .shutdown = boe_panel_shutdown,
+};
+module_mipi_dsi_driver(boe_panel_driver);
+
+MODULE_AUTHOR("Jitao Shi <[email protected]>");
+MODULE_DESCRIPTION("BOE tv101wum-nl6 1200x1920 video mode panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
new file mode 100644
index 000000000000..711ded453c44
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Elida kd35t133 3.5" MIPI-DSI panel driver
+ * Copyright (C) 2020 Theobroma Systems Design und Consulting GmbH
+ *
+ * based on
+ *
+ * Rocktech jh057n00900 5.5" MIPI-DSI panel driver
+ * Copyright (C) Purism SPC 2019
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/display_timing.h>
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+/* Manufacturer specific commands sent via DSI */
+#define KD35T133_CMD_INTERFACEMODECTRL 0xb0
+#define KD35T133_CMD_FRAMERATECTRL 0xb1
+#define KD35T133_CMD_DISPLAYINVERSIONCTRL 0xb4
+#define KD35T133_CMD_DISPLAYFUNCTIONCTRL 0xb6
+#define KD35T133_CMD_POWERCONTROL1 0xc0
+#define KD35T133_CMD_POWERCONTROL2 0xc1
+#define KD35T133_CMD_VCOMCONTROL 0xc5
+#define KD35T133_CMD_POSITIVEGAMMA 0xe0
+#define KD35T133_CMD_NEGATIVEGAMMA 0xe1
+#define KD35T133_CMD_SETIMAGEFUNCTION 0xe9
+#define KD35T133_CMD_ADJUSTCONTROL3 0xf7
+
+struct kd35t133 {
+ struct device *dev;
+ struct drm_panel panel;
+ struct gpio_desc *reset_gpio;
+ struct regulator *vdd;
+ struct regulator *iovcc;
+ bool prepared;
+};
+
+static inline struct kd35t133 *panel_to_kd35t133(struct drm_panel *panel)
+{
+ return container_of(panel, struct kd35t133, panel);
+}
+
+#define dsi_dcs_write_seq(dsi, cmd, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_dcs_write(dsi, cmd, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
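+/*
+ * Example: dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL2, 0x41)
+ * sends DCS command 0xc1 with the single parameter byte 0x41, and
+ * returns from the calling function on error, which is why this macro
+ * can only be used in functions returning int.
+ */
+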
+static int kd35t133_init_sequence(struct kd35t133 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ struct device *dev = ctx->dev;
+
+ /*
+ * Init sequence was supplied by the panel vendor with minimal
+ * documentation.
+ */
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_POSITIVEGAMMA,
+ 0x00, 0x13, 0x18, 0x04, 0x0f, 0x06, 0x3a, 0x56,
+ 0x4d, 0x03, 0x0a, 0x06, 0x30, 0x3e, 0x0f);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_NEGATIVEGAMMA,
+ 0x00, 0x13, 0x18, 0x01, 0x11, 0x06, 0x38, 0x34,
+ 0x4d, 0x06, 0x0d, 0x0b, 0x31, 0x37, 0x0f);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL1, 0x18, 0x17);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL2, 0x41);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_VCOMCONTROL, 0x00, 0x1a, 0x80);
+ dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x48);
+ dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_INTERFACEMODECTRL, 0x00);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_FRAMERATECTRL, 0xa0);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYINVERSIONCTRL, 0x02);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYFUNCTIONCTRL,
+ 0x20, 0x02);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_SETIMAGEFUNCTION, 0x00);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_ADJUSTCONTROL3,
+ 0xa9, 0x51, 0x2c, 0x82);
+ mipi_dsi_dcs_write(dsi, MIPI_DCS_ENTER_INVERT_MODE, NULL, 0);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "Panel init sequence done\n");
+ return 0;
+}
+
+static int kd35t133_unprepare(struct drm_panel *panel)
+{
+ struct kd35t133 *ctx = panel_to_kd35t133(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(ctx->dev, "failed to set display off: %d\n",
+ ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to enter sleep mode: %d\n",
+ ret);
+ return ret;
+ }
+
+ regulator_disable(ctx->iovcc);
+ regulator_disable(ctx->vdd);
+
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int kd35t133_prepare(struct drm_panel *panel)
+{
+ struct kd35t133 *ctx = panel_to_kd35t133(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+ ret = regulator_enable(ctx->vdd);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable vdd supply: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(ctx->iovcc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable iovcc supply: %d\n", ret);
+ goto disable_vdd;
+ }
+
+ msleep(20);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10, 20);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+
+ msleep(20);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
+ goto disable_iovcc;
+ }
+
+ msleep(250);
+
+ ret = kd35t133_init_sequence(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
+ ret);
+ goto disable_iovcc;
+ }
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to set display on: %d\n", ret);
+ goto disable_iovcc;
+ }
+
+ msleep(50);
+
+ ctx->prepared = true;
+
+ return 0;
+
+disable_iovcc:
+ regulator_disable(ctx->iovcc);
+disable_vdd:
+ regulator_disable(ctx->vdd);
+ return ret;
+}
+
+static const struct drm_display_mode default_mode = {
+ .hdisplay = 320,
+ .hsync_start = 320 + 130,
+ .hsync_end = 320 + 130 + 4,
+ .htotal = 320 + 130 + 4 + 130,
+ .vdisplay = 480,
+ .vsync_start = 480 + 2,
+ .vsync_end = 480 + 2 + 1,
+ .vtotal = 480 + 2 + 1 + 2,
+ .vrefresh = 60,
+ .clock = 17000,
+ .width_mm = 42,
+ .height_mm = 82,
+};
+
+static int kd35t133_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct kd35t133 *ctx = panel_to_kd35t133(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs kd35t133_funcs = {
+ .unprepare = kd35t133_unprepare,
+ .prepare = kd35t133_prepare,
+ .get_modes = kd35t133_get_modes,
+};
+
+static int kd35t133_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct kd35t133 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ ctx->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(ctx->vdd)) {
+ ret = PTR_ERR(ctx->vdd);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request vdd regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ ctx->iovcc = devm_regulator_get(dev, "iovcc");
+ if (IS_ERR(ctx->iovcc)) {
+ ret = PTR_ERR(ctx->iovcc);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request iovcc regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 1;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+
+ drm_panel_init(&ctx->panel, &dsi->dev, &kd35t133_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void kd35t133_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct kd35t133 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = drm_panel_unprepare(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
+ ret);
+
+ ret = drm_panel_disable(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
+ ret);
+}
+
+static int kd35t133_remove(struct mipi_dsi_device *dsi)
+{
+ struct kd35t133 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ kd35t133_shutdown(dsi);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n",
+ ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id kd35t133_of_match[] = {
+ { .compatible = "elida,kd35t133" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, kd35t133_of_match);
+
+static struct mipi_dsi_driver kd35t133_driver = {
+ .driver = {
+ .name = "panel-elida-kd35t133",
+ .of_match_table = kd35t133_of_match,
+ },
+ .probe = kd35t133_probe,
+ .remove = kd35t133_remove,
+ .shutdown = kd35t133_shutdown,
+};
+module_mipi_dsi_driver(kd35t133_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <[email protected]>");
+MODULE_DESCRIPTION("DRM driver for Elida kd35t133 MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
new file mode 100644
index 000000000000..fddbfddf6566
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019-2020 Icenowy Zheng <[email protected]>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#define K101_IM2BA02_INIT_CMD_LEN 2
+
+static const char * const regulator_names[] = {
+ "dvdd",
+ "avdd",
+ "cvdd"
+};
+
+struct k101_im2ba02 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct regulator_bulk_data supplies[ARRAY_SIZE(regulator_names)];
+ struct gpio_desc *reset;
+};
+
+static inline struct k101_im2ba02 *panel_to_k101_im2ba02(struct drm_panel *panel)
+{
+ return container_of(panel, struct k101_im2ba02, panel);
+}
+
+struct k101_im2ba02_init_cmd {
+ u8 data[K101_IM2BA02_INIT_CMD_LEN];
+};
+
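+/*
+ * Every entry below is a fixed two-byte { register, value } pair;
+ * prepare() pushes each one out verbatim with
+ * mipi_dsi_dcs_write_buffer(), K101_IM2BA02_INIT_CMD_LEN bytes at a
+ * time.
+ */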
+static const struct k101_im2ba02_init_cmd k101_im2ba02_init_cmds[] = {
+ /* Switch to page 0 */
+ { .data = { 0xE0, 0x00 } },
+
+ /* Seems to be some password */
+ { .data = { 0xE1, 0x93 } },
+ { .data = { 0xE2, 0x65 } },
+ { .data = { 0xE3, 0xF8 } },
+
+ /* Lane number, 0x02 - 3 lanes, 0x03 - 4 lanes */
+ { .data = { 0x80, 0x03 } },
+
+ /* Sequence control */
+ { .data = { 0x70, 0x02 } },
+ { .data = { 0x71, 0x23 } },
+ { .data = { 0x72, 0x06 } },
+
+ /* Switch to page 1 */
+ { .data = { 0xE0, 0x01 } },
+
+ /* Set VCOM */
+ { .data = { 0x00, 0x00 } },
+ { .data = { 0x01, 0x66 } },
+ /* Set VCOM_Reverse */
+ { .data = { 0x03, 0x00 } },
+ { .data = { 0x04, 0x25 } },
+
+ /* Set Gamma Power, VG[MS][PN] */
+ { .data = { 0x17, 0x00 } },
+ { .data = { 0x18, 0x6D } },
+ { .data = { 0x19, 0x00 } },
+ { .data = { 0x1A, 0x00 } },
+ { .data = { 0x1B, 0xBF } }, /* VGMN = -4.5V */
+ { .data = { 0x1C, 0x00 } },
+
+ /* Set Gate Power */
+ { .data = { 0x1F, 0x3E } }, /* VGH_R = 15V */
+ { .data = { 0x20, 0x28 } }, /* VGL_R = -11V */
+ { .data = { 0x21, 0x28 } }, /* VGL_R2 = -11V */
+ { .data = { 0x22, 0x0E } }, /* PA[6:4] = 0, PA[0] = 0 */
+
+ /* Set Panel */
+ { .data = { 0x37, 0x09 } }, /* SS = 1, BGR = 1 */
+
+ /* Set RGBCYC */
+ { .data = { 0x38, 0x04 } }, /* JDT = 100 column inversion */
+ { .data = { 0x39, 0x08 } }, /* RGB_N_EQ1 */
+ { .data = { 0x3A, 0x12 } }, /* RGB_N_EQ2 */
+ { .data = { 0x3C, 0x78 } }, /* set EQ3 for TE_H */
+ { .data = { 0x3D, 0xFF } }, /* set CHGEN_ON */
+ { .data = { 0x3E, 0xFF } }, /* set CHGEN_OFF */
+ { .data = { 0x3F, 0x7F } }, /* set CHGEN_OFF2 */
+
+ /* Set TCON parameter */
+ { .data = { 0x40, 0x06 } }, /* RSO = 800 points */
+ { .data = { 0x41, 0xA0 } }, /* LN = 1280 lines */
+
+ /* Set power voltage */
+ { .data = { 0x55, 0x0F } }, /* DCDCM */
+ { .data = { 0x56, 0x01 } },
+ { .data = { 0x57, 0x69 } },
+ { .data = { 0x58, 0x0A } },
+ { .data = { 0x59, 0x0A } },
+ { .data = { 0x5A, 0x45 } },
+ { .data = { 0x5B, 0x15 } },
+
+ /* Set gamma */
+ { .data = { 0x5D, 0x7C } },
+ { .data = { 0x5E, 0x65 } },
+ { .data = { 0x5F, 0x55 } },
+ { .data = { 0x60, 0x49 } },
+ { .data = { 0x61, 0x44 } },
+ { .data = { 0x62, 0x35 } },
+ { .data = { 0x63, 0x3A } },
+ { .data = { 0x64, 0x23 } },
+ { .data = { 0x65, 0x3D } },
+ { .data = { 0x66, 0x3C } },
+ { .data = { 0x67, 0x3D } },
+ { .data = { 0x68, 0x5D } },
+ { .data = { 0x69, 0x4D } },
+ { .data = { 0x6A, 0x56 } },
+ { .data = { 0x6B, 0x48 } },
+ { .data = { 0x6C, 0x45 } },
+ { .data = { 0x6D, 0x38 } },
+ { .data = { 0x6E, 0x25 } },
+ { .data = { 0x6F, 0x00 } },
+ { .data = { 0x70, 0x7C } },
+ { .data = { 0x71, 0x65 } },
+ { .data = { 0x72, 0x55 } },
+ { .data = { 0x73, 0x49 } },
+ { .data = { 0x74, 0x44 } },
+ { .data = { 0x75, 0x35 } },
+ { .data = { 0x76, 0x3A } },
+ { .data = { 0x77, 0x23 } },
+ { .data = { 0x78, 0x3D } },
+ { .data = { 0x79, 0x3C } },
+ { .data = { 0x7A, 0x3D } },
+ { .data = { 0x7B, 0x5D } },
+ { .data = { 0x7C, 0x4D } },
+ { .data = { 0x7D, 0x56 } },
+ { .data = { 0x7E, 0x48 } },
+ { .data = { 0x7F, 0x45 } },
+ { .data = { 0x80, 0x38 } },
+ { .data = { 0x81, 0x25 } },
+ { .data = { 0x82, 0x00 } },
+
+ /* Switch to page 2, for GIP */
+ { .data = { 0xE0, 0x02 } },
+
+ { .data = { 0x00, 0x1E } },
+ { .data = { 0x01, 0x1E } },
+ { .data = { 0x02, 0x41 } },
+ { .data = { 0x03, 0x41 } },
+ { .data = { 0x04, 0x43 } },
+ { .data = { 0x05, 0x43 } },
+ { .data = { 0x06, 0x1F } },
+ { .data = { 0x07, 0x1F } },
+ { .data = { 0x08, 0x1F } },
+ { .data = { 0x09, 0x1F } },
+ { .data = { 0x0A, 0x1E } },
+ { .data = { 0x0B, 0x1E } },
+ { .data = { 0x0C, 0x1F } },
+ { .data = { 0x0D, 0x47 } },
+ { .data = { 0x0E, 0x47 } },
+ { .data = { 0x0F, 0x45 } },
+ { .data = { 0x10, 0x45 } },
+ { .data = { 0x11, 0x4B } },
+ { .data = { 0x12, 0x4B } },
+ { .data = { 0x13, 0x49 } },
+ { .data = { 0x14, 0x49 } },
+ { .data = { 0x15, 0x1F } },
+
+ { .data = { 0x16, 0x1E } },
+ { .data = { 0x17, 0x1E } },
+ { .data = { 0x18, 0x40 } },
+ { .data = { 0x19, 0x40 } },
+ { .data = { 0x1A, 0x42 } },
+ { .data = { 0x1B, 0x42 } },
+ { .data = { 0x1C, 0x1F } },
+ { .data = { 0x1D, 0x1F } },
+ { .data = { 0x1E, 0x1F } },
+ { .data = { 0x1F, 0x1F } },
+ { .data = { 0x20, 0x1E } },
+ { .data = { 0x21, 0x1E } },
+ { .data = { 0x22, 0x1F } },
+ { .data = { 0x23, 0x46 } },
+ { .data = { 0x24, 0x46 } },
+ { .data = { 0x25, 0x44 } },
+ { .data = { 0x26, 0x44 } },
+ { .data = { 0x27, 0x4A } },
+ { .data = { 0x28, 0x4A } },
+ { .data = { 0x29, 0x48 } },
+ { .data = { 0x2A, 0x48 } },
+ { .data = { 0x2B, 0x1F } },
+
+ { .data = { 0x2C, 0x1F } },
+ { .data = { 0x2D, 0x1F } },
+ { .data = { 0x2E, 0x42 } },
+ { .data = { 0x2F, 0x42 } },
+ { .data = { 0x30, 0x40 } },
+ { .data = { 0x31, 0x40 } },
+ { .data = { 0x32, 0x1E } },
+ { .data = { 0x33, 0x1E } },
+ { .data = { 0x34, 0x1F } },
+ { .data = { 0x35, 0x1F } },
+ { .data = { 0x36, 0x1E } },
+ { .data = { 0x37, 0x1E } },
+ { .data = { 0x38, 0x1F } },
+ { .data = { 0x39, 0x48 } },
+ { .data = { 0x3A, 0x48 } },
+ { .data = { 0x3B, 0x4A } },
+ { .data = { 0x3C, 0x4A } },
+ { .data = { 0x3D, 0x44 } },
+ { .data = { 0x3E, 0x44 } },
+ { .data = { 0x3F, 0x46 } },
+ { .data = { 0x40, 0x46 } },
+ { .data = { 0x41, 0x1F } },
+
+ { .data = { 0x42, 0x1F } },
+ { .data = { 0x43, 0x1F } },
+ { .data = { 0x44, 0x43 } },
+ { .data = { 0x45, 0x43 } },
+ { .data = { 0x46, 0x41 } },
+ { .data = { 0x47, 0x41 } },
+ { .data = { 0x48, 0x1E } },
+ { .data = { 0x49, 0x1E } },
+ { .data = { 0x4A, 0x1E } },
+ { .data = { 0x4B, 0x1F } },
+ { .data = { 0x4C, 0x1E } },
+ { .data = { 0x4D, 0x1E } },
+ { .data = { 0x4E, 0x1F } },
+ { .data = { 0x4F, 0x49 } },
+ { .data = { 0x50, 0x49 } },
+ { .data = { 0x51, 0x4B } },
+ { .data = { 0x52, 0x4B } },
+ { .data = { 0x53, 0x45 } },
+ { .data = { 0x54, 0x45 } },
+ { .data = { 0x55, 0x47 } },
+ { .data = { 0x56, 0x47 } },
+ { .data = { 0x57, 0x1F } },
+
+ { .data = { 0x58, 0x10 } },
+ { .data = { 0x59, 0x00 } },
+ { .data = { 0x5A, 0x00 } },
+ { .data = { 0x5B, 0x30 } },
+ { .data = { 0x5C, 0x02 } },
+ { .data = { 0x5D, 0x40 } },
+ { .data = { 0x5E, 0x01 } },
+ { .data = { 0x5F, 0x02 } },
+ { .data = { 0x60, 0x30 } },
+ { .data = { 0x61, 0x01 } },
+ { .data = { 0x62, 0x02 } },
+ { .data = { 0x63, 0x6A } },
+ { .data = { 0x64, 0x6A } },
+ { .data = { 0x65, 0x05 } },
+ { .data = { 0x66, 0x12 } },
+ { .data = { 0x67, 0x74 } },
+ { .data = { 0x68, 0x04 } },
+ { .data = { 0x69, 0x6A } },
+ { .data = { 0x6A, 0x6A } },
+ { .data = { 0x6B, 0x08 } },
+
+ { .data = { 0x6C, 0x00 } },
+ { .data = { 0x6D, 0x04 } },
+ { .data = { 0x6E, 0x04 } },
+ { .data = { 0x6F, 0x88 } },
+ { .data = { 0x70, 0x00 } },
+ { .data = { 0x71, 0x00 } },
+ { .data = { 0x72, 0x06 } },
+ { .data = { 0x73, 0x7B } },
+ { .data = { 0x74, 0x00 } },
+ { .data = { 0x75, 0x07 } },
+ { .data = { 0x76, 0x00 } },
+ { .data = { 0x77, 0x5D } },
+ { .data = { 0x78, 0x17 } },
+ { .data = { 0x79, 0x1F } },
+ { .data = { 0x7A, 0x00 } },
+ { .data = { 0x7B, 0x00 } },
+ { .data = { 0x7C, 0x00 } },
+ { .data = { 0x7D, 0x03 } },
+ { .data = { 0x7E, 0x7B } },
+
+ { .data = { 0xE0, 0x04 } },
+ { .data = { 0x2B, 0x2B } },
+ { .data = { 0x2E, 0x44 } },
+
+ { .data = { 0xE0, 0x01 } },
+ { .data = { 0x0E, 0x01 } },
+
+ { .data = { 0xE0, 0x03 } },
+ { .data = { 0x98, 0x2F } },
+
+ { .data = { 0xE0, 0x00 } },
+ { .data = { 0xE6, 0x02 } },
+ { .data = { 0xE7, 0x02 } },
+
+ { .data = { 0x11, 0x00 } },
+};
+
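+/*
+ * Commands sent outside the main init table: 0x29 is the standard DCS
+ * set_display_on and 0x35 is DCS set_tear_on. enable() only writes the
+ * second entry, after switching the display on via the core DCS helper.
+ */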
+static const struct k101_im2ba02_init_cmd timed_cmds[] = {
+ { .data = { 0x29, 0x00 } },
+ { .data = { 0x35, 0x00 } },
+};
+
+static int k101_im2ba02_prepare(struct drm_panel *panel)
+{
+ struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ unsigned int i;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret)
+ return ret;
+
+ msleep(30);
+
+ gpiod_set_value(ctx->reset, 1);
+ msleep(50);
+
+ gpiod_set_value(ctx->reset, 0);
+ msleep(50);
+
+ gpiod_set_value(ctx->reset, 1);
+ msleep(200);
+
+ for (i = 0; i < ARRAY_SIZE(k101_im2ba02_init_cmds); i++) {
+ const struct k101_im2ba02_init_cmd *cmd = &k101_im2ba02_init_cmds[i];
+
+ ret = mipi_dsi_dcs_write_buffer(dsi, cmd->data, K101_IM2BA02_INIT_CMD_LEN);
+ if (ret < 0)
+ goto powerdown;
+ }
+
+ return 0;
+
+powerdown:
+ gpiod_set_value(ctx->reset, 0);
+ msleep(50);
+
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int k101_im2ba02_enable(struct drm_panel *panel)
+{
+ struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+ const struct k101_im2ba02_init_cmd *cmd = &timed_cmds[1];
+ int ret;
+
+ msleep(150);
+
+ ret = mipi_dsi_dcs_set_display_on(ctx->dsi);
+ if (ret < 0)
+ return ret;
+
+ msleep(50);
+
+ return mipi_dsi_dcs_write_buffer(ctx->dsi, cmd->data, K101_IM2BA02_INIT_CMD_LEN);
+}
+
+static int k101_im2ba02_disable(struct drm_panel *panel)
+{
+ struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+
+ return mipi_dsi_dcs_set_display_off(ctx->dsi);
+}
+
+static int k101_im2ba02_unprepare(struct drm_panel *panel)
+{
+ struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(ctx->dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+ ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
+ ret);
+
+ msleep(200);
+
+ gpiod_set_value(ctx->reset, 0);
+ msleep(20);
+
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static const struct drm_display_mode k101_im2ba02_default_mode = {
+ .clock = 70000,
+ .vrefresh = 60,
+
+ .hdisplay = 800,
+ .hsync_start = 800 + 20,
+ .hsync_end = 800 + 20 + 20,
+ .htotal = 800 + 20 + 20 + 20,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 16,
+ .vsync_end = 1280 + 16 + 4,
+ .vtotal = 1280 + 16 + 4 + 4,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ .width_mm = 136,
+ .height_mm = 217,
+};
+
+static int k101_im2ba02_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &k101_im2ba02_default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n",
+ k101_im2ba02_default_mode.hdisplay,
+ k101_im2ba02_default_mode.vdisplay,
+ k101_im2ba02_default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs k101_im2ba02_funcs = {
+ .disable = k101_im2ba02_disable,
+ .unprepare = k101_im2ba02_unprepare,
+ .prepare = k101_im2ba02_prepare,
+ .enable = k101_im2ba02_enable,
+ .get_modes = k101_im2ba02_get_modes,
+};
+
+static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct k101_im2ba02 *ctx;
+ unsigned int i;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+ ctx->dsi = dsi;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
+ ctx->supplies[i].supply = regulator_names[i];
+
+ ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get regulators\n");
+ return ret;
+ }
+
+ ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our reset GPIO\n");
+ return PTR_ERR(ctx->reset);
+ }
+
+ drm_panel_init(&ctx->panel, &dsi->dev, &k101_im2ba02_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 4;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int k101_im2ba02_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ struct k101_im2ba02 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id k101_im2ba02_of_match[] = {
+ { .compatible = "feixin,k101-im2ba02", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, k101_im2ba02_of_match);
+
+static struct mipi_dsi_driver k101_im2ba02_driver = {
+ .probe = k101_im2ba02_dsi_probe,
+ .remove = k101_im2ba02_dsi_remove,
+ .driver = {
+ .name = "feixin-k101-im2ba02",
+ .of_match_table = k101_im2ba02_of_match,
+ },
+};
+module_mipi_dsi_driver(k101_im2ba02_driver);
+
+MODULE_AUTHOR("Icenowy Zheng <[email protected]>");
+MODULE_DESCRIPTION("Feixin K101 IM2BA02 MIPI-DSI LCD panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index f394d53a7da4..09935520e606 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -540,7 +540,7 @@ static int ili9322_enable(struct drm_panel *panel)
/* Serial RGB modes */
static const struct drm_display_mode srgb_320x240_mode = {
- .clock = 2453500,
+ .clock = 24535,
.hdisplay = 320,
.hsync_start = 320 + 359,
.hsync_end = 320 + 359 + 1,
@@ -554,7 +554,7 @@ static const struct drm_display_mode srgb_320x240_mode = {
};
static const struct drm_display_mode srgb_360x240_mode = {
- .clock = 2700000,
+ .clock = 27000,
.hdisplay = 360,
.hsync_start = 360 + 35,
.hsync_end = 360 + 35 + 1,
@@ -569,7 +569,7 @@ static const struct drm_display_mode srgb_360x240_mode = {
/* This is the only mode listed for parallel RGB in the datasheet */
static const struct drm_display_mode prgb_320x240_mode = {
- .clock = 6400000,
+ .clock = 64000,
.hdisplay = 320,
.hsync_start = 320 + 38,
.hsync_end = 320 + 38 + 1,
@@ -584,7 +584,7 @@ static const struct drm_display_mode prgb_320x240_mode = {
/* YUV modes */
static const struct drm_display_mode yuv_640x320_mode = {
- .clock = 2454000,
+ .clock = 24540,
.hdisplay = 640,
.hsync_start = 640 + 252,
.hsync_end = 640 + 252 + 1,
@@ -598,7 +598,7 @@ static const struct drm_display_mode yuv_640x320_mode = {
};
static const struct drm_display_mode yuv_720x360_mode = {
- .clock = 2700000,
+ .clock = 27000,
.hdisplay = 720,
.hsync_start = 720 + 252,
.hsync_end = 720 + 252 + 1,
@@ -613,7 +613,7 @@ static const struct drm_display_mode yuv_720x360_mode = {
/* BT.656 VGA mode, 640x480 */
static const struct drm_display_mode itu_r_bt_656_640_mode = {
- .clock = 2454000,
+ .clock = 24540,
.hdisplay = 640,
.hsync_start = 640 + 3,
.hsync_end = 640 + 3 + 1,
@@ -628,7 +628,7 @@ static const struct drm_display_mode itu_r_bt_656_640_mode = {
/* BT.656 D1 mode 720x480 */
static const struct drm_display_mode itu_r_bt_656_720_mode = {
- .clock = 2700000,
+ .clock = 27000,
.hdisplay = 720,
.hsync_start = 720 + 3,
.hsync_end = 720 + 3 + 1,
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index b262b53dbd85..5907f2503755 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -197,7 +197,7 @@ static int lg4573_enable(struct drm_panel *panel)
}
static const struct drm_display_mode default_mode = {
- .clock = 27000,
+ .clock = 28341,
.hdisplay = 480,
.hsync_start = 480 + 10,
.hsync_end = 480 + 10 + 59,
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
new file mode 100644
index 000000000000..4a8fa908a2cf
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -0,0 +1,1098 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Novatek NT35510 panel driver
+ * Copyright (C) 2020 Linus Walleij <[email protected]>
+ * Based on code by Robert Teather (C) 2012 Samsung
+ *
+ * This display driver (and I refer to the physical component NT35510,
+ * not this Linux kernel software driver) can handle:
+ * 480x864, 480x854, 480x800, 480x720 and 480x640 pixel displays.
+ * It has 480x864x24bit SRAM embedded for storing a frame.
+ * When powered on the display is by default in 480x800 mode.
+ *
+ * The actual panels using this component have different names, but
+ * the code needed to set up and configure the panel will be similar,
+ * so they should all use the NT35510 driver with appropriate configuration
+ * per-panel, e.g. for physical size.
+ *
+ * This driver is for the DSI interface to panels using the NT35510.
+ *
+ * The NT35510 can also use an RGB (DPI) interface combined with an
+ * I2C or SPI interface for setting up the NT35510. If this is needed
+ * this panel driver should be refactored to also support that use
+ * case.
+ */
+#include <linux/backlight.h>
+#include <linux/bitops.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#define MCS_CMD_MAUCCTR 0xF0 /* Manufacturer command enable */
+#define MCS_CMD_READ_ID1 0xDA
+#define MCS_CMD_READ_ID2 0xDB
+#define MCS_CMD_READ_ID3 0xDC
+#define MCS_CMD_MTP_READ_SETTING 0xF8 /* Uncertain about name */
+#define MCS_CMD_MTP_READ_PARAM 0xFF /* Uncertain about name */
+
+/*
+ * These manufacturer commands are available after we enable manufacturer
+ * command set (MCS) for page 0.
+ */
+#define NT35510_P0_DOPCTR 0xB1
+#define NT35510_P0_SDHDTCTR 0xB6
+#define NT35510_P0_GSEQCTR 0xB7
+#define NT35510_P0_SDEQCTR 0xB8
+#define NT35510_P0_SDVPCTR 0xBA
+#define NT35510_P0_DPFRCTR1 0xBD
+#define NT35510_P0_DPFRCTR2 0xBE
+#define NT35510_P0_DPFRCTR3 0xBF
+#define NT35510_P0_DPMCTR12 0xCC
+
+#define NT35510_P0_DOPCTR_LEN 2
+#define NT35510_P0_GSEQCTR_LEN 2
+#define NT35510_P0_SDEQCTR_LEN 4
+#define NT35510_P0_SDVPCTR_LEN 1
+#define NT35510_P0_DPFRCTR1_LEN 5
+#define NT35510_P0_DPFRCTR2_LEN 5
+#define NT35510_P0_DPFRCTR3_LEN 5
+#define NT35510_P0_DPMCTR12_LEN 3
+
+#define NT35510_DOPCTR_0_RAMKP BIT(7) /* Contents kept in sleep */
+#define NT35510_DOPCTR_0_DSITE BIT(6) /* Enable TE signal */
+#define NT35510_DOPCTR_0_DSIG BIT(5) /* Enable generic read/write */
+#define NT35510_DOPCTR_0_DSIM BIT(4) /* Enable video mode on DSI */
+#define NT35510_DOPCTR_0_EOTP BIT(3) /* Support EoTP */
+#define NT35510_DOPCTR_0_N565 BIT(2) /* RGB or BGR pixel format */
+#define NT35510_DOPCTR_1_TW_PWR_SEL BIT(4) /* TE power selector */
+#define NT35510_DOPCTR_1_CRGB BIT(3) /* RGB or BGR byte order */
+#define NT35510_DOPCTR_1_CTB BIT(2) /* Vertical scanning direction */
+#define NT35510_DOPCTR_1_CRL BIT(1) /* Source driver data shift */
+#define NT35510_P0_SDVPCTR_PRG BIT(2) /* 0 = normal operation, 1 = VGLO */
+#define NT35510_P0_SDVPCTR_AVDD 0 /* source driver output = AVDD */
+#define NT35510_P0_SDVPCTR_OFFCOL 1 /* source driver output = off color */
+#define NT35510_P0_SDVPCTR_AVSS 2 /* source driver output = AVSS */
+#define NT35510_P0_SDVPCTR_HI_Z 3 /* source driver output = High impedance */
+
+/*
+ * These manufacturer commands are available after we enable manufacturer
+ * command set (MCS) for page 1.
+ */
+#define NT35510_P1_SETAVDD 0xB0
+#define NT35510_P1_SETAVEE 0xB1
+#define NT35510_P1_SETVCL 0xB2
+#define NT35510_P1_SETVGH 0xB3
+#define NT35510_P1_SETVRGH 0xB4
+#define NT35510_P1_SETVGL 0xB5
+#define NT35510_P1_BT1CTR 0xB6
+#define NT35510_P1_BT2CTR 0xB7
+#define NT35510_P1_BT3CTR 0xB8
+#define NT35510_P1_BT4CTR 0xB9 /* VGH boosting times/freq */
+#define NT35510_P1_BT5CTR 0xBA
+#define NT35510_P1_PFMCTR 0xBB
+#define NT35510_P1_SETVGP 0xBC
+#define NT35510_P1_SETVGN 0xBD
+#define NT35510_P1_SETVCMOFF 0xBE
+#define NT35510_P1_VGHCTR 0xBF /* VGH output ctrl */
+#define NT35510_P1_SET_GAMMA_RED_POS 0xD1
+#define NT35510_P1_SET_GAMMA_GREEN_POS 0xD2
+#define NT35510_P1_SET_GAMMA_BLUE_POS 0xD3
+#define NT35510_P1_SET_GAMMA_RED_NEG 0xD4
+#define NT35510_P1_SET_GAMMA_GREEN_NEG 0xD5
+#define NT35510_P1_SET_GAMMA_BLUE_NEG 0xD6
+
+/* AVDD and AVEE setting 3 bytes */
+#define NT35510_P1_AVDD_LEN 3
+#define NT35510_P1_AVEE_LEN 3
+#define NT35510_P1_VGH_LEN 3
+#define NT35510_P1_VGL_LEN 3
+#define NT35510_P1_VGP_LEN 3
+#define NT35510_P1_VGN_LEN 3
+/* BT1CTR thru BT5CTR setting 3 bytes */
+#define NT35510_P1_BT1CTR_LEN 3
+#define NT35510_P1_BT2CTR_LEN 3
+#define NT35510_P1_BT4CTR_LEN 3
+#define NT35510_P1_BT5CTR_LEN 3
+/* 52 gamma parameters times two per color: positive and negative */
+#define NT35510_P1_GAMMA_LEN 52
+
+/**
+ * struct nt35510_config - the display-specific NT35510 configuration
+ *
+ * Some of the settings provide an array of bytes, A, B, C which mean:
+ * A = normal / idle off mode
+ * B = idle on mode
+ * C = partial / idle off mode
+ *
+ * Gamma correction arrays are 10-bit numbers, where two consecutive
+ * bytes make up one point on the gamma correction curve. The points are
+ * not linearly placed along the X axis; we get points 0, 1, 3, 5,
+ * 7, 11, 15, 23, 31, 47, 63, 95, 127, 128, 160, 192, 208, 224, 232,
+ * 240, 244, 248, 250, 252, 254, 255. The voltage tuples form
+ * V0, V1, V3 ... V255, with 0x0000 being the lowest voltage and
+ * 0x03FF being the highest voltage.
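+ *
+ * For example, assuming the two bytes of each point are packed
+ * big-endian (high bits first), the byte pair 0x01, 0x38 would encode
+ * the single curve point 0x138.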
+ *
+ * Each value must be strictly higher than the previous value forming
+ * a rising curve like this:
+ *
+ * ^
+ * | V255
+ * | V254
+ * | ....
+ * | V5
+ * | V3
+ * | V1
+ * | V0
+ * +------------------------------------------->
+ *
+ * The details about all settings can be found in the NT35510 Application
+ * Note.
+ */
+struct nt35510_config {
+ /**
+ * @width_mm: physical panel width [mm]
+ */
+ u32 width_mm;
+ /**
+ * @height_mm: physical panel height [mm]
+ */
+ u32 height_mm;
+ /**
+ * @mode: the display mode. This is only relevant outside the panel
+ * in video mode; in command mode it configures the internal
+ * timing in the display controller.
+ */
+ const struct drm_display_mode mode;
+ /**
+ * @avdd: setting for AVDD ranging from 0x00 = 6.5V to 0x14 = 4.5V
+ * in 0.1V steps the default is 0x05 which means 6.0V
+ */
+ u8 avdd[NT35510_P1_AVDD_LEN];
+ /**
+ * @bt1ctr: setting for boost power control for the AVDD step-up
+ * circuit (1)
+ * bits 0..2 in the lower nibble controls PCK, the booster clock
+ * frequency for the step-up circuit:
+ * 0 = Hsync/32
+ * 1 = Hsync/16
+ * 2 = Hsync/8
+ * 3 = Hsync/4
+ * 4 = Hsync/2
+ * 5 = Hsync
+ * 6 = Hsync x 2
+ * 7 = Hsync x 4
+ * bits 4..6 in the upper nibble controls BTP, the boosting
+ * amplification for the step-up circuit:
+ * 0 = Disable
+ * 1 = 1.5 x VDDB
+ * 2 = 1.66 x VDDB
+ * 3 = 2 x VDDB
+ * 4 = 2.5 x VDDB
+ * 5 = 3 x VDDB
+ * The defaults are 4 and 4 yielding 0x44
+ */
+ u8 bt1ctr[NT35510_P1_BT1CTR_LEN];
+ /**
+ * @avee: setting for AVEE ranging from 0x00 = -6.5V to 0x14 = -4.5V
+ * in 0.1V steps the default is 0x05 which means -6.0V
+ */
+ u8 avee[NT35510_P1_AVEE_LEN];
+ /**
+ * @bt2ctr: setting for boost power control for the AVEE step-up
+ * circuit (2)
+ * bits 0..2 in the lower nibble controls NCK, the booster clock
+ * frequency, the values are the same as for PCK in @bt1ctr.
+ * bits 4..5 in the upper nibble controls BTN, the boosting
+ * amplification for the step-up circuit.
+ * 0 = Disable
+ * 1 = -1.5 x VDDB
+ * 2 = -2 x VDDB
+ * 3 = -2.5 x VDDB
+ * 4 = -3 x VDDB
+ * The defaults are 4 and 3 yielding 0x34
+ */
+ u8 bt2ctr[NT35510_P1_BT2CTR_LEN];
+ /**
+ * @vgh: setting for VGH ranging from 0x00 = 7.0V to 0x0B = 18.0V
+ * in 1V steps, the default is 0x08 which means 15V
+ */
+ u8 vgh[NT35510_P1_VGH_LEN];
+ /**
+ * @bt4ctr: setting for boost power control for the VGH step-up
+ * circuit (4)
+ * bits 0..2 in the lower nibble controls HCK, the booster clock
+ * frequency, the values are the same as for PCK in @bt1ctr.
+ * bits 4..5 in the upper nibble controls BTH, the boosting
+ * amplification for the step-up circuit.
+ * 0 = AVDD + VDDB
+ * 1 = AVDD - AVEE
+ * 2 = AVDD - AVEE + VDDB
+ * 3 = AVDD x 2 - AVEE
+ * The defaults are 4 and 3 yielding 0x34
+ */
+ u8 bt4ctr[NT35510_P1_BT4CTR_LEN];
+ /**
+ * @vgl: setting for VGL ranging from 0x00 = -2V to 0x0f = -15V in
+ * 1V steps, the default is 0x08 which means -10V
+ */
+ u8 vgl[NT35510_P1_VGL_LEN];
+ /**
+ * @bt5ctr: setting for boost power control for the VGL step-up
+ * circuit (5)
+ * bits 0..2 in the lower nibble control LCK, the booster clock
+ * frequency; the values are the same as for PCK in @bt1ctr.
+ * bits 4..5 in the upper nibble control BTL, the boosting
+ * amplification for the step-up circuit.
+ * 0 = AVEE + VCL
+ * 1 = AVEE - AVDD
+ * 2 = AVEE + VCL - AVDD
+ * 3 = AVEE x 2 - AVDD
+ * The defaults are 3 and 2 yielding 0x32
+ */
+ u8 bt5ctr[NT35510_P1_BT5CTR_LEN];
+ /**
+ * @vgp: setting for VGP, the positive gamma divider voltages:
+ * VGMP, the high voltage, and VGSP, the low voltage
+ * (see the decoding sketch after this struct).
+ * The first byte contains bit 8 of VGMP and VGSP in bits 4 and 0.
+ * The second byte contains bits 0..7 of VGMP.
+ * The third byte contains bits 0..7 of VGSP.
+ * VGMP 0x00 = 3.0V .. 0x108 = 6.3V in steps of 12.5mV
+ * VGSP 0x00 = 0V .. 0x111 = 3.7V in steps of 12.5mV
+ */
+ u8 vgp[NT35510_P1_VGP_LEN];
+ /**
+ * @vgn: setting for VGN, the negative gamma divider voltages,
+ * same layout of bytes as @vgp.
+ */
+ u8 vgn[NT35510_P1_VGN_LEN];
+ /**
+ * @sdeqctr: Source driver control settings, first byte is
+ * 0 for mode 1 and 1 for mode 2. Mode 1 uses two steps and
+ * mode 2 uses three steps, meaning EQS3 is not used in mode
+ * 1. Mode 2 is the default. The last three parameters are EQS1, EQS2
+ * and EQS3, setting the rise time for each equalizer step:
+ * 0x00 = 0.0 us to 0x0f = 7.5 us in steps of 0.5us. The default
+ * is 0x07 = 3.5 us.
+ */
+ u8 sdeqctr[NT35510_P0_SDEQCTR_LEN];
+ /**
+ * @sdvpctr: power/voltage behaviour during vertical porch time
+ */
+ u8 sdvpctr;
+ /**
+ * @t1: the number of pixel clocks on one scanline, range
+ * 0x100 (257 ticks) .. 0x3FF (1024 ticks); the number of clock
+ * ticks is the register value + 1.
+ */
+ u16 t1;
+ /**
+ * @vbp: vertical back porch toward the PANEL. Note: not toward
+ * the DSI host; these are separate interfaces, in from the DSI
+ * host and out to the panel.
+ */
+ u8 vbp;
+ /**
+ * @vfp: vertical front porch toward the PANEL.
+ */
+ u8 vfp;
+ /**
+ * @psel: pixel clock divisor: 0 = 1, 1 = 2, 2 = 4, 3 = 8.
+ */
+ u8 psel;
+ /**
+ * @dpmctr12: Display timing control 12
+ * Byte 1 bit 4 selects LVGL voltage level: 0 = VGLX, 1 = VGL_REG
+ * Byte 1 bit 1 selects gate signal mode: 0 = non-overlap, 1 = overlap
+ * Byte 1 bit 0 selects output signal control R/L swap: 0 = normal,
+ * 1 = swap all O->E, L->R
+ * Byte 2 is CLW delay clock for CK O/E and CKB O/E signals:
+ * 0x00 = 0us .. 0xFF = 12.75us in 0.05us steps
+ * Byte 3 is FTI_H0 delay time for STP O/E signals:
+ * 0x00 = 0us .. 0xFF = 12.75us in 0.05us steps
+ */
+ u8 dpmctr12[NT35510_P0_DPMCTR12_LEN];
+ /**
+ * @gamma_corr_pos_r: Red gamma correction parameters, positive
+ */
+ u8 gamma_corr_pos_r[NT35510_P1_GAMMA_LEN];
+ /**
+ * @gamma_corr_pos_g: Green gamma correction parameters, positive
+ */
+ u8 gamma_corr_pos_g[NT35510_P1_GAMMA_LEN];
+ /**
+ * @gamma_corr_pos_b: Blue gamma correction parameters, positive
+ */
+ u8 gamma_corr_pos_b[NT35510_P1_GAMMA_LEN];
+ /**
+ * @gamma_corr_neg_r: Red gamma correction parameters, negative
+ */
+ u8 gamma_corr_neg_r[NT35510_P1_GAMMA_LEN];
+ /**
+ * @gamma_corr_neg_g: Green gamma correction parameters, negative
+ */
+ u8 gamma_corr_neg_g[NT35510_P1_GAMMA_LEN];
+ /**
+ * @gamma_corr_neg_b: Blue gamma correction parameters, negative
+ */
+ u8 gamma_corr_neg_b[NT35510_P1_GAMMA_LEN];
+};
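
Two of the field encodings documented above deserve a concrete sketch
(helper names invented for illustration; not part of the patch): packing
the clock and amplification nibbles of a boost control byte, and decoding
the three @vgp bytes into VGMP/VGSP.

    /* Sketch: pack a boost control byte from its two fields */
    #define BTCTR(clk, amp) (((amp) << 4) | (clk))

    /* PCK = Hsync/2 (4) and BTP = 2.5 x VDDB (4) give the default 0x44 */
    static const u8 bt1ctr_default = BTCTR(4, 4);

    /* Sketch: split vgp[] = { high bits, VGMP low byte, VGSP low byte } */
    static void decode_vgp(const u8 vgp[3], u16 *vgmp, u16 *vgsp)
    {
        *vgmp = ((u16)(vgp[0] & BIT(4)) << 4) | vgp[1];
        *vgsp = ((u16)(vgp[0] & BIT(0)) << 8) | vgp[2];
    }

Decoding the Hydis value { 0x00, 0xA3, 0x00 } used further down gives
VGMP = 0x0A3 = 163 steps, i.e. 3.0 V + 163 x 12.5 mV = 5.0375 V, which
matches the comment in the panel data.
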
+
+/**
+ * struct nt35510 - state container for the NT35510 panel
+ */
+struct nt35510 {
+ /**
+ * @dev: the container device
+ */
+ struct device *dev;
+ /**
+ * @conf: the specific panel configuration, as the NT35510
+ * can be combined with many physical panels, they can have
+ * different physical dimensions and gamma correction etc,
+ * so this is stored in the config.
+ */
+ const struct nt35510_config *conf;
+ /**
+ * @panel: the DRM panel object for the instance
+ */
+ struct drm_panel panel;
+ /**
+ * @supplies: regulators supplying the panel
+ */
+ struct regulator_bulk_data supplies[2];
+ /**
+ * @reset_gpio: the reset line
+ */
+ struct gpio_desc *reset_gpio;
+};
+
+/* Manufacturer command has strictly this byte sequence */
+static const u8 nt35510_mauc_select_page_0[] = { 0x55, 0xAA, 0x52, 0x08, 0x00 };
+static const u8 nt35510_mauc_select_page_1[] = { 0x55, 0xAA, 0x52, 0x08, 0x01 };
+static const u8 nt35510_vgh_on[] = { 0x01 };
+
+static inline struct nt35510 *panel_to_nt35510(struct drm_panel *panel)
+{
+ return container_of(panel, struct nt35510, panel);
+}
+
+#define NT35510_ROTATE_0_SETTING 0x02
+#define NT35510_ROTATE_180_SETTING 0x00
+
+static int nt35510_send_long(struct nt35510 *nt, struct mipi_dsi_device *dsi,
+ u8 cmd, u8 cmdlen, const u8 *seq)
+{
+ const u8 *seqp = seq;
+ int cmdwritten = 0;
+ int chunk = cmdlen;
+ int ret;
+
+ if (chunk > 15)
+ chunk = 15;
+ ret = mipi_dsi_dcs_write(dsi, cmd, seqp, chunk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(nt->dev,
+ "error sending DCS command seq cmd %02x\n",
+ cmd);
+ return ret;
+ }
+ cmdwritten += chunk;
+ seqp += chunk;
+
+ while (cmdwritten < cmdlen) {
+ chunk = cmdlen - cmdwritten;
+ if (chunk > 15)
+ chunk = 15;
+ ret = mipi_dsi_generic_write(dsi, seqp, chunk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(nt->dev,
+ "error sending generic write seq %02x\n",
+ cmd);
+ return ret;
+ }
+ cmdwritten += chunk;
+ seqp += chunk;
+ }
+ DRM_DEV_DEBUG(nt->dev, "sent command %02x %02x bytes\n",
+ cmd, cmdlen);
+ return 0;
+}
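
A note on the chunking here: the first chunk is sent as a DCS write
carrying the command byte, the rest as generic writes, each capped at 15
bytes (presumably a DSI host FIFO limit; the driver does not say). A
52-byte gamma table therefore goes out as four transfers of 15, 15, 15
and 7 bytes. A sketch of the same arithmetic, not driver code:

    static void show_chunks(int cmdlen)
    {
        int written = 0;

        while (written < cmdlen) {
            int chunk = min(cmdlen - written, 15);

            pr_info("%s write, %d bytes\n",
                written ? "generic" : "DCS", chunk);
            written += chunk;
        }
    }
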
+
+static int nt35510_read_id(struct nt35510 *nt)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ u8 id1, id2, id3;
+ int ret;
+
+ ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID1, &id1, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(nt->dev, "could not read MTP ID1\n");
+ return ret;
+ }
+ ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID2, &id2, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(nt->dev, "could not read MTP ID2\n");
+ return ret;
+ }
+ ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID3, &id3, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(nt->dev, "could not read MTP ID3\n");
+ return ret;
+ }
+
+ /*
+ * Multi-Time Programmable (?) memory contains manufacturer
+ * ID (e.g. Hydis 0x55), driver ID (e.g. NT35510 0xc0) and
+ * version.
+ */
+ DRM_DEV_INFO(nt->dev,
+ "MTP ID manufacturer: %02x version: %02x driver: %02x\n",
+ id1, id2, id3);
+
+ return 0;
+}
+
+/**
+ * nt35510_setup_power() - set up power config in page 1
+ * @nt: the display instance to set up
+ */
+static int nt35510_setup_power(struct nt35510 *nt)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ int ret;
+
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETAVDD,
+ NT35510_P1_AVDD_LEN,
+ nt->conf->avdd);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_BT1CTR,
+ NT35510_P1_BT1CTR_LEN,
+ nt->conf->bt1ctr);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETAVEE,
+ NT35510_P1_AVEE_LEN,
+ nt->conf->avee);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_BT2CTR,
+ NT35510_P1_BT2CTR_LEN,
+ nt->conf->bt2ctr);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGH,
+ NT35510_P1_VGH_LEN,
+ nt->conf->vgh);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_BT4CTR,
+ NT35510_P1_BT4CTR_LEN,
+ nt->conf->bt4ctr);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_VGHCTR,
+ ARRAY_SIZE(nt35510_vgh_on),
+ nt35510_vgh_on);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGL,
+ NT35510_P1_VGL_LEN,
+ nt->conf->vgl);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_BT5CTR,
+ NT35510_P1_BT5CTR_LEN,
+ nt->conf->bt5ctr);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGP,
+ NT35510_P1_VGP_LEN,
+ nt->conf->vgp);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGN,
+ NT35510_P1_VGN_LEN,
+ nt->conf->vgn);
+ if (ret)
+ return ret;
+
+ /* Typically 10 ms */
+ usleep_range(10000, 20000);
+
+ return 0;
+}
+
+/**
+ * nt35510_setup_display() - set up display config in page 0
+ * @nt: the display instance to set up
+ */
+static int nt35510_setup_display(struct nt35510 *nt)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ const struct nt35510_config *conf = nt->conf;
+ u8 dopctr[NT35510_P0_DOPCTR_LEN];
+ u8 gseqctr[NT35510_P0_GSEQCTR_LEN];
+ u8 dpfrctr[NT35510_P0_DPFRCTR1_LEN];
+ /* FIXME: set up any rotation (assume none for now) */
+ u8 addr_mode = NT35510_ROTATE_0_SETTING;
+ u8 val;
+ int ret;
+
+ /* Enable TE, EoTP and RGB pixel format */
+ dopctr[0] = NT35510_DOPCTR_0_DSITE | NT35510_DOPCTR_0_EOTP |
+ NT35510_DOPCTR_0_N565;
+ dopctr[1] = NT35510_DOPCTR_1_CTB;
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_DOPCTR,
+ NT35510_P0_DOPCTR_LEN,
+ dopctr);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_ADDRESS_MODE, &addr_mode,
+ sizeof(addr_mode));
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Source data hold time, default 0x05 = 2.5us
+ * 0x00..0x3F = 0 .. 31.5us in steps of 0.5us
+ * 0x0A = 5us
+ */
+ val = 0x0A;
+ ret = mipi_dsi_dcs_write(dsi, NT35510_P0_SDHDTCTR, &val,
+ sizeof(val));
+ if (ret < 0)
+ return ret;
+
+ /* EQ control for gate signals, 0x00 = 0 us */
+ gseqctr[0] = 0x00;
+ gseqctr[1] = 0x00;
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_GSEQCTR,
+ NT35510_P0_GSEQCTR_LEN,
+ gseqctr);
+ if (ret)
+ return ret;
+
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_SDEQCTR,
+ NT35510_P0_SDEQCTR_LEN,
+ conf->sdeqctr);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_write(dsi, NT35510_P0_SDVPCTR,
+ &conf->sdvpctr, 1);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Display timing control for active and idle off mode:
+ * the first byte contains the two high bits of T1A and the second
+ * byte the low 8 bits; the valid range is 0x100 (256) to 0x3ff
+ * (1023), representing 257..1024 (value + 1) pixel clock ticks for
+ * one scanline. At a 20MHz pixel clock this covers the range of
+ * 12.85us .. 51.20us in steps of 0.05us; the default is 0x184
+ * (388), representing 389 ticks.
+ * The third byte is VBPDA, vertical back porch display active
+ * and the fourth VFPDA, vertical front porch display active,
+ * both given in number of scanlines in the range 0x02..0xff
+ * for 2..255 scanlines. The fifth byte is 2 bits selecting
+ * PSEL for active and idle off mode, how much the 20MHz clock
+ * is divided by 0..3. This needs to be adjusted to get the right
+ * frame rate.
+ */
+ dpfrctr[0] = (conf->t1 >> 8) & 0xFF;
+ dpfrctr[1] = conf->t1 & 0xFF;
+ /* Vertical back porch */
+ dpfrctr[2] = conf->vbp;
+ /* Vertical front porch */
+ dpfrctr[3] = conf->vfp;
+ dpfrctr[4] = conf->psel;
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_DPFRCTR1,
+ NT35510_P0_DPFRCTR1_LEN,
+ dpfrctr);
+ if (ret)
+ return ret;
+ /* For idle and partial idle off mode we decrease front porch by one */
+ dpfrctr[3]--;
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_DPFRCTR2,
+ NT35510_P0_DPFRCTR2_LEN,
+ dpfrctr);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_DPFRCTR3,
+ NT35510_P0_DPFRCTR3_LEN,
+ dpfrctr);
+ if (ret)
+ return ret;
+
+ /* Enable TE on vblank */
+ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret)
+ return ret;
+
+ /* Turn on the pads? */
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_DPMCTR12,
+ NT35510_P0_DPMCTR12_LEN,
+ conf->dpmctr12);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int nt35510_set_brightness(struct backlight_device *bl)
+{
+ struct nt35510 *nt = bl_get_data(bl);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ u8 brightness = bl->props.brightness;
+ int ret;
+
+ DRM_DEV_DEBUG(nt->dev, "set brightness %d\n", brightness);
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ &brightness,
+ sizeof(brightness));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct backlight_ops nt35510_bl_ops = {
+ .update_status = nt35510_set_brightness,
+};
+
+/*
+ * The power-on sequence: enable the regulators, take the controller out
+ * of reset, then program the manufacturer pages with the power, gamma
+ * and display settings.
+ */
+static int nt35510_power_on(struct nt35510 *nt)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(nt->supplies), nt->supplies);
+ if (ret < 0) {
+ dev_err(nt->dev, "unable to enable regulators\n");
+ return ret;
+ }
+
+ /* Toggle RESET in accordance with datasheet page 370 */
+ if (nt->reset_gpio) {
+ gpiod_set_value(nt->reset_gpio, 1);
+ /* Active min 10 us according to datasheet, let's say 20 */
+ usleep_range(20, 1000);
+ gpiod_set_value(nt->reset_gpio, 0);
+ /*
+ * 5 ms during sleep mode, 120 ms during sleep out mode
+ * according to datasheet, let's use 120-140 ms.
+ */
+ usleep_range(120000, 140000);
+ }
+
+ ret = nt35510_read_id(nt);
+ if (ret)
+ return ret;
+
+ /* Set up stuff in manufacturer control, page 1 */
+ ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
+ ARRAY_SIZE(nt35510_mauc_select_page_1),
+ nt35510_mauc_select_page_1);
+ if (ret)
+ return ret;
+
+ ret = nt35510_setup_power(nt);
+ if (ret)
+ return ret;
+
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_RED_POS,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_pos_r);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_GREEN_POS,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_pos_g);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_BLUE_POS,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_pos_b);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_RED_NEG,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_neg_r);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_GREEN_NEG,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_neg_g);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_BLUE_NEG,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_neg_b);
+ if (ret)
+ return ret;
+
+ /* Set up stuff in manufacturer control, page 0 */
+ ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
+ ARRAY_SIZE(nt35510_mauc_select_page_0),
+ nt35510_mauc_select_page_0);
+ if (ret)
+ return ret;
+
+ ret = nt35510_setup_display(nt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int nt35510_power_off(struct nt35510 *nt)
+{
+ int ret;
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(nt->supplies), nt->supplies);
+ if (ret)
+ return ret;
+
+ if (nt->reset_gpio)
+ gpiod_set_value(nt->reset_gpio, 1);
+
+ return 0;
+}
+
+static int nt35510_unprepare(struct drm_panel *panel)
+{
+ struct nt35510 *nt = panel_to_nt35510(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(nt->dev, "failed to turn display off (%d)\n",
+ ret);
+ return ret;
+ }
+ usleep_range(10000, 20000);
+
+ /* Enter sleep mode */
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(nt->dev, "failed to enter sleep mode (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /* Wait 4 frames; the vendor driver uses 5 ms for this */
+ usleep_range(5000, 10000);
+
+ ret = nt35510_power_off(nt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int nt35510_prepare(struct drm_panel *panel)
+{
+ struct nt35510 *nt = panel_to_nt35510(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ int ret;
+
+ ret = nt35510_power_on(nt);
+ if (ret)
+ return ret;
+
+ /* Exit sleep mode */
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(nt->dev, "failed to exit sleep mode (%d)\n",
+ ret);
+ return ret;
+ }
+ /* Up to 120 ms */
+ usleep_range(120000, 150000);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(nt->dev, "failed to turn display on (%d)\n",
+ ret);
+ return ret;
+ }
+ /* Some 10 ms */
+ usleep_range(10000, 20000);
+
+ return 0;
+}
+
+static int nt35510_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct nt35510 *nt = panel_to_nt35510(panel);
+ struct drm_display_mode *mode;
+ struct drm_display_info *info;
+
+ info = &connector->display_info;
+ info->width_mm = nt->conf->width_mm;
+ info->height_mm = nt->conf->height_mm;
+ mode = drm_mode_duplicate(connector->dev, &nt->conf->mode);
+ if (!mode) {
+ DRM_ERROR("bad mode or failed to add mode\n");
+ return -EINVAL;
+ }
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+ mode->width_mm = nt->conf->width_mm;
+ mode->height_mm = nt->conf->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1; /* Number of modes */
+}
+
+static const struct drm_panel_funcs nt35510_drm_funcs = {
+ .unprepare = nt35510_unprepare,
+ .prepare = nt35510_prepare,
+ .get_modes = nt35510_get_modes,
+};
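
For context on how these hooks are reached: a DSI host or display
controller driver looks the panel up and drives it through the drm_panel
API. A rough sketch with error handling trimmed; np is assumed to be the
panel's device tree node, found through the host's output port:

    /* Sketch: how a host driver would exercise the hooks above */
    static int example_panel_on(struct device_node *np)
    {
        struct drm_panel *panel = of_drm_find_panel(np);

        if (IS_ERR(panel))
            return PTR_ERR(panel);

        /* ends up in nt35510_prepare(): power on, exit sleep, display on */
        return drm_panel_prepare(panel);
    }

drm_panel_unprepare() is the symmetric path down into nt35510_unprepare().
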
+
+static int nt35510_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct nt35510 *nt;
+ int ret;
+
+ nt = devm_kzalloc(dev, sizeof(struct nt35510), GFP_KERNEL);
+ if (!nt)
+ return -ENOMEM;
+ mipi_dsi_set_drvdata(dsi, nt);
+ nt->dev = dev;
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ /*
+ * Datasheet suggests max HS rate for NT35510 is 250 MHz
+ * (period time 4ns, see figure 7.6.4 page 365) and max LP rate is
+ * 20 MHz (period time 50ns, see figure 7.6.6. page 366).
+ * However, these frequencies appear in the source code for the Hydis
+ * HVA40WV1 panel, and setting up the LP frequency makes the panel
+ * not work.
+ *
+ * TODO: if other panels prove to be closer to the datasheet,
+ * maybe make this a per-panel config in struct nt35510_config?
+ */
+ dsi->hs_rate = 349440000;
+ dsi->lp_rate = 9600000;
+ dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_EOT_PACKET;
+
+ /*
+ * Every new incarnation of this display must have its own
+ * configuration data entry in this driver.
+ */
+ nt->conf = of_device_get_match_data(dev);
+ if (!nt->conf) {
+ dev_err(dev, "missing device configuration\n");
+ return -ENODEV;
+ }
+
+ nt->supplies[0].supply = "vdd"; /* 2.3-4.8 V */
+ nt->supplies[1].supply = "vddi"; /* 1.65-3.3V */
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(nt->supplies),
+ nt->supplies);
+ if (ret < 0)
+ return ret;
+ ret = regulator_set_voltage(nt->supplies[0].consumer,
+ 2300000, 4800000);
+ if (ret)
+ return ret;
+ ret = regulator_set_voltage(nt->supplies[1].consumer,
+ 1650000, 3300000);
+ if (ret)
+ return ret;
+
+ nt->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(nt->reset_gpio)) {
+ dev_err(dev, "error getting RESET GPIO\n");
+ return PTR_ERR(nt->reset_gpio);
+ }
+
+ drm_panel_init(&nt->panel, dev, &nt35510_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ /*
+ * First, try to locate an external backlight (such as on GPIO);
+ * if this fails, assume we will want to use the internal backlight
+ * control.
+ */
+ ret = drm_panel_of_backlight(&nt->panel);
+ if (ret) {
+ dev_err(dev, "error getting external backlight %d\n", ret);
+ return ret;
+ }
+ if (!nt->panel.backlight) {
+ struct backlight_device *bl;
+
+ bl = devm_backlight_device_register(dev, "nt35510", dev, nt,
+ &nt35510_bl_ops, NULL);
+ if (IS_ERR(bl)) {
+ DRM_DEV_ERROR(dev, "failed to register backlight device\n");
+ return PTR_ERR(bl);
+ }
+ bl->props.max_brightness = 255;
+ bl->props.brightness = 255;
+ bl->props.power = FB_BLANK_POWERDOWN;
+ nt->panel.backlight = bl;
+ }
+
+ ret = drm_panel_add(&nt->panel);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&nt->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nt35510_remove(struct mipi_dsi_device *dsi)
+{
+ struct nt35510 *nt = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ mipi_dsi_detach(dsi);
+ /* Power off */
+ ret = nt35510_power_off(nt);
+ drm_panel_remove(&nt->panel);
+
+ return ret;
+}
+
+/*
+ * These gamma correction values are 10bit tuples, so only bits 0 and 1 are
+ * ever used in the first byte. They form a positive and negative gamma
+ * correction curve for each color; values must be strictly higher for each
+ * step on the curve. As can be seen, these default curves go from 0x0001
+ * to 0x03FE.
+ */
+#define NT35510_GAMMA_POS_DEFAULT 0x00, 0x01, 0x00, 0x43, 0x00, \
+ 0x6B, 0x00, 0x87, 0x00, 0xA3, 0x00, 0xCE, 0x00, 0xF1, 0x01, \
+ 0x27, 0x01, 0x53, 0x01, 0x98, 0x01, 0xCE, 0x02, 0x22, 0x02, \
+ 0x83, 0x02, 0x78, 0x02, 0x9E, 0x02, 0xDD, 0x03, 0x00, 0x03, \
+ 0x2E, 0x03, 0x54, 0x03, 0x7F, 0x03, 0x95, 0x03, 0xB3, 0x03, \
+ 0xC2, 0x03, 0xE1, 0x03, 0xF1, 0x03, 0xFE
+
+#define NT35510_GAMMA_NEG_DEFAULT 0x00, 0x01, 0x00, 0x43, 0x00, \
+ 0x6B, 0x00, 0x87, 0x00, 0xA3, 0x00, 0xCE, 0x00, 0xF1, 0x01, \
+ 0x27, 0x01, 0x53, 0x01, 0x98, 0x01, 0xCE, 0x02, 0x22, 0x02, \
+ 0x43, 0x02, 0x50, 0x02, 0x9E, 0x02, 0xDD, 0x03, 0x00, 0x03, \
+ 0x2E, 0x03, 0x54, 0x03, 0x7F, 0x03, 0x95, 0x03, 0xB3, 0x03, \
+ 0xC2, 0x03, 0xE1, 0x03, 0xF1, 0x03, 0xFE
+
+/*
+ * The Hydis HVA40WV1 panel
+ */
+static const struct nt35510_config nt35510_hydis_hva40wv1 = {
+ .width_mm = 52,
+ .height_mm = 86,
+ /*
+ * As the Hydis panel is used in command mode, the porches etc
+ * are settings programmed internally into the NT35510 controller
+ * and generated toward the physical display. As the panel is not
+ * used in video mode, these are not really exposed to the DSI
+ * host.
+ *
+ * Display frame rate control:
+ * Frame rate = (20 MHz / 1) / (389 * (7 + 50 + 800)) ~= 60 Hz
+ */
+ .mode = {
+ /* The internal pixel clock of the NT35510 is 20 MHz */
+ .clock = 20000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 2, /* HFP = 2 */
+ .hsync_end = 480 + 2 + 0, /* HSync = 0 */
+ .htotal = 480 + 2 + 0 + 5, /* HBP = 5 */
+ .vdisplay = 800,
+ .vsync_start = 800 + 2, /* VFP = 2 */
+ .vsync_end = 800 + 2 + 0, /* VSync = 0 */
+ .vtotal = 800 + 2 + 0 + 5, /* VBP = 5 */
+ .vrefresh = 60, /* Calculated */
+ .flags = 0,
+ },
+ /* 0x09: AVDD = 5.6V */
+ .avdd = { 0x09, 0x09, 0x09 },
+ /* 0x34: PCK = Hsync/2, BTP = 2 x VDDB */
+ .bt1ctr = { 0x34, 0x34, 0x34 },
+ /* 0x09: AVEE = -5.6V */
+ .avee = { 0x09, 0x09, 0x09 },
+ /* 0x24: NCK = Hsync/2, BTN = -2 x VDDB */
+ .bt2ctr = { 0x24, 0x24, 0x24 },
+ /* 0x05 = 12V */
+ .vgh = { 0x05, 0x05, 0x05 },
+ /* 0x24: HCK = Hsync/2, BTH = AVDD - AVEE + VDDB */
+ .bt4ctr = { 0x24, 0x24, 0x24 },
+ /* 0x0B = -13V */
+ .vgl = { 0x0B, 0x0B, 0x0B },
+ /* 0x24: LCK = Hsync/2, BTL = AVEE + VCL - AVDD */
+ .bt5ctr = { 0x24, 0x24, 0x24 },
+ /* VGMP: 0x0A3 = 5.0375V, VGSP = 0V */
+ .vgp = { 0x00, 0xA3, 0x00 },
+ /* VGMN: 0x0A3 = 5.0375V, VGSN = 0V */
+ .vgn = { 0x00, 0xA3, 0x00 },
+ /* SDEQCTR: source driver EQ mode 2, 2.5 us rise time on each step */
+ .sdeqctr = { 0x01, 0x05, 0x05, 0x05 },
+ /* SDVPCTR: Normal operation off color during v porch */
+ .sdvpctr = 0x01,
+ /* T1: number of pixel clocks on one scanline: 0x184 = 389 clocks */
+ .t1 = 0x0184,
+ /* VBP: vertical back porch toward the panel */
+ .vbp = 7,
+ /* VFP: vertical front porch toward the panel */
+ .vfp = 50,
+ /* PSEL: divide pixel clock 20MHz with 1 (no clock downscaling) */
+ .psel = 0,
+ /* DPMCTR12: 0x03: LVGL = VGLX, overlap mode, swap R->L O->E */
+ .dpmctr12 = { 0x03, 0x00, 0x00, },
+ /* Default gamma correction values */
+ .gamma_corr_pos_r = { NT35510_GAMMA_POS_DEFAULT },
+ .gamma_corr_pos_g = { NT35510_GAMMA_POS_DEFAULT },
+ .gamma_corr_pos_b = { NT35510_GAMMA_POS_DEFAULT },
+ .gamma_corr_neg_r = { NT35510_GAMMA_NEG_DEFAULT },
+ .gamma_corr_neg_g = { NT35510_GAMMA_NEG_DEFAULT },
+ .gamma_corr_neg_b = { NT35510_GAMMA_NEG_DEFAULT },
+};
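
As a cross-check of the ~60 Hz figure in the comment above, a small
helper (illustrative only, not part of the driver) reproduces the frame
rate from the timing fields of a config:

    /* Sketch: frame rate from the NT35510 internal timing fields */
    static unsigned int conf_frame_rate(const struct nt35510_config *conf)
    {
        unsigned long clk = 20000000 >> conf->psel; /* psel: /1 /2 /4 /8 */
        unsigned long line = conf->t1 + 1;          /* 0x184 -> 389 ticks */
        unsigned long frame = line * (conf->vbp + conf->vfp +
                                      conf->mode.vdisplay);

        return DIV_ROUND_CLOSEST(clk, frame);
    }

For the Hydis data this evaluates 20000000 / (389 * 857), which rounds
to 60.
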
+
+static const struct of_device_id nt35510_of_match[] = {
+ {
+ .compatible = "hydis,hva40wv1",
+ .data = &nt35510_hydis_hva40wv1,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nt35510_of_match);
+
+static struct mipi_dsi_driver nt35510_driver = {
+ .probe = nt35510_probe,
+ .remove = nt35510_remove,
+ .driver = {
+ .name = "panel-novatek-nt35510",
+ .of_match_table = nt35510_of_match,
+ },
+};
+module_mipi_dsi_driver(nt35510_driver);
+
+MODULE_AUTHOR("Linus Walleij <[email protected]>");
+MODULE_DESCRIPTION("NT35510-based panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 3c52f15f7a1c..9bb2e8c7934a 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -373,6 +373,12 @@ static const struct of_device_id ld9040_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ld9040_of_match);
+static const struct spi_device_id ld9040_ids[] = {
+ { "ld9040", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, ld9040_ids);
+
static struct spi_driver ld9040_driver = {
.probe = ld9040_probe,
.remove = ld9040_remove,
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
new file mode 100644
index 000000000000..9d843fcc3a22
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019, Michael Srba
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct s6e88a0_ams452ef01 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+
+ bool prepared;
+};
+
+static inline struct
+s6e88a0_ams452ef01 *to_s6e88a0_ams452ef01(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6e88a0_ams452ef01, panel);
+}
+
+#define dsi_dcs_write_seq(dsi, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
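
One subtlety worth noting: the bare "return ret;" in the macro body
returns from the *calling* function on error, so the macro may only be
used inside functions that return int. A call such as
dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a) expands roughly to:

    do {
        static const u8 d[] = { 0xf0, 0x5a, 0x5a };
        int ret;

        ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d));
        if (ret < 0)
            return ret; /* leaves the enclosing function */
    } while (0);
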
+
+static void s6e88a0_ams452ef01_reset(struct s6e88a0_ams452ef01 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10000, 11000);
+}
+
+static int s6e88a0_ams452ef01_on(struct s6e88a0_ams452ef01 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a); // enable LEVEL2 commands
+ dsi_dcs_write_seq(dsi, 0xcc, 0x4c); // set Pixel Clock Divider polarity
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ dsi_dcs_write_seq(dsi, 0xca,                     // set default brightness/gamma
+ dsi_dcs_write_seq(dsi, 0xca,
+ 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, // V255 RR,GG,BB
+ 0x80, 0x80, 0x80, // V203 R,G,B
+ 0x80, 0x80, 0x80, // V151 R,G,B
+ 0x80, 0x80, 0x80, // V87 R,G,B
+ 0x80, 0x80, 0x80, // V51 R,G,B
+ 0x80, 0x80, 0x80, // V35 R,G,B
+ 0x80, 0x80, 0x80, // V23 R,G,B
+ 0x80, 0x80, 0x80, // V11 R,G,B
+ 0x6b, 0x68, 0x71, // V3 R,G,B
+ 0x00, 0x00, 0x00); // V1 R,G,B
+ // set default AMOLED Off Ratio
+ dsi_dcs_write_seq(dsi, 0xb2, 0x40, 0x0a, 0x17, 0x00, 0x0a);
+ dsi_dcs_write_seq(dsi, 0xb6, 0x2c, 0x0b); // set default ELVSS voltage
+ dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ dsi_dcs_write_seq(dsi, 0xf7, 0x03); // gamma/aor update
+ dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5); // disable LEVEL2 commands
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display on: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6e88a0_ams452ef01_off(struct s6e88a0_ams452ef01 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ msleep(35);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ return 0;
+}
+
+static int s6e88a0_ams452ef01_prepare(struct drm_panel *panel)
+{
+ struct s6e88a0_ams452ef01 *ctx = to_s6e88a0_ams452ef01(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ s6e88a0_ams452ef01_reset(ctx);
+
+ ret = s6e88a0_ams452ef01_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ return ret;
+ }
+
+ ctx->prepared = true;
+ return 0;
+}
+
+static int s6e88a0_ams452ef01_unprepare(struct drm_panel *panel)
+{
+ struct s6e88a0_ams452ef01 *ctx = to_s6e88a0_ams452ef01(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = s6e88a0_ams452ef01_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+
+ ctx->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode s6e88a0_ams452ef01_mode = {
+ .clock = (540 + 88 + 4 + 20) * (960 + 14 + 2 + 8) * 60 / 1000,
+ .hdisplay = 540,
+ .hsync_start = 540 + 88,
+ .hsync_end = 540 + 88 + 4,
+ .htotal = 540 + 88 + 4 + 20,
+ .vdisplay = 960,
+ .vsync_start = 960 + 14,
+ .vsync_end = 960 + 14 + 2,
+ .vtotal = 960 + 14 + 2 + 8,
+ .vrefresh = 60,
+ .width_mm = 56,
+ .height_mm = 100,
+};
+
+static int s6e88a0_ams452ef01_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &s6e88a0_ams452ef01_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs s6e88a0_ams452ef01_panel_funcs = {
+ .unprepare = s6e88a0_ams452ef01_unprepare,
+ .prepare = s6e88a0_ams452ef01_prepare,
+ .get_modes = s6e88a0_ams452ef01_get_modes,
+};
+
+static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct s6e88a0_ams452ef01 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->supplies[0].supply = "vdd3";
+ ctx->supplies[1].supply = "vci";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get regulators: %d\n", ret);
+ return ret;
+ }
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ ret = PTR_ERR(ctx->reset_gpio);
+ dev_err(dev, "Failed to get reset-gpios: %d\n", ret);
+ return ret;
+ }
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
+
+ drm_panel_init(&ctx->panel, dev, &s6e88a0_ams452ef01_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0) {
+ dev_err(dev, "Failed to add panel: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6e88a0_ams452ef01_remove(struct mipi_dsi_device *dsi)
+{
+ struct s6e88a0_ams452ef01 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id s6e88a0_ams452ef01_of_match[] = {
+ { .compatible = "samsung,s6e88a0-ams452ef01" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, s6e88a0_ams452ef01_of_match);
+
+static struct mipi_dsi_driver s6e88a0_ams452ef01_driver = {
+ .probe = s6e88a0_ams452ef01_probe,
+ .remove = s6e88a0_ams452ef01_remove,
+ .driver = {
+ .name = "panel-s6e88a0-ams452ef01",
+ .of_match_table = s6e88a0_ams452ef01_of_match,
+ },
+};
+module_mipi_dsi_driver(s6e88a0_ams452ef01_driver);
+
+MODULE_AUTHOR("Michael Srba <[email protected]>");
+MODULE_DESCRIPTION("MIPI-DSI based Panel Driver for AMS452EF01 AMOLED LCD with a S6E88A0 controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index e14c14ac62b5..0ce81b1f36af 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -351,6 +351,65 @@ static const struct drm_panel_funcs panel_simple_funcs = {
.get_timings = panel_simple_get_timings,
};
+static struct panel_desc panel_dpi;
+
+static int panel_dpi_probe(struct device *dev,
+ struct panel_simple *panel)
+{
+ struct display_timing *timing;
+ const struct device_node *np;
+ struct panel_desc *desc;
+ unsigned int bus_flags;
+ struct videomode vm;
+ const char *mapping;
+ int ret;
+
+ np = dev->of_node;
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ timing = devm_kzalloc(dev, sizeof(*timing), GFP_KERNEL);
+ if (!timing)
+ return -ENOMEM;
+
+ ret = of_get_display_timing(np, "panel-timing", timing);
+ if (ret < 0) {
+ dev_err(dev, "%pOF: no panel-timing node found for \"panel-dpi\" binding\n",
+ np);
+ return ret;
+ }
+
+ desc->timings = timing;
+ desc->num_timings = 1;
+
+ of_property_read_u32(np, "width-mm", &desc->size.width);
+ of_property_read_u32(np, "height-mm", &desc->size.height);
+
+ /* data-mapping is optional: only parse it when the property exists */
+ ret = of_property_read_string(np, "data-mapping", &mapping);
+ if (!ret) {
+ if (!strcmp(mapping, "rgb24"))
+ desc->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ else if (!strcmp(mapping, "rgb565"))
+ desc->bus_format = MEDIA_BUS_FMT_RGB565_1X16;
+ else if (!strcmp(mapping, "bgr666"))
+ desc->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
+ else if (!strcmp(mapping, "lvds666"))
+ desc->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
+ }
+
+ /* Extract bus_flags from display_timing */
+ bus_flags = 0;
+ vm.flags = timing->flags;
+ drm_bus_flags_from_videomode(&vm, &bus_flags);
+ desc->bus_flags = bus_flags;
+
+ /* We do not know the connector for the DT node, so guess it */
+ desc->connector_type = DRM_MODE_CONNECTOR_DPI;
+
+ panel->desc = desc;
+
+ return 0;
+}
+
#define PANEL_SIMPLE_BOUNDS_CHECK(to_check, bounds, field) \
(to_check->field.typ >= bounds->field.min && \
to_check->field.typ <= bounds->field.max)
@@ -437,8 +496,15 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
return -EPROBE_DEFER;
}
- if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
- panel_simple_parse_panel_timing_node(dev, panel, &dt);
+ if (desc == &panel_dpi) {
+ /* Handle the generic panel-dpi binding */
+ err = panel_dpi_probe(dev, panel);
+ if (err)
+ goto free_ddc;
+ } else {
+ if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
+ panel_simple_parse_panel_timing_node(dev, panel, &dt);
+ }
drm_panel_init(&panel->base, dev, &panel_simple_funcs,
desc->connector_type);
@@ -1301,6 +1367,37 @@ static const struct panel_desc edt_et035012dm6 = {
.bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
};
+static const struct drm_display_mode edt_etm043080dh6gp_mode = {
+ .clock = 10870,
+ .hdisplay = 480,
+ .hsync_start = 480 + 8,
+ .hsync_end = 480 + 8 + 4,
+ .htotal = 480 + 8 + 4 + 41,
+
+ /*
+ * IWG22M: Y resolution changed for "dc_linuxfb" module crashing while
+ * fb_align
+ */
+
+ .vdisplay = 288,
+ .vsync_start = 288 + 2,
+ .vsync_end = 288 + 2 + 4,
+ .vtotal = 288 + 2 + 4 + 10,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc edt_etm043080dh6gp = {
+ .modes = &edt_etm043080dh6gp_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 100,
+ .height = 65,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct drm_display_mode edt_etm0430g0dh6_mode = {
.clock = 9000,
.hdisplay = 480,
@@ -1440,6 +1537,33 @@ static const struct panel_desc foxlink_fl500wvr00_a0t = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode frida_frd350h54004_mode = {
+ .clock = 6000,
+ .hdisplay = 320,
+ .hsync_start = 320 + 44,
+ .hsync_end = 320 + 44 + 16,
+ .htotal = 320 + 44 + 16 + 20,
+ .vdisplay = 240,
+ .vsync_start = 240 + 2,
+ .vsync_end = 240 + 2 + 6,
+ .vtotal = 240 + 2 + 6 + 2,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc frida_frd350h54004 = {
+ .modes = &frida_frd350h54004_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 77,
+ .height = 64,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct drm_display_mode friendlyarm_hd702e_mode = {
.clock = 67185,
.hdisplay = 800,
@@ -2080,6 +2204,64 @@ static const struct panel_desc lg_lp129qe = {
},
};
+static const struct display_timing logictechno_lt161010_2nh_timing = {
+ .pixelclock = { 26400000, 33300000, 46800000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 16, 210, 354 },
+ .hback_porch = { 46, 46, 46 },
+ .hsync_len = { 1, 20, 40 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 7, 22, 147 },
+ .vback_porch = { 23, 23, 23 },
+ .vsync_len = { 1, 10, 20 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE,
+};
+
+static const struct panel_desc logictechno_lt161010_2nh = {
+ .timings = &logictechno_lt161010_2nh_timing,
+ .num_timings = 1,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
+static const struct display_timing logictechno_lt170410_2whc_timing = {
+ .pixelclock = { 68900000, 71100000, 73400000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 23, 60, 71 },
+ .hback_porch = { 23, 60, 71 },
+ .hsync_len = { 15, 40, 47 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 5, 7, 10 },
+ .vback_porch = { 5, 7, 10 },
+ .vsync_len = { 6, 9, 12 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE,
+};
+
+static const struct panel_desc logictechno_lt170410_2whc = {
+ .timings = &logictechno_lt170410_2whc_timing,
+ .num_timings = 1,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
.clock = 30400,
.hdisplay = 800,
@@ -2095,7 +2277,7 @@ static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
};
static const struct drm_display_mode logicpd_type_28_mode = {
- .clock = 9000,
+ .clock = 9107,
.hdisplay = 480,
.hsync_start = 480 + 3,
.hsync_end = 480 + 3 + 42,
@@ -2224,6 +2406,51 @@ static const struct panel_desc netron_dy_e231732 = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode neweast_wjfh116008a_modes[] = {
+ {
+ .clock = 138500,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1080 + 3 + 5 + 23,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ }, {
+ .clock = 110920,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1080 + 3 + 5 + 23,
+ .vrefresh = 48,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ }
+};
+
+static const struct panel_desc neweast_wjfh116008a = {
+ .modes = neweast_wjfh116008a_modes,
+ .num_modes = 2,
+ .bpc = 6,
+ .size = {
+ .width = 260,
+ .height = 150,
+ },
+ .delay = {
+ .prepare = 110,
+ .enable = 20,
+ .unprepare = 500,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
static const struct drm_display_mode newhaven_nhd_43_480272ef_atxl_mode = {
.clock = 9000,
.hdisplay = 480,
@@ -2390,15 +2617,15 @@ static const struct panel_desc ontat_yx700wv03 = {
};
static const struct drm_display_mode ortustech_com37h3m_mode = {
- .clock = 22153,
+ .clock = 22230,
.hdisplay = 480,
- .hsync_start = 480 + 8,
- .hsync_end = 480 + 8 + 10,
- .htotal = 480 + 8 + 10 + 10,
+ .hsync_start = 480 + 40,
+ .hsync_end = 480 + 40 + 10,
+ .htotal = 480 + 40 + 10 + 40,
.vdisplay = 640,
.vsync_start = 640 + 4,
- .vsync_end = 640 + 4 + 3,
- .vtotal = 640 + 4 + 3 + 4,
+ .vsync_end = 640 + 4 + 2,
+ .vtotal = 640 + 4 + 2 + 4,
.vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
@@ -2439,6 +2666,7 @@ static const struct panel_desc ortustech_com43h4m85ulc = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct drm_display_mode osddisplays_osd070t1718_19ts_mode = {
@@ -2464,7 +2692,8 @@ static const struct panel_desc osddisplays_osd070t1718_19ts = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
+ DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
@@ -2546,6 +2775,35 @@ static const struct panel_desc rocktech_rk070er9427 = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode rocktech_rk101ii01d_ct_mode = {
+ .clock = 71100,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 48,
+ .hsync_end = 1280 + 48 + 32,
+ .htotal = 1280 + 48 + 32 + 80,
+ .vdisplay = 800,
+ .vsync_start = 800 + 2,
+ .vsync_end = 800 + 2 + 5,
+ .vtotal = 800 + 2 + 5 + 16,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc rocktech_rk101ii01d_ct = {
+ .modes = &rocktech_rk101ii01d_ct_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .delay = {
+ .prepare = 50,
+ .disable = 50,
+ },
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
.clock = 271560,
.hdisplay = 2560,
@@ -2768,30 +3026,6 @@ static const struct panel_desc sharp_lq123p1jx31 = {
},
};
-static const struct drm_display_mode sharp_lq150x1lg11_mode = {
- .clock = 71100,
- .hdisplay = 1024,
- .hsync_start = 1024 + 168,
- .hsync_end = 1024 + 168 + 64,
- .htotal = 1024 + 168 + 64 + 88,
- .vdisplay = 768,
- .vsync_start = 768 + 37,
- .vsync_end = 768 + 37 + 2,
- .vtotal = 768 + 37 + 2 + 8,
- .vrefresh = 60,
-};
-
-static const struct panel_desc sharp_lq150x1lg11 = {
- .modes = &sharp_lq150x1lg11_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 304,
- .height = 228,
- },
- .bus_format = MEDIA_BUS_FMT_RGB565_1X16,
-};
-
static const struct display_timing sharp_ls020b1dd01d_timing = {
.pixelclock = { 2000000, 4200000, 5000000 },
.hactive = { 240, 240, 240 },
@@ -3023,7 +3257,7 @@ static const struct panel_desc toshiba_lt089ac29000 = {
.width = 194,
.height = 116,
},
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
@@ -3286,6 +3520,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "edt,et035012dm6",
.data = &edt_et035012dm6,
}, {
+ .compatible = "edt,etm043080dh6gp",
+ .data = &edt_etm043080dh6gp,
+ }, {
.compatible = "edt,etm0430g0dh6",
.data = &edt_etm0430g0dh6,
}, {
@@ -3310,6 +3547,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "foxlink,fl500wvr00-a0t",
.data = &foxlink_fl500wvr00_a0t,
}, {
+ .compatible = "frida,frd350h54004",
+ .data = &frida_frd350h54004,
+ }, {
.compatible = "friendlyarm,hd702e",
.data = &friendlyarm_hd702e,
}, {
@@ -3388,6 +3628,15 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "logicpd,type28",
.data = &logicpd_type_28,
}, {
+ .compatible = "logictechno,lt161010-2nhc",
+ .data = &logictechno_lt161010_2nh,
+ }, {
+ .compatible = "logictechno,lt161010-2nhr",
+ .data = &logictechno_lt161010_2nh,
+ }, {
+ .compatible = "logictechno,lt170410-2whc",
+ .data = &logictechno_lt170410_2whc,
+ }, {
.compatible = "mitsubishi,aa070mc01-ca1",
.data = &mitsubishi_aa070mc01,
}, {
@@ -3400,6 +3649,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "netron-dy,e231732",
.data = &netron_dy_e231732,
}, {
+ .compatible = "neweast,wjfh116008a",
+ .data = &neweast_wjfh116008a,
+ }, {
.compatible = "newhaven,nhd-4.3-480272ef-atxl",
.data = &newhaven_nhd_43_480272ef_atxl,
}, {
@@ -3439,6 +3691,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "rocktech,rk070er9427",
.data = &rocktech_rk070er9427,
}, {
+ .compatible = "rocktech,rk101ii01d-ct",
+ .data = &rocktech_rk101ii01d_ct,
+ }, {
.compatible = "samsung,lsn122dl01-c01",
.data = &samsung_lsn122dl01_c01,
}, {
@@ -3466,9 +3721,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "sharp,lq123p1jx31",
.data = &sharp_lq123p1jx31,
}, {
- .compatible = "sharp,lq150x1lg11",
- .data = &sharp_lq150x1lg11,
- }, {
.compatible = "sharp,ls020b1dd01d",
.data = &sharp_ls020b1dd01d,
}, {
@@ -3526,6 +3778,10 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "winstar,wf35ltiacd",
.data = &winstar_wf35ltiacd,
}, {
+ /* Must be the last entry */
+ .compatible = "panel-dpi",
+ .data = &panel_dpi,
+ }, {
/* sentinel */
}
};
diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
index de0abf76ae6f..c91e55b2d7a3 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx424akp.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
@@ -48,7 +48,7 @@ struct acx424akp {
};
static const struct drm_display_mode sony_acx424akp_vid_mode = {
- .clock = 330000,
+ .clock = 27234,
.hdisplay = 480,
.hsync_start = 480 + 15,
.hsync_end = 480 + 15 + 0,
@@ -68,7 +68,7 @@ static const struct drm_display_mode sony_acx424akp_vid_mode = {
* command mode using the maximum HS frequency.
*/
static const struct drm_display_mode sony_acx424akp_cmd_mode = {
- .clock = 420160,
+ .clock = 35478,
.hdisplay = 480,
.hsync_start = 480 + 154,
.hsync_end = 480 + 154 + 16,
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index cf29405a2dbe..aeca15dfeb3c 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -86,7 +86,12 @@ struct td028ttec1_panel {
#define to_td028ttec1_device(p) container_of(p, struct td028ttec1_panel, panel)
-static int jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
+/*
+ * noinline_for_stack so we don't get multiple copies of tx_buf
+ * on the stack in case of gcc-plugin-structleak
+ */
+static int noinline_for_stack
+jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
{
struct spi_device *spi = lcd->spi;
u16 tx_buf = JBT_COMMAND | reg;
@@ -105,8 +110,9 @@ static int jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
return ret;
}
-static int jbt_reg_write_1(struct td028ttec1_panel *lcd,
- u8 reg, u8 data, int *err)
+static int noinline_for_stack
+jbt_reg_write_1(struct td028ttec1_panel *lcd,
+ u8 reg, u8 data, int *err)
{
struct spi_device *spi = lcd->spi;
u16 tx_buf[2];
@@ -128,8 +134,9 @@ static int jbt_reg_write_1(struct td028ttec1_panel *lcd,
return ret;
}
-static int jbt_reg_write_2(struct td028ttec1_panel *lcd,
- u8 reg, u16 data, int *err)
+static int noinline_for_stack
+jbt_reg_write_2(struct td028ttec1_panel *lcd,
+ u8 reg, u16 data, int *err)
{
struct spi_device *spi = lcd->spi;
u16 tx_buf[3];
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 238fb6d54df4..8136babd3ba9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
#include "panfrost_device.h"
@@ -87,18 +88,27 @@ static void panfrost_clk_fini(struct panfrost_device *pfdev)
static int panfrost_regulator_init(struct panfrost_device *pfdev)
{
- int ret;
+ int ret, i;
- pfdev->regulator = devm_regulator_get(pfdev->dev, "mali");
- if (IS_ERR(pfdev->regulator)) {
- ret = PTR_ERR(pfdev->regulator);
- dev_err(pfdev->dev, "failed to get regulator: %d\n", ret);
+ if (WARN(pfdev->comp->num_supplies > ARRAY_SIZE(pfdev->regulators),
+ "Too many supplies in compatible structure.\n"))
+ return -EINVAL;
+
+ for (i = 0; i < pfdev->comp->num_supplies; i++)
+ pfdev->regulators[i].supply = pfdev->comp->supply_names[i];
+
+ ret = devm_regulator_bulk_get(pfdev->dev,
+ pfdev->comp->num_supplies,
+ pfdev->regulators);
+ if (ret < 0) {
+ dev_err(pfdev->dev, "failed to get regulators: %d\n", ret);
return ret;
}
- ret = regulator_enable(pfdev->regulator);
+ ret = regulator_bulk_enable(pfdev->comp->num_supplies,
+ pfdev->regulators);
if (ret < 0) {
- dev_err(pfdev->dev, "failed to enable regulator: %d\n", ret);
+ dev_err(pfdev->dev, "failed to enable regulators: %d\n", ret);
return ret;
}
@@ -107,7 +117,81 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev)
static void panfrost_regulator_fini(struct panfrost_device *pfdev)
{
- regulator_disable(pfdev->regulator);
+ regulator_bulk_disable(pfdev->comp->num_supplies,
+ pfdev->regulators);
+}
+
+static void panfrost_pm_domain_fini(struct panfrost_device *pfdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pfdev->pm_domain_devs); i++) {
+ if (!pfdev->pm_domain_devs[i])
+ break;
+
+ if (pfdev->pm_domain_links[i])
+ device_link_del(pfdev->pm_domain_links[i]);
+
+ dev_pm_domain_detach(pfdev->pm_domain_devs[i], true);
+ }
+}
+
+static int panfrost_pm_domain_init(struct panfrost_device *pfdev)
+{
+ int err;
+ int i, num_domains;
+
+ num_domains = of_count_phandle_with_args(pfdev->dev->of_node,
+ "power-domains",
+ "#power-domain-cells");
+
+ /*
+ * A single domain is handled by the core, and if only a single
+ * power domain is requested, the property is optional.
+ */
+ if (num_domains < 2 && pfdev->comp->num_pm_domains < 2)
+ return 0;
+
+ if (num_domains != pfdev->comp->num_pm_domains) {
+ dev_err(pfdev->dev,
+ "Incorrect number of power domains: %d provided, %d needed\n",
+ num_domains, pfdev->comp->num_pm_domains);
+ return -EINVAL;
+ }
+
+ if (WARN(num_domains > ARRAY_SIZE(pfdev->pm_domain_devs),
+ "Too many power domains in compatible structure.\n"))
+ return -EINVAL;
+
+ for (i = 0; i < num_domains; i++) {
+ pfdev->pm_domain_devs[i] =
+ dev_pm_domain_attach_by_name(pfdev->dev,
+ pfdev->comp->pm_domain_names[i]);
+ if (IS_ERR_OR_NULL(pfdev->pm_domain_devs[i])) {
+ err = PTR_ERR(pfdev->pm_domain_devs[i]) ? : -ENODATA;
+ pfdev->pm_domain_devs[i] = NULL;
+ dev_err(pfdev->dev,
+ "failed to get pm-domain %s(%d): %d\n",
+ pfdev->comp->pm_domain_names[i], i, err);
+ goto err;
+ }
+
+ pfdev->pm_domain_links[i] = device_link_add(pfdev->dev,
+ pfdev->pm_domain_devs[i], DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE);
+ if (!pfdev->pm_domain_links[i]) {
+ dev_err(pfdev->pm_domain_devs[i],
+ "adding device link failed!\n");
+ err = -ENODEV;
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ panfrost_pm_domain_fini(pfdev);
+ return err;
}
int panfrost_device_init(struct panfrost_device *pfdev)
@@ -140,37 +224,43 @@ int panfrost_device_init(struct panfrost_device *pfdev)
goto err_out1;
}
+ err = panfrost_pm_domain_init(pfdev);
+ if (err)
+ goto err_out2;
+
res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0);
pfdev->iomem = devm_ioremap_resource(pfdev->dev, res);
if (IS_ERR(pfdev->iomem)) {
dev_err(pfdev->dev, "failed to ioremap iomem\n");
err = PTR_ERR(pfdev->iomem);
- goto err_out2;
+ goto err_out3;
}
err = panfrost_gpu_init(pfdev);
if (err)
- goto err_out2;
+ goto err_out3;
err = panfrost_mmu_init(pfdev);
if (err)
- goto err_out3;
+ goto err_out4;
err = panfrost_job_init(pfdev);
if (err)
- goto err_out4;
+ goto err_out5;
err = panfrost_perfcnt_init(pfdev);
if (err)
- goto err_out5;
+ goto err_out6;
return 0;
-err_out5:
+err_out6:
panfrost_job_fini(pfdev);
-err_out4:
+err_out5:
panfrost_mmu_fini(pfdev);
-err_out3:
+err_out4:
panfrost_gpu_fini(pfdev);
+err_out3:
+ panfrost_pm_domain_fini(pfdev);
err_out2:
panfrost_reset_fini(pfdev);
err_out1:
@@ -186,6 +276,7 @@ void panfrost_device_fini(struct panfrost_device *pfdev)
panfrost_job_fini(pfdev);
panfrost_mmu_fini(pfdev);
panfrost_gpu_fini(pfdev);
+ panfrost_pm_domain_fini(pfdev);
panfrost_reset_fini(pfdev);
panfrost_regulator_fini(pfdev);
panfrost_clk_fini(pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 06713811b92c..c30c719a8059 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -7,6 +7,7 @@
#include <linux/atomic.h>
#include <linux/io-pgtable.h>
+#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <drm/drm_device.h>
#include <drm/drm_mm.h>
@@ -19,6 +20,8 @@ struct panfrost_job;
struct panfrost_perfcnt;
#define NUM_JOB_SLOTS 3
+#define MAX_REGULATORS 2
+#define MAX_PM_DOMAINS 3
struct panfrost_features {
u16 id;
@@ -51,6 +54,23 @@ struct panfrost_features {
unsigned long hw_issues[64 / BITS_PER_LONG];
};
+/*
+ * Features that cannot be automatically detected and need matching using the
+ * compatible string, typically SoC-specific.
+ */
+struct panfrost_compatible {
+ /* Supplies count and names. */
+ int num_supplies;
+ const char * const *supply_names;
+ /*
+ * Number of power domains required, note that values 0 and 1 are
+ * handled identically, as only values > 1 need special handling.
+ */
+ int num_pm_domains;
+ /* Only required if num_pm_domains > 1. */
+ const char * const *pm_domain_names;
+};
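
For illustration, a hypothetical SoC entry exercising all of these
fields could look like the sketch below. The names are invented; the
only entry this patch actually adds is the single-supply default in
panfrost_drv.c further down.

    static const char * const example_supplies[] = { "mali", "sram" };
    static const char * const example_domains[] = { "core0", "core1", "core2" };

    /* Sketch: a part needing two supplies and three power domains */
    static const struct panfrost_compatible example_soc_data = {
        .num_supplies = ARRAY_SIZE(example_supplies),
        .supply_names = example_supplies,
        .num_pm_domains = ARRAY_SIZE(example_domains),
        .pm_domain_names = example_domains,
    };

Note the counts stay within MAX_REGULATORS (2) and MAX_PM_DOMAINS (3)
defined above.
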
+
struct panfrost_device {
struct device *dev;
struct drm_device *ddev;
@@ -59,10 +79,14 @@ struct panfrost_device {
void __iomem *iomem;
struct clk *clock;
struct clk *bus_clock;
- struct regulator *regulator;
+ struct regulator_bulk_data regulators[MAX_REGULATORS];
struct reset_control *rstc;
+ /* pm_domains for devices with more than one. */
+ struct device *pm_domain_devs[MAX_PM_DOMAINS];
+ struct device_link *pm_domain_links[MAX_PM_DOMAINS];
struct panfrost_features features;
+ const struct panfrost_compatible *comp;
spinlock_t as_lock;
unsigned long as_in_use_mask;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 48e3c4165247..882fecc33fdb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -78,8 +78,10 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct panfrost_file_priv *priv = file->driver_priv;
struct panfrost_gem_object *bo;
struct drm_panfrost_create_bo *args = data;
+ struct panfrost_gem_mapping *mapping;
if (!args->size || args->pad ||
(args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
@@ -95,7 +97,14 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
if (IS_ERR(bo))
return PTR_ERR(bo);
- args->offset = bo->node.start << PAGE_SHIFT;
+ mapping = panfrost_gem_mapping_get(bo, priv);
+ if (!mapping) {
+ drm_gem_object_put_unlocked(&bo->base.base);
+ return -EINVAL;
+ }
+
+ args->offset = mapping->mmnode.start << PAGE_SHIFT;
+ panfrost_gem_mapping_put(mapping);
return 0;
}
@@ -119,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *dev,
struct drm_panfrost_submit *args,
struct panfrost_job *job)
{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_object *bo;
+ unsigned int i;
+ int ret;
+
job->bo_count = args->bo_handle_count;
if (!job->bo_count)
@@ -130,9 +144,33 @@ panfrost_lookup_bos(struct drm_device *dev,
if (!job->implicit_fences)
return -ENOMEM;
- return drm_gem_objects_lookup(file_priv,
- (void __user *)(uintptr_t)args->bo_handles,
- job->bo_count, &job->bos);
+ ret = drm_gem_objects_lookup(file_priv,
+ (void __user *)(uintptr_t)args->bo_handles,
+ job->bo_count, &job->bos);
+ if (ret)
+ return ret;
+
+ job->mappings = kvmalloc_array(job->bo_count,
+ sizeof(struct panfrost_gem_mapping *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!job->mappings)
+ return -ENOMEM;
+
+ for (i = 0; i < job->bo_count; i++) {
+ struct panfrost_gem_mapping *mapping;
+
+ bo = to_panfrost_bo(job->bos[i]);
+ mapping = panfrost_gem_mapping_get(bo, priv);
+ if (!mapping) {
+ ret = -EINVAL;
+ break;
+ }
+
+ atomic_inc(&bo->gpu_usecount);
+ job->mappings[i] = mapping;
+ }
+
+ return ret;
}
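+
+/*
+ * The mapping references and gpu_usecount increments taken above are
+ * balanced in panfrost_job_cleanup() (see the panfrost_job.c hunk below);
+ * entries past a mid-loop failure remain NULL, which is why that cleanup
+ * loop stops at the first NULL mapping.
+ */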
/**
@@ -320,7 +358,9 @@ out:
static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
struct drm_panfrost_get_bo_offset *args = data;
+ struct panfrost_gem_mapping *mapping;
struct drm_gem_object *gem_obj;
struct panfrost_gem_object *bo;
@@ -331,18 +371,26 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
}
bo = to_panfrost_bo(gem_obj);
- args->offset = bo->node.start << PAGE_SHIFT;
-
+ mapping = panfrost_gem_mapping_get(bo, priv);
drm_gem_object_put_unlocked(gem_obj);
+
+ if (!mapping)
+ return -EINVAL;
+
+ args->offset = mapping->mmnode.start << PAGE_SHIFT;
+ panfrost_gem_mapping_put(mapping);
return 0;
}
static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct panfrost_file_priv *priv = file_priv->driver_priv;
struct drm_panfrost_madvise *args = data;
struct panfrost_device *pfdev = dev->dev_private;
struct drm_gem_object *gem_obj;
+ struct panfrost_gem_object *bo;
+ int ret = 0;
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
@@ -350,22 +398,48 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
return -ENOENT;
}
+ bo = to_panfrost_bo(gem_obj);
+
mutex_lock(&pfdev->shrinker_lock);
+ mutex_lock(&bo->mappings.lock);
+ if (args->madv == PANFROST_MADV_DONTNEED) {
+ struct panfrost_gem_mapping *first;
+
+ first = list_first_entry(&bo->mappings.list,
+ struct panfrost_gem_mapping,
+ node);
+
+ /*
+ * If we want to mark the BO purgeable, there must be only one
+ * user: the caller FD.
+ * We could do something smarter and mark the BO purgeable only
+ * when all its users have marked it purgeable, but globally
+ * visible/shared BOs are likely to never be marked purgeable
+ * anyway, so let's not bother.
+ */
+ if (!list_is_singular(&bo->mappings.list) ||
+ WARN_ON_ONCE(first->mmu != &priv->mmu)) {
+ ret = -EINVAL;
+ goto out_unlock_mappings;
+ }
+ }
+
args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
if (args->retained) {
- struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
-
if (args->madv == PANFROST_MADV_DONTNEED)
list_add_tail(&bo->base.madv_list,
&pfdev->shrinker_list);
else if (args->madv == PANFROST_MADV_WILLNEED)
list_del_init(&bo->base.madv_list);
}
+
+out_unlock_mappings:
+ mutex_unlock(&bo->mappings.lock);
mutex_unlock(&pfdev->shrinker_lock);
drm_gem_object_put_unlocked(gem_obj);
- return 0;
+ return ret;
}
int panfrost_unstable_ioctl_check(void)
@@ -510,6 +584,10 @@ static int panfrost_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pfdev);
+ pfdev->comp = of_device_get_match_data(&pdev->dev);
+ if (!pfdev->comp)
+ return -ENODEV;
+
/* Allocate and initialize the DRM device. */
ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
if (IS_ERR(ddev))
@@ -581,16 +659,24 @@ static int panfrost_remove(struct platform_device *pdev)
return 0;
}
+static const char * const default_supplies[] = { "mali" };
+static const struct panfrost_compatible default_data = {
+ .num_supplies = ARRAY_SIZE(default_supplies),
+ .supply_names = default_supplies,
+ .num_pm_domains = 1, /* optional */
+ .pm_domain_names = NULL,
+};
+
static const struct of_device_id dt_match[] = {
- { .compatible = "arm,mali-t604" },
- { .compatible = "arm,mali-t624" },
- { .compatible = "arm,mali-t628" },
- { .compatible = "arm,mali-t720" },
- { .compatible = "arm,mali-t760" },
- { .compatible = "arm,mali-t820" },
- { .compatible = "arm,mali-t830" },
- { .compatible = "arm,mali-t860" },
- { .compatible = "arm,mali-t880" },
+ { .compatible = "arm,mali-t604", .data = &default_data, },
+ { .compatible = "arm,mali-t624", .data = &default_data, },
+ { .compatible = "arm,mali-t628", .data = &default_data, },
+ { .compatible = "arm,mali-t720", .data = &default_data, },
+ { .compatible = "arm,mali-t760", .data = &default_data, },
+ { .compatible = "arm,mali-t820", .data = &default_data, },
+ { .compatible = "arm,mali-t830", .data = &default_data, },
+ { .compatible = "arm,mali-t860", .data = &default_data, },
+ { .compatible = "arm,mali-t880", .data = &default_data, },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
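All the Mali entries above share default_data, so the new of_device_get_match_data() check in panfrost_probe() returning -ENODEV can only trigger for a future table entry that forgets to set .data.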
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index fd766b1395fb..17b654e1eb94 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -29,6 +29,12 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
list_del_init(&bo->base.madv_list);
mutex_unlock(&pfdev->shrinker_lock);
+ /*
+ * If we still have mappings attached to the BO, there's a problem in
+ * our refcounting.
+ */
+ WARN_ON_ONCE(!list_empty(&bo->mappings.list));
+
if (bo->sgts) {
int i;
int n_sgt = bo->base.base.size / SZ_2M;
@@ -46,6 +52,69 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
drm_gem_shmem_free_object(obj);
}
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+ struct panfrost_file_priv *priv)
+{
+ struct panfrost_gem_mapping *iter, *mapping = NULL;
+
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(iter, &bo->mappings.list, node) {
+ if (iter->mmu == &priv->mmu) {
+ kref_get(&iter->refcount);
+ mapping = iter;
+ break;
+ }
+ }
+ mutex_unlock(&bo->mappings.lock);
+
+ return mapping;
+}
+
+static void
+panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
+{
+ struct panfrost_file_priv *priv;
+
+ if (mapping->active)
+ panfrost_mmu_unmap(mapping);
+
+ priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
+ spin_lock(&priv->mm_lock);
+ if (drm_mm_node_allocated(&mapping->mmnode))
+ drm_mm_remove_node(&mapping->mmnode);
+ spin_unlock(&priv->mm_lock);
+}
+
+static void panfrost_gem_mapping_release(struct kref *kref)
+{
+ struct panfrost_gem_mapping *mapping;
+
+ mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
+
+ panfrost_gem_teardown_mapping(mapping);
+ drm_gem_object_put_unlocked(&mapping->obj->base.base);
+ kfree(mapping);
+}
+
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
+{
+ if (!mapping)
+ return;
+
+ kref_put(&mapping->refcount, panfrost_gem_mapping_release);
+}
+
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
+{
+ struct panfrost_gem_mapping *mapping;
+
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(mapping, &bo->mappings.list, node)
+ panfrost_gem_teardown_mapping(mapping);
+ mutex_unlock(&bo->mappings.lock);
+}
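+
+/*
+ * A mapping is thus torn down either when its last reference goes away
+ * (panfrost_gem_mapping_release() above) or eagerly by the shrinker via
+ * panfrost_gem_teardown_mappings(); in the latter case the mapping object
+ * itself stays alive until its refcount drops to zero, and the
+ * drm_mm_node_allocated() check keeps a second teardown harmless.
+ */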
+
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
int ret;
@@ -54,6 +123,16 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_mapping *mapping;
+
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mapping->node);
+ kref_init(&mapping->refcount);
+ drm_gem_object_get(obj);
+ mapping->obj = bo;
/*
* Executable buffers cannot cross a 16MB boundary as the program
@@ -66,37 +145,48 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
else
align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
- bo->mmu = &priv->mmu;
+ mapping->mmu = &priv->mmu;
spin_lock(&priv->mm_lock);
- ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
+ ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
size >> PAGE_SHIFT, align, color, 0);
spin_unlock(&priv->mm_lock);
if (ret)
- return ret;
+ goto err;
if (!bo->is_heap) {
- ret = panfrost_mmu_map(bo);
- if (ret) {
- spin_lock(&priv->mm_lock);
- drm_mm_remove_node(&bo->node);
- spin_unlock(&priv->mm_lock);
- }
+ ret = panfrost_mmu_map(mapping);
+ if (ret)
+ goto err;
}
+
+ mutex_lock(&bo->mappings.lock);
+ WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
+ list_add_tail(&mapping->node, &bo->mappings.list);
+ mutex_unlock(&bo->mappings.lock);
+
+err:
+ if (ret)
+ panfrost_gem_mapping_put(mapping);
return ret;
}
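+
+/*
+ * Note the fall-through above: on success, ret is 0 when control reaches
+ * the err label, so panfrost_gem_mapping_put() is skipped and the initial
+ * mapping reference is kept, now owned by bo->mappings.list.
+ */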
void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
- struct panfrost_gem_object *bo = to_panfrost_bo(obj);
struct panfrost_file_priv *priv = file_priv->driver_priv;
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ struct panfrost_gem_mapping *mapping = NULL, *iter;
- if (bo->is_mapped)
- panfrost_mmu_unmap(bo);
+ mutex_lock(&bo->mappings.lock);
+ list_for_each_entry(iter, &bo->mappings.list, node) {
+ if (iter->mmu == &priv->mmu) {
+ mapping = iter;
+ list_del(&iter->node);
+ break;
+ }
+ }
+ mutex_unlock(&bo->mappings.lock);
- spin_lock(&priv->mm_lock);
- if (drm_mm_node_allocated(&bo->node))
- drm_mm_remove_node(&bo->node);
- spin_unlock(&priv->mm_lock);
+ panfrost_gem_mapping_put(mapping);
}
static int panfrost_gem_pin(struct drm_gem_object *obj)
@@ -136,6 +226,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
if (!obj)
return NULL;
+ INIT_LIST_HEAD(&obj->mappings.list);
+ mutex_init(&obj->mappings.lock);
obj->base.base.funcs = &panfrost_gem_funcs;
return &obj->base.base;
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 4b17e7308764..b3517ff9630c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -13,23 +13,52 @@ struct panfrost_gem_object {
struct drm_gem_shmem_object base;
struct sg_table *sgts;
- struct panfrost_mmu *mmu;
- struct drm_mm_node node;
- bool is_mapped :1;
+ /*
+ * Use a list for now. If searching a mapping ever becomes the
+ * bottleneck, we should consider using an RB-tree, or even better,
+ * let the core store drm_gem_object_mapping entries (where we
+ * could place driver specific data) instead of drm_gem_object ones
+ * in its drm_file->object_idr table.
+ *
+ * struct drm_gem_object_mapping {
+ * struct drm_gem_object *obj;
+ * void *driver_priv;
+ * };
+ */
+ struct {
+ struct list_head list;
+ struct mutex lock;
+ } mappings;
+
+ /*
+ * Count the number of jobs referencing this BO so we don't let the
+ * shrinker reclaim this object prematurely.
+ */
+ atomic_t gpu_usecount;
+
bool noexec :1;
bool is_heap :1;
};
+struct panfrost_gem_mapping {
+ struct list_head node;
+ struct kref refcount;
+ struct panfrost_gem_object *obj;
+ struct drm_mm_node mmnode;
+ struct panfrost_mmu *mmu;
+ bool active :1;
+};
+
static inline
struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
{
return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
}
-static inline
-struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
+static inline struct panfrost_gem_mapping *
+drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
{
- return container_of(node, struct panfrost_gem_object, node);
+ return container_of(node, struct panfrost_gem_mapping, mmnode);
}
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
@@ -49,6 +78,12 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
void panfrost_gem_close(struct drm_gem_object *obj,
struct drm_file *file_priv);
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+ struct panfrost_file_priv *priv);
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
+
void panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 458f0fa68111..288e46c40673 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -39,11 +39,15 @@ panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc
static bool panfrost_gem_purge(struct drm_gem_object *obj)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+
+ if (atomic_read(&bo->gpu_usecount))
+ return false;
if (!mutex_trylock(&shmem->pages_lock))
return false;
- panfrost_mmu_unmap(to_panfrost_bo(obj));
+ panfrost_gem_teardown_mappings(bo);
drm_gem_shmem_purge_locked(obj);
mutex_unlock(&shmem->pages_lock);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 8822ec13a0d6..f2c1ddc41a9b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -308,28 +308,26 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
val, val == pfdev->features.l2_present, 100, 1000);
-
- gpu_write(pfdev, STACK_PWRON_LO, pfdev->features.stack_present);
- ret |= readl_relaxed_poll_timeout(pfdev->iomem + STACK_READY_LO,
- val, val == pfdev->features.stack_present, 100, 1000);
+ if (ret)
+ dev_err(pfdev->dev, "error powering up gpu L2");
gpu_write(pfdev, SHADER_PWRON_LO, pfdev->features.shader_present);
- ret |= readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
val, val == pfdev->features.shader_present, 100, 1000);
+ if (ret)
+ dev_err(pfdev->dev, "error powering up gpu shader");
gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present);
- ret |= readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
val, val == pfdev->features.tiler_present, 100, 1000);
-
if (ret)
- dev_err(pfdev->dev, "error powering up gpu");
+ dev_err(pfdev->dev, "error powering up gpu tiler");
}
void panfrost_gpu_power_off(struct panfrost_device *pfdev)
{
gpu_write(pfdev, TILER_PWROFF_LO, 0);
gpu_write(pfdev, SHADER_PWROFF_LO, 0);
- gpu_write(pfdev, STACK_PWROFF_LO, 0);
gpu_write(pfdev, L2_PWROFF_LO, 0);
}
@@ -351,7 +349,7 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
return -ENODEV;
err = devm_request_irq(pfdev->dev, irq, panfrost_gpu_irq_handler,
- IRQF_SHARED, "gpu", pfdev);
+ IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev);
if (err) {
dev_err(pfdev->dev, "failed to request gpu irq");
return err;
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index a9ed088ebf08..7914b1570841 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -268,9 +268,21 @@ static void panfrost_job_cleanup(struct kref *ref)
dma_fence_put(job->done_fence);
dma_fence_put(job->render_done_fence);
+ if (job->mappings) {
+ for (i = 0; i < job->bo_count; i++) {
+ if (!job->mappings[i])
+ break;
+
+ atomic_dec(&job->mappings[i]->obj->gpu_usecount);
+ panfrost_gem_mapping_put(job->mappings[i]);
+ }
+ kvfree(job->mappings);
+ }
+
if (job->bos) {
for (i = 0; i < job->bo_count; i++)
drm_gem_object_put_unlocked(job->bos[i]);
+
kvfree(job->bos);
}
@@ -496,7 +508,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
return -ENODEV;
ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
- IRQF_SHARED, "job", pfdev);
+ IRQF_SHARED, KBUILD_MODNAME "-job", pfdev);
if (ret) {
dev_err(pfdev->dev, "failed to request job irq");
return ret;
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index 62454128a792..bbd3ba97ff67 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -32,6 +32,7 @@ struct panfrost_job {
/* Exclusive fences we have taken from the BOs to wait for */
struct dma_fence **implicit_fences;
+ struct panfrost_gem_mapping **mappings;
struct drm_gem_object **bos;
u32 bo_count;
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index a3ed64a1f15e..ed28aeba6d59 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -151,7 +151,12 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
as = mmu->as;
if (as >= 0) {
int en = atomic_inc_return(&mmu->as_count);
- WARN_ON(en >= NUM_JOB_SLOTS);
+
+ /*
+ * AS can be retained by active jobs or a perfcnt context,
+ * hence the '+ 1' here.
+ */
+ WARN_ON(en >= (NUM_JOB_SLOTS + 1));
list_move(&mmu->list, &pfdev->as_lru_list);
goto out;
@@ -269,14 +274,15 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
return 0;
}
-int panfrost_mmu_map(struct panfrost_gem_object *bo)
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
{
+ struct panfrost_gem_object *bo = mapping->obj;
struct drm_gem_object *obj = &bo->base.base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
struct sg_table *sgt;
int prot = IOMMU_READ | IOMMU_WRITE;
- if (WARN_ON(bo->is_mapped))
+ if (WARN_ON(mapping->active))
return 0;
if (bo->noexec)
@@ -286,25 +292,28 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
if (WARN_ON(IS_ERR(sgt)))
return PTR_ERR(sgt);
- mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
- bo->is_mapped = true;
+ mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
+ prot, sgt);
+ mapping->active = true;
return 0;
}
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{
+ struct panfrost_gem_object *bo = mapping->obj;
struct drm_gem_object *obj = &bo->base.base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
- struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
- u64 iova = bo->node.start << PAGE_SHIFT;
- size_t len = bo->node.size << PAGE_SHIFT;
+ struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
+ u64 iova = mapping->mmnode.start << PAGE_SHIFT;
+ size_t len = mapping->mmnode.size << PAGE_SHIFT;
size_t unmapped_len = 0;
- if (WARN_ON(!bo->is_mapped))
+ if (WARN_ON(!mapping->active))
return;
- dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);
+ dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
+ mapping->mmu->as, iova, len);
while (unmapped_len < len) {
size_t unmapped_page;
@@ -318,8 +327,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
unmapped_len += pgsize;
}
- panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
- bo->is_mapped = false;
+ panfrost_mmu_flush_range(pfdev, mapping->mmu,
+ mapping->mmnode.start << PAGE_SHIFT, len);
+ mapping->active = false;
}
static void mmu_tlb_inv_context_s1(void *cookie)
@@ -394,10 +404,10 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
free_io_pgtable_ops(mmu->pgtbl_ops);
}
-static struct panfrost_gem_object *
-addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
+static struct panfrost_gem_mapping *
+addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
{
- struct panfrost_gem_object *bo = NULL;
+ struct panfrost_gem_mapping *mapping = NULL;
struct panfrost_file_priv *priv;
struct drm_mm_node *node;
u64 offset = addr >> PAGE_SHIFT;
@@ -418,8 +428,9 @@ found_mmu:
drm_mm_for_each_node(node, &priv->mm) {
if (offset >= node->start &&
offset < (node->start + node->size)) {
- bo = drm_mm_node_to_panfrost_bo(node);
- drm_gem_object_get(&bo->base.base);
+ mapping = drm_mm_node_to_panfrost_mapping(node);
+
+ kref_get(&mapping->refcount);
break;
}
}
@@ -427,7 +438,7 @@ found_mmu:
spin_unlock(&priv->mm_lock);
out:
spin_unlock(&pfdev->as_lock);
- return bo;
+ return mapping;
}
#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
@@ -436,28 +447,30 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
u64 addr)
{
int ret, i;
+ struct panfrost_gem_mapping *bomapping;
struct panfrost_gem_object *bo;
struct address_space *mapping;
pgoff_t page_offset;
struct sg_table *sgt;
struct page **pages;
- bo = addr_to_drm_mm_node(pfdev, as, addr);
- if (!bo)
+ bomapping = addr_to_mapping(pfdev, as, addr);
+ if (!bomapping)
return -ENOENT;
+ bo = bomapping->obj;
if (!bo->is_heap) {
dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
- bo->node.start << PAGE_SHIFT);
+ bomapping->mmnode.start << PAGE_SHIFT);
ret = -EINVAL;
goto err_bo;
}
- WARN_ON(bo->mmu->as != as);
+ WARN_ON(bomapping->mmu->as != as);
/* Assume 2MB alignment and size multiple */
addr &= ~((u64)SZ_2M - 1);
page_offset = addr >> PAGE_SHIFT;
- page_offset -= bo->node.start;
+ page_offset -= bomapping->mmnode.start;
mutex_lock(&bo->base.pages_lock);
@@ -509,13 +522,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
goto err_map;
}
- mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+ mmu_map_sg(pfdev, bomapping->mmu, addr,
+ IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
- bo->is_mapped = true;
+ bomapping->active = true;
dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
- drm_gem_object_put_unlocked(&bo->base.base);
+ panfrost_gem_mapping_put(bomapping);
return 0;
@@ -587,33 +601,27 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
source_id = (fault_status >> 16);
/* Page fault only */
- if ((status & mask) == BIT(i)) {
- WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);
-
+ ret = -1;
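+ /*
+ * Exception codes 0xC0-0xC7 are the translation faults the driver
+ * can fix up by mapping pages (the old WARN_ON checked the
+ * narrower 0xC1-0xC4 range); anything else is terminal.
+ */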
+ if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
- if (!ret) {
- mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
- status &= ~mask;
- continue;
- }
- }
- /* terminal fault, print info about the fault */
- dev_err(pfdev->dev,
- "Unhandled Page fault in AS%d at VA 0x%016llX\n"
- "Reason: %s\n"
- "raw fault status: 0x%X\n"
- "decoded fault status: %s\n"
- "exception type 0x%X: %s\n"
- "access type 0x%X: %s\n"
- "source id 0x%X\n",
- i, addr,
- "TODO",
- fault_status,
- (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
- exception_type, panfrost_exception_name(pfdev, exception_type),
- access_type, access_type_name(pfdev, fault_status),
- source_id);
+ if (ret)
+ /* terminal fault, print info about the fault */
+ dev_err(pfdev->dev,
+ "Unhandled Page fault in AS%d at VA 0x%016llX\n"
+ "Reason: %s\n"
+ "raw fault status: 0x%X\n"
+ "decoded fault status: %s\n"
+ "exception type 0x%X: %s\n"
+ "access type 0x%X: %s\n"
+ "source id 0x%X\n",
+ i, addr,
+ "TODO",
+ fault_status,
+ (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
+ exception_type, panfrost_exception_name(pfdev, exception_type),
+ access_type, access_type_name(pfdev, fault_status),
+ source_id);
mmu_write(pfdev, MMU_INT_CLEAR, mask);
@@ -632,9 +640,11 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
if (irq <= 0)
return -ENODEV;
- err = devm_request_threaded_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
+ err = devm_request_threaded_irq(pfdev->dev, irq,
+ panfrost_mmu_irq_handler,
panfrost_mmu_irq_handler_thread,
- IRQF_SHARED, "mmu", pfdev);
+ IRQF_SHARED, KBUILD_MODNAME "-mmu",
+ pfdev);
if (err) {
dev_err(pfdev->dev, "failed to request mmu irq");
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
index 7c5b6775ae23..44fc2edf63ce 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
@@ -4,12 +4,12 @@
#ifndef __PANFROST_MMU_H__
#define __PANFROST_MMU_H__
-struct panfrost_gem_object;
+struct panfrost_gem_mapping;
struct panfrost_file_priv;
struct panfrost_mmu;
-int panfrost_mmu_map(struct panfrost_gem_object *bo);
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping);
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
int panfrost_mmu_init(struct panfrost_device *pfdev);
void panfrost_mmu_fini(struct panfrost_device *pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 2c04e858c50a..6913578d5aa7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -25,7 +25,7 @@
#define V4_SHADERS_PER_COREGROUP 4
struct panfrost_perfcnt {
- struct panfrost_gem_object *bo;
+ struct panfrost_gem_mapping *mapping;
size_t bosize;
void *buf;
struct panfrost_file_priv *user;
@@ -49,7 +49,7 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
int ret;
reinit_completion(&pfdev->perfcnt->dump_comp);
- gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT;
+ gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
gpu_write(pfdev, GPU_INT_CLEAR,
@@ -73,7 +73,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
struct panfrost_file_priv *user = file_priv->driver_priv;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
struct drm_gem_shmem_object *bo;
- u32 cfg;
+ u32 cfg, as;
int ret;
if (user == perfcnt->user)
@@ -89,17 +89,22 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
if (IS_ERR(bo))
return PTR_ERR(bo);
- perfcnt->bo = to_panfrost_bo(&bo->base);
-
/* Map the perfcnt buf in the address space attached to file_priv. */
- ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv);
+ ret = panfrost_gem_open(&bo->base, file_priv);
if (ret)
goto err_put_bo;
+ perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
+ user);
+ if (!perfcnt->mapping) {
+ ret = -EINVAL;
+ goto err_close_bo;
+ }
+
perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
if (IS_ERR(perfcnt->buf)) {
ret = PTR_ERR(perfcnt->buf);
- goto err_close_bo;
+ goto err_put_mapping;
}
/*
@@ -121,12 +126,8 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
perfcnt->user = user;
- /*
- * Always use address space 0 for now.
- * FIXME: this needs to be updated when we start using different
- * address space.
- */
- cfg = GPU_PERFCNT_CFG_AS(0) |
+ as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu);
+ cfg = GPU_PERFCNT_CFG_AS(as) |
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_MANUAL);
/*
@@ -154,12 +155,17 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);
+ /* The BO ref is retained by the mapping. */
+ drm_gem_object_put_unlocked(&bo->base);
+
return 0;
err_vunmap:
- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+ drm_gem_shmem_vunmap(&bo->base, perfcnt->buf);
+err_put_mapping:
+ panfrost_gem_mapping_put(perfcnt->mapping);
err_close_bo:
- panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
+ panfrost_gem_close(&bo->base, file_priv);
err_put_bo:
drm_gem_object_put_unlocked(&bo->base);
return ret;
@@ -182,11 +188,12 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
perfcnt->user = NULL;
- drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+ drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
perfcnt->buf = NULL;
- panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
- drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
- perfcnt->bo = NULL;
+ panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
+ panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
+ panfrost_gem_mapping_put(perfcnt->mapping);
+ perfcnt->mapping = NULL;
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
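The address space acquired in the enable path via panfrost_mmu_as_get() and released here is the extra AS user that the '+ 1' in the panfrost_mmu.c WARN_ON above accounts for: an AS can now be held by a perfcnt context as well as by active jobs.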
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index 09aeaffb7660..4f325c410b5d 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -19,6 +19,7 @@ static struct regmap *versatile_syscon_map;
* We detect the different syscon types from the compatible strings.
*/
enum versatile_clcd {
+ INTEGRATOR_IMPD1,
INTEGRATOR_CLCD_CM,
VERSATILE_CLCD,
REALVIEW_CLCD_EB,
@@ -65,6 +66,14 @@ static const struct of_device_id versatile_clcd_of_match[] = {
{},
};
+static const struct of_device_id impd1_clcd_of_match[] = {
+ {
+ .compatible = "arm,im-pd1-syscon",
+ .data = (void *)INTEGRATOR_IMPD1,
+ },
+ {},
+};
+
/*
* Core module CLCD control on the Integrator/CP, bits
* 8 thru 19 of the CM_CONTROL register control a bunch
@@ -125,6 +134,36 @@ static void pl111_integrator_enable(struct drm_device *drm, u32 format)
val);
}
+#define IMPD1_CTRL_OFFSET 0x18
+#define IMPD1_CTRL_DISP_LCD (0 << 0)
+#define IMPD1_CTRL_DISP_VGA (1 << 0)
+#define IMPD1_CTRL_DISP_LCD1 (2 << 0)
+#define IMPD1_CTRL_DISP_ENABLE (1 << 2)
+#define IMPD1_CTRL_DISP_MASK (7 << 0)
+
+static void pl111_impd1_enable(struct drm_device *drm, u32 format)
+{
+ u32 val;
+
+ dev_info(drm->dev, "enable IM-PD1 CLCD connectors\n");
+ val = IMPD1_CTRL_DISP_VGA | IMPD1_CTRL_DISP_ENABLE;
+
+ regmap_update_bits(versatile_syscon_map,
+ IMPD1_CTRL_OFFSET,
+ IMPD1_CTRL_DISP_MASK,
+ val);
+}
+
+static void pl111_impd1_disable(struct drm_device *drm)
+{
+ dev_info(drm->dev, "disable IM-PD1 CLCD connectors\n");
+
+ regmap_update_bits(versatile_syscon_map,
+ IMPD1_CTRL_OFFSET,
+ IMPD1_CTRL_DISP_MASK,
+ 0);
+}
+
/*
* This configuration register in the Versatile and RealView
* family is uniformly present but appears more and more
@@ -271,6 +310,20 @@ static const struct pl111_variant_data pl110_integrator = {
};
/*
+ * The IM-PD1 variant is a PL110 with a number of broken or
+ * not-yet-implemented features.
+ */
+static const struct pl111_variant_data pl110_impd1 = {
+ .name = "PL110 IM-PD1",
+ .is_pl110 = true,
+ .broken_clockdivider = true,
+ .broken_vblank = true,
+ .formats = pl110_integrator_pixel_formats,
+ .nformats = ARRAY_SIZE(pl110_integrator_pixel_formats),
+ .fb_bpp = 16,
+};
+
+/*
* This is the in-between PL110 variant found in the ARM Versatile,
* supporting RGB565/BGR565
*/
@@ -322,8 +375,21 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
/* Non-ARM reference designs, just bail out */
return 0;
}
+
versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
+ /*
+ * On the Integrator, check if we should use the IM-PD1 instead; if we
+ * find it, it takes precedence. This applies to the Integrator/AP, which
+ * only has this option for PL110 graphics.
+ */
+ if (versatile_clcd_type == INTEGRATOR_CLCD_CM) {
+ np = of_find_matching_node_and_match(NULL, impd1_clcd_of_match,
+ &clcd_id);
+ if (np)
+ versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
+ }
+
/* Versatile Express special handling */
if (versatile_clcd_type == VEXPRESS_CLCD_V2M) {
struct platform_device *pdev;
@@ -367,6 +433,13 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
priv->variant_display_enable = pl111_integrator_enable;
dev_info(dev, "set up callbacks for Integrator PL110\n");
break;
+ case INTEGRATOR_IMPD1:
+ versatile_syscon_map = map;
+ priv->variant = &pl110_impd1;
+ priv->variant_display_enable = pl111_impd1_enable;
+ priv->variant_display_disable = pl111_impd1_disable;
+ dev_info(dev, "set up callbacks for IM-PD1 PL110\n");
+ break;
case VERSATILE_CLCD:
versatile_syscon_map = map;
/* This can do RGB565 with external PLD */
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index ef09dc6bc635..d1086b2a6892 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -36,7 +36,7 @@ static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
struct ring {
struct qxl_ring_header header;
- uint8_t elements[0];
+ uint8_t elements[];
};
struct qxl_ring {
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 16d73b22f3f5..09583a08e141 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -31,7 +31,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
+#include <drm/drm_simple_kms_helper.h>
#include "qxl_drv.h"
#include "qxl_object.h"
@@ -372,19 +372,6 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
static void qxl_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_pending_vblank_event *event;
- unsigned long flags;
-
- if (crtc->state && crtc->state->event) {
- event = crtc->state->event;
- crtc->state->event = NULL;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
-
qxl_crtc_update_monitors_config(crtc, "flush");
}
@@ -1021,9 +1008,6 @@ static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
return &qxl_output->enc;
}
-static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
-};
-
static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
.get_modes = qxl_conn_get_modes,
.mode_valid = qxl_conn_mode_valid,
@@ -1073,15 +1057,6 @@ static const struct drm_connector_funcs qxl_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static void qxl_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs qxl_enc_funcs = {
- .destroy = qxl_enc_destroy,
-};
-
static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
{
if (qdev->hotplug_mode_update_property)
@@ -1100,6 +1075,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
struct qxl_output *qxl_output;
struct drm_connector *connector;
struct drm_encoder *encoder;
+ int ret;
qxl_output = kzalloc(sizeof(struct qxl_output), GFP_KERNEL);
if (!qxl_output)
@@ -1112,15 +1088,19 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
drm_connector_init(dev, &qxl_output->base,
&qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
- drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
+ ret = drm_simple_encoder_init(dev, &qxl_output->enc,
+ DRM_MODE_ENCODER_VIRTUAL);
+ if (ret) {
+ drm_err(dev, "drm_simple_encoder_init() failed, error %d\n",
+ ret);
+ goto err_drm_connector_cleanup;
+ }
/* we get HPD via client monitors config */
connector->polled = DRM_CONNECTOR_POLL_HPD;
encoder->possible_crtcs = 1 << num_output;
drm_connector_attach_encoder(&qxl_output->base,
&qxl_output->enc);
- drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
drm_object_attach_property(&connector->base,
@@ -1130,6 +1110,11 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
return 0;
+
+err_drm_connector_cleanup:
+ drm_connector_cleanup(&qxl_output->base);
+ kfree(qxl_output);
+ return ret;
}
static struct drm_framebuffer *
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 1d601f57a6ba..4fda3f9b29f4 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -34,6 +34,7 @@
#include <linux/pci.h>
#include <drm/drm.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_modeset_helper.h>
@@ -132,21 +133,30 @@ free_dev:
return ret;
}
+static void qxl_drm_release(struct drm_device *dev)
+{
+ struct qxl_device *qdev = dev->dev_private;
+
+ /*
+ * TODO: qxl_device_fini() call should be in qxl_pci_remove(),
+ * reodering qxl_modeset_fini() + qxl_device_fini() calls is
+ * non-trivial though.
+ */
+ qxl_modeset_fini(qdev);
+ qxl_device_fini(qdev);
+ dev->dev_private = NULL;
+ kfree(qdev);
+}
+
static void
qxl_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct qxl_device *qdev = dev->dev_private;
drm_dev_unregister(dev);
-
- qxl_modeset_fini(qdev);
- qxl_device_fini(qdev);
+ drm_atomic_helper_shutdown(dev);
if (is_vga(pdev))
vga_put(pdev, VGA_RSRC_LEGACY_IO);
-
- dev->dev_private = NULL;
- kfree(qdev);
drm_dev_put(dev);
}
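With the .release hook installed below, the final drm_dev_put() here is what ends up calling qxl_drm_release(), so modeset and device teardown now run once the last reference is dropped rather than synchronously in the PCI remove path.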
@@ -279,6 +289,8 @@ static struct drm_driver qxl_driver = {
.major = 0,
.minor = 1,
.patchlevel = 0,
+
+ .release = qxl_drm_release,
};
static int __init qxl_init(void)
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index bfc1631093e9..70b20ee4741a 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -299,12 +299,12 @@ void qxl_device_fini(struct qxl_device *qdev)
{
qxl_bo_unref(&qdev->current_release_bo[0]);
qxl_bo_unref(&qdev->current_release_bo[1]);
+ qxl_gem_fini(qdev);
+ qxl_bo_fini(qdev);
flush_work(&qdev->gc_work);
qxl_ring_free(qdev->command_ring);
qxl_ring_free(qdev->cursor_ring);
qxl_ring_free(qdev->release_ring);
- qxl_gem_fini(qdev);
- qxl_bo_fini(qdev);
io_mapping_free(qdev->surface_mapping);
io_mapping_free(qdev->vram_mapping);
iounmap(qdev->ram_header);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 16a5e903533d..62a5e424971b 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -48,11 +48,6 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
return qdev;
}
-static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- return 0;
-}
-
static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
@@ -256,7 +251,6 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
static struct ttm_bo_driver qxl_bo_driver = {
.ttm_tt_create = &qxl_ttm_tt_create,
- .invalidate_caches = &qxl_invalidate_caches,
.init_mem_type = &qxl_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &qxl_evict_flags,
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 92ccd7aed0d4..c693b2ca0329 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -5,7 +5,7 @@
ccflags-y := -Idrivers/gpu/drm/amd/include
-hostprogs-y := mkregtable
+hostprogs := mkregtable
clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
quiet_cmd_mkregtable = MKREGTABLE $@
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index be583695427a..91811757104c 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -2231,6 +2231,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
.prepare = atombios_crtc_prepare,
.commit = atombios_crtc_commit,
.disable = atombios_crtc_disable,
+ .get_scanout_position = radeon_get_crtc_scanout_position,
};
void radeon_atombios_init_crtc(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index a522e092038b..266e3cbbd09b 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1263,7 +1263,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return dev->open_count == 0;
+ return atomic_read(&dev->open_count) == 0;
}
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index d07c7db0c815..35db79a168bf 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -45,6 +45,10 @@
#include "atom.h"
#include "radeon.h"
+u32 radeon_get_vblank_counter_kms(struct drm_crtc *crtc);
+int radeon_enable_vblank_kms(struct drm_crtc *crtc);
+void radeon_disable_vblank_kms(struct drm_crtc *crtc);
+
static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -460,7 +464,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(!ASIC_IS_AVIVO(rdev) ||
((int) (work->target_vblank -
- dev->driver->get_vblank_counter(dev, work->crtc_id)) > 0)))
+ crtc->funcs->get_vblank_counter(crtc)) > 0)))
usleep_range(1000, 2000);
/* We borrow the event spin lock for protecting flip_status */
@@ -576,7 +580,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
}
work->base = base;
work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
- dev->driver->get_vblank_counter(dev, work->crtc_id);
+ crtc->funcs->get_vblank_counter(crtc);
/* We borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -668,6 +672,10 @@ static const struct drm_crtc_funcs radeon_crtc_funcs = {
.set_config = radeon_crtc_set_config,
.destroy = radeon_crtc_destroy,
.page_flip_target = radeon_crtc_page_flip_target,
+ .get_vblank_counter = radeon_get_vblank_counter_kms,
+ .enable_vblank = radeon_enable_vblank_kms,
+ .disable_vblank = radeon_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void radeon_crtc_init(struct drm_device *dev, int index)
@@ -1973,3 +1981,16 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
return ret;
}
+
+bool
+radeon_get_crtc_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
+
+ return radeon_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
+ stime, etime, mode);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 28eef9282874..008308780443 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -301,35 +301,8 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
return connector;
}
-static void radeon_dp_register_mst_connector(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- radeon_fb_add_connector(rdev, connector);
-
- drm_connector_register(connector);
-}
-
-static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_connector *connector)
-{
- struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
- struct drm_device *dev = master->base.dev;
- struct radeon_device *rdev = dev->dev_private;
-
- drm_connector_unregister(connector);
- radeon_fb_remove_connector(rdev, connector);
- drm_connector_cleanup(connector);
-
- kfree(connector);
- DRM_DEBUG_KMS("\n");
-}
-
static const struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = radeon_dp_add_mst_connector,
- .register_connector = radeon_dp_register_mst_connector,
- .destroy_connector = radeon_dp_destroy_mst_connector,
};
static struct
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index fd74e2611185..59f8186a2415 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -37,6 +37,7 @@
#include <linux/vga_switcheroo.h>
#include <linux/mmu_notifier.h>
+#include <drm/drm_agpsupport.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
@@ -119,9 +120,6 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
int radeon_suspend_kms(struct drm_device *dev, bool suspend,
bool fbcon, bool freeze);
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
-u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
-int radeon_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
-void radeon_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
@@ -325,6 +323,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned long flags = 0;
+ struct drm_device *dev;
int ret;
if (!ent)
@@ -365,7 +364,44 @@ static int radeon_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- return drm_get_pci_dev(pdev, ent, &kms_driver);
+ dev = drm_dev_alloc(&kms_driver, &pdev->dev);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto err_free;
+
+ dev->pdev = pdev;
+#ifdef __alpha__
+ dev->hose = pdev->sysdata;
+#endif
+
+ pci_set_drvdata(pdev, dev);
+
+ if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
+ dev->agp = drm_agp_init(dev);
+ if (dev->agp) {
+ dev->agp->agp_mtrr = arch_phys_wc_add(
+ dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size *
+ 1024 * 1024);
+ }
+
+ ret = drm_dev_register(dev, ent->driver_data);
+ if (ret)
+ goto err_agp;
+
+ return 0;
+
+err_agp:
+ if (dev->agp)
+ arch_phys_wc_del(dev->agp->agp_mtrr);
+ kfree(dev->agp);
+ pci_disable_device(pdev);
+err_free:
+ drm_dev_put(dev);
+ return ret;
}
static void
@@ -563,29 +599,14 @@ static const struct file_operations radeon_driver_kms_fops = {
#endif
};
-static bool
-radeon_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
-{
- return radeon_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
- stime, etime, mode);
-}
-
static struct drm_driver kms_driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_GEM | DRIVER_RENDER,
+ DRIVER_GEM | DRIVER_RENDER,
.load = radeon_driver_load_kms,
.open = radeon_driver_open_kms,
.postclose = radeon_driver_postclose_kms,
.lastclose = radeon_driver_lastclose_kms,
.unload = radeon_driver_unload_kms,
- .get_vblank_counter = radeon_get_vblank_counter_kms,
- .enable_vblank = radeon_enable_vblank_kms,
- .disable_vblank = radeon_disable_vblank_kms,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
- .get_scanout_position = radeon_get_crtc_scanout_position,
.irq_preinstall = radeon_driver_irq_preinstall_kms,
.irq_postinstall = radeon_driver_irq_postinstall_kms,
.irq_uninstall = radeon_driver_irq_uninstall_kms,
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index ec0b7d6c994d..cf3156a65fc1 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -354,15 +354,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
drm_fb_helper_prepare(rdev->ddev, &rfbdev->helper,
&radeon_fb_helper_funcs);
- ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
- RADEONFB_CONN_LIMIT);
+ ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper);
if (ret)
goto free;
- ret = drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
- if (ret)
- goto fini;
-
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(rdev->ddev);
@@ -404,15 +399,3 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
return true;
return false;
}
-
-void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
-{
- if (rdev->mode_info.rfbdev)
- drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
-}
-
-void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
-{
- if (rdev->mode_info.rfbdev)
- drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
-}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index d24f23a81656..58176db85952 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -32,6 +32,7 @@
#include <linux/uaccess.h>
#include <linux/vga_switcheroo.h>
+#include <drm/drm_agpsupport.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -77,6 +78,11 @@ void radeon_driver_unload_kms(struct drm_device *dev)
radeon_modeset_fini(rdev);
radeon_device_fini(rdev);
+ if (dev->agp)
+ arch_phys_wc_del(dev->agp->agp_mtrr);
+ kfree(dev->agp);
+ dev->agp = NULL;
+
done_free:
kfree(rdev);
dev->dev_private = NULL;
@@ -739,14 +745,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
/**
* radeon_get_vblank_counter_kms - get frame count
*
- * @dev: drm dev pointer
- * @pipe: crtc to get the frame count from
+ * @crtc: crtc to get the frame count from
*
* Gets the frame count on the requested crtc (all asics).
* Returns frame count on success, -EINVAL on failure.
*/
-u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
+u32 radeon_get_vblank_counter_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
int vpos, hpos, stat;
u32 count;
struct radeon_device *rdev = dev->dev_private;
@@ -808,25 +815,26 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
/**
* radeon_enable_vblank_kms - enable vblank interrupt
*
- * @dev: drm dev pointer
* @crtc: crtc to enable vblank interrupt for
*
* Enable the interrupt on the requested crtc (all asics).
* Returns 0 on success, -EINVAL on failure.
*/
-int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
+int radeon_enable_vblank_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
int r;
- if (crtc < 0 || crtc >= rdev->num_crtc) {
- DRM_ERROR("Invalid crtc %d\n", crtc);
+ if (pipe < 0 || pipe >= rdev->num_crtc) {
+ DRM_ERROR("Invalid crtc %d\n", pipe);
return -EINVAL;
}
spin_lock_irqsave(&rdev->irq.lock, irqflags);
- rdev->irq.crtc_vblank_int[crtc] = true;
+ rdev->irq.crtc_vblank_int[pipe] = true;
r = radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
return r;
@@ -835,23 +843,24 @@ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
/**
* radeon_disable_vblank_kms - disable vblank interrupt
*
- * @dev: drm dev pointer
* @crtc: crtc to disable vblank interrupt for
*
* Disable the interrupt on the requested crtc (all asics).
*/
-void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
+void radeon_disable_vblank_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
- if (crtc < 0 || crtc >= rdev->num_crtc) {
- DRM_ERROR("Invalid crtc %d\n", crtc);
+ if (pipe < 0 || pipe >= rdev->num_crtc) {
+ DRM_ERROR("Invalid crtc %d\n", pipe);
return;
}
spin_lock_irqsave(&rdev->irq.lock, irqflags);
- rdev->irq.crtc_vblank_int[crtc] = false;
+ rdev->irq.crtc_vblank_int[pipe] = false;
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index a1985a552794..8817fd033cd0 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1111,7 +1111,8 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
.mode_set_base_atomic = radeon_crtc_set_base_atomic,
.prepare = radeon_crtc_prepare,
.commit = radeon_crtc_commit,
- .disable = radeon_crtc_disable
+ .disable = radeon_crtc_disable,
+ .get_scanout_position = radeon_get_crtc_scanout_position,
};
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 96565171d13e..c7f223743d46 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -880,6 +880,12 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode);
+extern bool
+radeon_get_crtc_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
+
extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
extern struct edid *
radeon_bios_get_hardcoded_edid(struct radeon_device *rdev);
@@ -980,9 +986,6 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id);
-void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector);
-void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector);
-
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index f4af67035673..badf1b6d1549 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -66,11 +66,6 @@ static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
return rdev;
}
-static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- return 0;
-}
-
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
@@ -443,7 +438,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
mem->bus.size);
else
mem->bus.addr =
- ioremap_nocache(mem->bus.base + mem->bus.offset,
+ ioremap(mem->bus.base + mem->bus.offset,
mem->bus.size);
if (!mem->bus.addr)
return -ENOMEM;
@@ -774,7 +769,6 @@ static struct ttm_bo_driver radeon_bo_driver = {
.ttm_tt_create = &radeon_ttm_tt_create,
.ttm_tt_populate = &radeon_ttm_tt_populate,
.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
- .invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &radeon_evict_flags,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 3cd83a030a04..c07c6a88aff0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -121,7 +121,7 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
* Attach the bridge to the encoder. The bridge will create the
* connector.
*/
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret) {
drm_encoder_cleanup(encoder);
return ret;
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 8ffa4fbbdeb3..ab0d49618cf9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -23,6 +23,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "rcar_lvds.h"
@@ -590,8 +591,9 @@ static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge,
}
static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+ struct drm_bridge_state *old_bridge_state)
{
+ struct drm_atomic_state *state = old_bridge_state->base.state;
struct drm_connector *connector;
struct drm_crtc *crtc;
@@ -603,7 +605,7 @@ static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
}
static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+ struct drm_bridge_state *old_bridge_state)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
@@ -618,7 +620,8 @@ static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
/* Disable the companion LVDS encoder in dual-link mode. */
if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion)
- lvds->companion->funcs->atomic_disable(lvds->companion, state);
+ lvds->companion->funcs->atomic_disable(lvds->companion,
+ old_bridge_state);
clk_disable_unprepare(lvds->clocks.mod);
}
@@ -641,7 +644,8 @@ static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
return true;
}
-static int rcar_lvds_attach(struct drm_bridge *bridge)
+static int rcar_lvds_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
struct drm_connector *connector = &lvds->connector;
@@ -651,7 +655,12 @@ static int rcar_lvds_attach(struct drm_bridge *bridge)
/* If we have a next bridge just attach it. */
if (lvds->next_bridge)
return drm_bridge_attach(bridge->encoder, lvds->next_bridge,
- bridge);
+ bridge, flags);
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
/* Otherwise if we have a panel, create a connector. */
if (!lvds->panel)
@@ -682,6 +691,9 @@ static void rcar_lvds_detach(struct drm_bridge *bridge)
static const struct drm_bridge_funcs rcar_lvds_bridge_ops = {
.attach = rcar_lvds_attach,
.detach = rcar_lvds_detach,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_enable = rcar_lvds_atomic_enable,
.atomic_disable = rcar_lvds_atomic_disable,
.mode_fixup = rcar_lvds_mode_fixup,
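
The rcar_lvds atomic hooks above switch from taking the global drm_atomic_state to a drm_bridge_state; the commit-wide state stays reachable through it. A hedged sketch of the new callback shape (names illustrative):

#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>

static void example_atomic_enable(struct drm_bridge *bridge,
                                  struct drm_bridge_state *old_bridge_state)
{
        /* the commit-wide state is still available via the bridge state */
        struct drm_atomic_state *state = old_bridge_state->base.state;

        /* use it to look up the connector/CRTC feeding this bridge */
        (void)state;
}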
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 521fe42ac5e2..2fdc455c4ad7 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -124,7 +124,7 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper, ROCKCHIP_MAX_CONNECTOR);
+ ret = drm_fb_helper_init(dev, helper);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev,
"Failed to initialize drm fb helper - %d.\n",
@@ -132,13 +132,6 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
return ret;
}
- ret = drm_fb_helper_single_add_all_connectors(helper);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev,
- "Failed to add connectors - %d.\n", ret);
- goto err_drm_fb_helper_fini;
- }
-
ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev,
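
With drm_fb_helper_init() losing its connector-count argument and drm_fb_helper_single_add_all_connectors() going away, fbdev setup collapses to two calls, as the rockchip hunk above shows. A minimal sketch (the bpp value 32 is an assumption):

#include <drm/drm_fb_helper.h>

static int example_fbdev_init(struct drm_device *dev,
                              struct drm_fb_helper *helper)
{
        int ret;

        ret = drm_fb_helper_init(dev, helper);
        if (ret)
                return ret;

        /* connectors are now picked up automatically */
        return drm_fb_helper_initial_config(helper, 32);
}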
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 7582d0e6a60a..0d1884684dcb 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -6,6 +6,7 @@
#include <linux/dma-buf.h>
#include <linux/iommu.h>
+#include <linux/vmalloc.h>
#include <drm/drm.h>
#include <drm/drm_gem.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index d04b3492bdac..cecb2cc781f5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -724,7 +724,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
DRM_PLANE_HELPER_NO_SCALING;
- if (!crtc || !fb)
+ if (!crtc || WARN_ON(!fb))
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 0b3d18c457b2..cc672620d6e0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -328,7 +328,7 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
{
int act_height;
- act_height = (src_h + vskiplines - 1) / vskiplines;
+ act_height = DIV_ROUND_UP(src_h, vskiplines);
if (act_height == dst_h)
return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines;
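
DIV_ROUND_UP() from <linux/kernel.h> is the canonical spelling of the open-coded rounding it replaces here. A quick illustration with assumed values:

/* DIV_ROUND_UP(n, d) expands to ((n) + (d) - 1) / (d), so with
 * src_h = 1080 and vskiplines = 4:
 *
 *      (1080 + 4 - 1) / 4 == 270   (exact division)
 *      (1081 + 4 - 1) / 4 == 271   (rounds up)
 */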
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index f25a36743cbd..449a62908d21 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -646,7 +646,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
goto err_free_connector;
}
} else {
- ret = drm_bridge_attach(encoder, lvds->bridge, NULL);
+ ret = drm_bridge_attach(encoder, lvds->bridge, NULL, 0);
if (ret) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to attach bridge: %d\n", ret);
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index ae730275a34f..90784781e515 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -98,7 +98,8 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
if (of_property_read_u32(endpoint, "reg", &endpoint_id))
endpoint_id = 0;
- if (rockchip_drm_endpoint_is_subdriver(endpoint) > 0)
+ /* if subdriver (> 0) or error case (< 0), ignore entry */
+ if (rockchip_drm_endpoint_is_subdriver(endpoint) != 0)
continue;
child_count++;
@@ -144,7 +145,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
rgb->bridge = bridge;
- ret = drm_bridge_attach(encoder, rgb->bridge, NULL);
+ ret = drm_bridge_attach(encoder, rgb->bridge, NULL, 0);
if (ret) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to attach bridge: %d\n", ret);
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index d79086498aff..877ce9b127f1 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -59,6 +59,33 @@ TRACE_EVENT(drm_sched_job,
__entry->job_count, __entry->hw_job_count)
);
+TRACE_EVENT(drm_run_job,
+ TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
+ TP_ARGS(sched_job, entity),
+ TP_STRUCT__entry(
+ __field(struct drm_sched_entity *, entity)
+ __field(struct dma_fence *, fence)
+ __field(const char *, name)
+ __field(uint64_t, id)
+ __field(u32, job_count)
+ __field(int, hw_job_count)
+ ),
+
+ TP_fast_assign(
+ __entry->entity = entity;
+ __entry->id = sched_job->id;
+ __entry->fence = &sched_job->s_fence->finished;
+ __entry->name = sched_job->sched->name;
+ __entry->job_count = spsc_queue_count(&entity->job_queue);
+ __entry->hw_job_count = atomic_read(
+ &sched_job->sched->hw_rq_count);
+ ),
+ TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
+ __entry->entity, __entry->id,
+ __entry->fence, __entry->name,
+ __entry->job_count, __entry->hw_job_count)
+);
+
TRACE_EVENT(drm_sched_process_job,
TP_PROTO(struct drm_sched_fence *fence),
TP_ARGS(fence),
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 63bccd201b97..c803e14eed91 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -84,6 +84,24 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
EXPORT_SYMBOL(drm_sched_entity_init);
/**
+ * drm_sched_entity_modify_sched - Modify the scheduler list of an entity
+ * @entity: scheduler entity to modify
+ * @sched_list: list of new drm scheds that will replace the existing
+ * entity->sched_list
+ * @num_sched_list: number of drm scheds in sched_list
+ */
+void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list)
+{
+ WARN_ON(!num_sched_list || !sched_list);
+
+ entity->sched_list = sched_list;
+ entity->num_sched_list = num_sched_list;
+}
+EXPORT_SYMBOL(drm_sched_entity_modify_sched);
+
+/**
* drm_sched_entity_is_idle - Check if entity is idle
*
* @entity: scheduler entity
@@ -120,38 +138,6 @@ bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
}
/**
- * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load
- *
- * @entity: scheduler entity
- *
- * Return the pointer to the rq with least load.
- */
-static struct drm_sched_rq *
-drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
-{
- struct drm_sched_rq *rq = NULL;
- unsigned int min_score = UINT_MAX, num_score;
- int i;
-
- for (i = 0; i < entity->num_sched_list; ++i) {
- struct drm_gpu_scheduler *sched = entity->sched_list[i];
-
- if (!entity->sched_list[i]->ready) {
- DRM_WARN("sched%s is not ready, skipping", sched->name);
- continue;
- }
-
- num_score = atomic_read(&sched->score);
- if (num_score < min_score) {
- min_score = num_score;
- rq = &entity->sched_list[i]->sched_rq[entity->priority];
- }
- }
-
- return rq;
-}
-
-/**
* drm_sched_entity_flush - Flush a context entity
*
* @entity: scheduler entity
@@ -461,6 +447,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
struct dma_fence *fence;
+ struct drm_gpu_scheduler *sched;
struct drm_sched_rq *rq;
if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
@@ -471,7 +458,8 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
return;
spin_lock(&entity->rq_lock);
- rq = drm_sched_entity_get_free_sched(entity);
+ sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
+ rq = sched ? &sched->sched_rq[entity->priority] : NULL;
if (rq != entity->rq) {
drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
@@ -498,7 +486,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
bool first;
trace_drm_sched_job(sched_job, entity);
- atomic_inc(&entity->rq->sched->score);
+ atomic_inc(&entity->rq->sched->num_jobs);
WRITE_ONCE(entity->last_user, current->group_leader);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
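
drm_sched_entity_modify_sched() only swaps the candidate scheduler list; the entity is actually rebalanced the next time drm_sched_entity_select_rq() runs. A hedged usage sketch (example_* names are not from the patch):

#include <drm/gpu_scheduler.h>

static void example_limit_entity(struct drm_sched_entity *entity,
                                 struct drm_gpu_scheduler **scheds,
                                 unsigned int count)
{
        /* the helper only WARNs on an empty list; the caller is
         * responsible for keeping scheds alive while the entity uses it */
        drm_sched_entity_modify_sched(entity, scheds, count);
}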
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 71ce6215956f..a18eabf692e4 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -92,7 +92,6 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
if (!list_empty(&entity->list))
return;
spin_lock(&rq->lock);
- atomic_inc(&rq->sched->score);
list_add_tail(&entity->list, &rq->entities);
spin_unlock(&rq->lock);
}
@@ -111,7 +110,6 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
if (list_empty(&entity->list))
return;
spin_lock(&rq->lock);
- atomic_dec(&rq->sched->score);
list_del_init(&entity->list);
if (rq->current_entity == entity)
rq->current_entity = NULL;
@@ -222,8 +220,7 @@ EXPORT_SYMBOL(drm_sched_fault);
*
* Suspend the delayed work timeout for the scheduler. This is done by
* modifying the delayed work timeout to an arbitrarily large value,
- * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
- * called from an IRQ context.
+ * MAX_SCHEDULE_TIMEOUT in this case.
*
* Returns the timeout remaining
*
@@ -252,46 +249,41 @@ EXPORT_SYMBOL(drm_sched_suspend_timeout);
* @sched: scheduler instance for which to resume the timeout
* @remaining: remaining timeout
*
- * Resume the delayed work timeout for the scheduler. Note that
- * this function can be called from an IRQ context.
+ * Resume the delayed work timeout for the scheduler.
*/
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
unsigned long remaining)
{
- unsigned long flags;
-
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
if (list_empty(&sched->ring_mirror_list))
cancel_delayed_work(&sched->work_tdr);
else
mod_delayed_work(system_wq, &sched->work_tdr, remaining);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
struct drm_gpu_scheduler *sched = s_job->sched;
- unsigned long flags;
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
list_add_tail(&s_job->node, &sched->ring_mirror_list);
drm_sched_start_timeout(sched);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
static void drm_sched_job_timedout(struct work_struct *work)
{
struct drm_gpu_scheduler *sched;
struct drm_sched_job *job;
- unsigned long flags;
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
@@ -302,7 +294,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
* is parked at which point it's safe.
*/
list_del_init(&job->node);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
job->sched->ops->timedout_job(job);
@@ -315,12 +307,12 @@ static void drm_sched_job_timedout(struct work_struct *work)
sched->free_guilty = false;
}
} else {
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
drm_sched_start_timeout(sched);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
/**
@@ -383,7 +375,6 @@ EXPORT_SYMBOL(drm_sched_increase_karma);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
struct drm_sched_job *s_job, *tmp;
- unsigned long flags;
kthread_park(sched->thread);
@@ -417,9 +408,9 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
* remove job from ring_mirror_list.
* Locking here is for concurrent resume timeout
*/
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
list_del_init(&s_job->node);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
/*
* Wait for job's HW fence callback to finish using s_job
@@ -462,7 +453,6 @@ EXPORT_SYMBOL(drm_sched_stop);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
struct drm_sched_job *s_job, *tmp;
- unsigned long flags;
int r;
/*
@@ -491,9 +481,9 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
}
if (full_recovery) {
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
drm_sched_start_timeout(sched);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
kthread_unpark(sched->thread);
@@ -657,7 +647,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
struct drm_gpu_scheduler *sched = s_fence->sched;
atomic_dec(&sched->hw_rq_count);
- atomic_dec(&sched->score);
+ atomic_dec(&sched->num_jobs);
trace_drm_sched_process_job(s_fence);
@@ -677,7 +667,6 @@ static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *job;
- unsigned long flags;
/*
* Don't destroy jobs while the timeout worker is running OR thread
@@ -688,7 +677,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
__kthread_should_park(sched->thread))
return NULL;
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
@@ -702,12 +691,48 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
drm_sched_start_timeout(sched);
}
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
return job;
}
/**
+ * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
+ * @sched_list: list of drm_gpu_schedulers
+ * @num_sched_list: number of drm_gpu_schedulers in the sched_list
+ *
+ * Returns a pointer to the sched with the least load, or NULL if none of the
+ * drm_gpu_schedulers are ready
+ */
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list)
+{
+ struct drm_gpu_scheduler *sched, *picked_sched = NULL;
+ int i;
+ unsigned int min_jobs = UINT_MAX, num_jobs;
+
+ for (i = 0; i < num_sched_list; ++i) {
+ sched = sched_list[i];
+
+ if (!sched->ready) {
+ DRM_WARN("scheduler %s is not ready, skipping",
+ sched->name);
+ continue;
+ }
+
+ num_jobs = atomic_read(&sched->num_jobs);
+ if (num_jobs < min_jobs) {
+ min_jobs = num_jobs;
+ picked_sched = sched;
+ }
+ }
+
+ return picked_sched;
+}
+EXPORT_SYMBOL(drm_sched_pick_best);
+
+/**
* drm_sched_blocked - check if the scheduler is blocked
*
* @sched: scheduler instance
@@ -773,6 +798,7 @@ static int drm_sched_main(void *param)
atomic_inc(&sched->hw_rq_count);
drm_sched_job_begin(sched_job);
+ trace_drm_run_job(sched_job, entity);
fence = sched->ops->run_job(sched_job);
drm_sched_fence_scheduled(s_fence);
@@ -832,7 +858,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
- atomic_set(&sched->score, 0);
+ atomic_set(&sched->num_jobs, 0);
atomic64_set(&sched->job_id_count, 0);
/* Each scheduler will run on a separate kernel thread */
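
drm_sched_pick_best() returns the least-loaded ready scheduler and leaves deriving the run queue to the caller, which is exactly what drm_sched_entity_select_rq() now does. A minimal sketch:

#include <drm/gpu_scheduler.h>

static struct drm_sched_rq *
example_select_rq(struct drm_gpu_scheduler **sched_list,
                  unsigned int num_sched_list,
                  enum drm_sched_priority priority)
{
        struct drm_gpu_scheduler *sched;

        sched = drm_sched_pick_best(sched_list, num_sched_list);

        return sched ? &sched->sched_rq[priority] : NULL;
}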
diff --git a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
index ceac7af9a172..29e367db6118 100644
--- a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
@@ -53,6 +53,7 @@ cmdline_test(drm_cmdline_test_rotate_0)
cmdline_test(drm_cmdline_test_rotate_90)
cmdline_test(drm_cmdline_test_rotate_180)
cmdline_test(drm_cmdline_test_rotate_270)
+cmdline_test(drm_cmdline_test_rotate_multiple)
cmdline_test(drm_cmdline_test_rotate_invalid_val)
cmdline_test(drm_cmdline_test_rotate_truncated)
cmdline_test(drm_cmdline_test_hmirror)
diff --git a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
index 520f3e66a384..d96cd890def6 100644
--- a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
+++ b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
@@ -856,6 +856,17 @@ static int drm_cmdline_test_rotate_270(void *ignored)
return 0;
}
+static int drm_cmdline_test_rotate_multiple(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
+ FAIL_ON(drm_mode_parse_command_line_for_connector("720x480,rotate=0,rotate=90",
+ &no_connector,
+ &mode));
+
+ return 0;
+}
+
static int drm_cmdline_test_rotate_invalid_val(void *ignored)
{
struct drm_cmdline_mode mode = { };
@@ -888,7 +899,7 @@ static int drm_cmdline_test_hmirror(void *ignored)
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != DRM_MODE_REFLECT_X);
+ FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_X));
FAIL_ON(mode.refresh_specified);
@@ -913,7 +924,7 @@ static int drm_cmdline_test_vmirror(void *ignored)
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != DRM_MODE_REFLECT_Y);
+ FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_Y));
FAIL_ON(mode.refresh_specified);
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index dc64fbfc4e61..49e6cb8f5836 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -279,12 +279,13 @@ int sti_crtc_vblank_cb(struct notifier_block *nb,
return 0;
}
-int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int sti_crtc_enable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct sti_private *dev_priv = dev->dev_private;
struct sti_compositor *compo = dev_priv->compo;
struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe];
- struct drm_crtc *crtc = &compo->mixer[pipe]->drm_crtc;
struct sti_vtg *vtg = compo->vtg[pipe];
DRM_DEBUG_DRIVER("\n");
@@ -297,8 +298,10 @@ int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
return 0;
}
-void sti_crtc_disable_vblank(struct drm_device *drm_dev, unsigned int pipe)
+static void sti_crtc_disable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *drm_dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct sti_private *priv = drm_dev->dev_private;
struct sti_compositor *compo = priv->compo;
struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe];
@@ -330,6 +333,8 @@ static const struct drm_crtc_funcs sti_crtc_funcs = {
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.late_register = sti_crtc_late_register,
+ .enable_vblank = sti_crtc_enable_vblank,
+ .disable_vblank = sti_crtc_disable_vblank,
};
bool sti_crtc_is_main(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sti/sti_crtc.h b/drivers/gpu/drm/sti/sti_crtc.h
index df489ab14e2b..1132b4586712 100644
--- a/drivers/gpu/drm/sti/sti_crtc.h
+++ b/drivers/gpu/drm/sti/sti_crtc.h
@@ -15,8 +15,6 @@ struct sti_mixer;
int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
struct drm_plane *primary, struct drm_plane *cursor);
-int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe);
-void sti_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe);
int sti_crtc_vblank_cb(struct notifier_block *nb,
unsigned long event, void *data);
bool sti_crtc_is_main(struct drm_crtc *drm_crtc);
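
The sti conversion above is the general recipe for retiring the drm_driver-level vblank hooks: the per-CRTC callbacks recover the device and pipe from the CRTC itself. A hedged sketch with the hardware programming elided:

#include <drm/drm_crtc.h>

static int example_enable_vblank(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        unsigned int pipe = crtc->index;

        /* program the vblank interrupt for this pipe here */
        (void)dev;
        (void)pipe;

        return 0;
}

static const struct drm_crtc_funcs example_crtc_funcs = {
        .enable_vblank = example_enable_vblank,
};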
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index a39fc36f815b..50870d8cbb76 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -21,7 +21,6 @@
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
-#include "sti_crtc.h"
#include "sti_drv.h"
#include "sti_plane.h"
@@ -146,9 +145,6 @@ static struct drm_driver sti_driver = {
.dumb_create = drm_gem_cma_dumb_create,
.fops = &sti_driver_fops,
- .enable_vblank = sti_crtc_enable_vblank,
- .disable_vblank = sti_crtc_disable_vblank,
-
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index df2ee86cd4c1..3d04bfca21a0 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -467,7 +467,7 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
bridge->of_node = dvo->dev.of_node;
drm_bridge_add(bridge);
- err = drm_bridge_attach(encoder, bridge, NULL);
+ err = drm_bridge_attach(encoder, bridge, NULL, 0);
if (err) {
DRM_ERROR("Failed to attach bridge\n");
return err;
@@ -534,7 +534,7 @@ static int sti_dvo_probe(struct platform_device *pdev)
DRM_ERROR("Invalid dvo resource\n");
return -ENOMEM;
}
- dvo->regs = devm_ioremap_nocache(dev, res->start,
+ dvo->regs = devm_ioremap(dev, res->start,
resource_size(res));
if (!dvo->regs)
return -ENOMEM;
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 8f7bf33815fd..f3f28d79b0e4 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -701,7 +701,7 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
bridge->driver_private = hda;
bridge->funcs = &sti_hda_bridge_funcs;
- drm_bridge_attach(encoder, bridge, NULL);
+ drm_bridge_attach(encoder, bridge, NULL, 0);
connector->encoder = encoder;
@@ -759,14 +759,14 @@ static int sti_hda_probe(struct platform_device *pdev)
DRM_ERROR("Invalid hda resource\n");
return -ENOMEM;
}
- hda->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ hda->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!hda->regs)
return -ENOMEM;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"video-dacs-ctrl");
if (res) {
- hda->video_dacs_ctrl = devm_ioremap_nocache(dev, res->start,
+ hda->video_dacs_ctrl = devm_ioremap(dev, res->start,
resource_size(res));
if (!hda->video_dacs_ctrl)
return -ENOMEM;
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 814560ead4e1..18eaf786ffa4 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1281,7 +1281,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
bridge->driver_private = hdmi;
bridge->funcs = &sti_hdmi_bridge_funcs;
- drm_bridge_attach(encoder, bridge, NULL);
+ drm_bridge_attach(encoder, bridge, NULL, 0);
connector->encoder = encoder;
@@ -1393,7 +1393,7 @@ static int sti_hdmi_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto release_adapter;
}
- hdmi->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ hdmi->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!hdmi->regs) {
ret = -ENOMEM;
goto release_adapter;
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index 5767e93dd1cd..c36a8da373cb 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -860,7 +860,7 @@ static int sti_tvout_probe(struct platform_device *pdev)
DRM_ERROR("Invalid glue resource\n");
return -ENOMEM;
}
- tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ tvout->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!tvout->regs)
return -ENOMEM;
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 0b17ac8a3faa..5e5f82b6a5d9 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -393,7 +393,7 @@ static int vtg_probe(struct platform_device *pdev)
DRM_ERROR("Get memory resource failed\n");
return -ENOMEM;
}
- vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ vtg->regs = devm_ioremap(dev, res->start, resource_size(res));
if (!vtg->regs) {
DRM_ERROR("failed to remap I/O memory\n");
return -ENOMEM;
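
ioremap_nocache() and its devm_ variant had become simple aliases of ioremap()/devm_ioremap() on all architectures, so the sti conversions above (and the radeon one earlier) are mechanical. The resulting idiom, as a sketch:

#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *example_map_regs(struct platform_device *pdev)
{
        struct resource *res;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return NULL;

        /* devm_ioremap() provides the same uncached mapping that
         * devm_ioremap_nocache() used to */
        return devm_ioremap(&pdev->dev, res->start, resource_size(res));
}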
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 5a9f9aca8bc2..ea9fcbdc68b3 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -72,8 +72,6 @@ static struct drm_driver drv_driver = {
.gem_prime_vmap = drm_gem_cma_prime_vmap,
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
- .get_scanout_position = ltdc_crtc_scanoutpos,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
};
static int drv_load(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index 4b165635b2d4..2e1f2664495d 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -377,7 +377,9 @@ static int dw_mipi_dsi_stm_probe(struct platform_device *pdev)
dsi->pllref_clk = devm_clk_get(dev, "ref");
if (IS_ERR(dsi->pllref_clk)) {
ret = PTR_ERR(dsi->pllref_clk);
- DRM_ERROR("Unable to get pll reference clock: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("Unable to get pll reference clock: %d\n",
+ ret);
goto err_clk_get;
}
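
This hunk and the tegra hdmi changes further down apply the same convention: -EPROBE_DEFER is an expected transient during boot, so it is not logged at error level. A hedged sketch of the pattern:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_clk(struct device *dev, struct clk **out)
{
        struct clk *clk = devm_clk_get(dev, "ref");

        if (IS_ERR(clk)) {
                int ret = PTR_ERR(clk);

                /* deferral is expected; only real failures are errors */
                if (ret != -EPROBE_DEFER)
                        dev_err(dev, "failed to get clock: %d\n", ret);
                return ret;
        }

        *out = clk;
        return 0;
}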
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index c2815e8ae1da..df585fe64f61 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -636,38 +636,13 @@ static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc,
}
}
-static const struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = {
- .mode_valid = ltdc_crtc_mode_valid,
- .mode_fixup = ltdc_crtc_mode_fixup,
- .mode_set_nofb = ltdc_crtc_mode_set_nofb,
- .atomic_flush = ltdc_crtc_atomic_flush,
- .atomic_enable = ltdc_crtc_atomic_enable,
- .atomic_disable = ltdc_crtc_atomic_disable,
-};
-
-static int ltdc_crtc_enable_vblank(struct drm_crtc *crtc)
-{
- struct ltdc_device *ldev = crtc_to_ltdc(crtc);
-
- DRM_DEBUG_DRIVER("\n");
- reg_set(ldev->regs, LTDC_IER, IER_LIE);
-
- return 0;
-}
-
-static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
-{
- struct ltdc_device *ldev = crtc_to_ltdc(crtc);
-
- DRM_DEBUG_DRIVER("\n");
- reg_clear(ldev->regs, LTDC_IER, IER_LIE);
-}
-
-bool ltdc_crtc_scanoutpos(struct drm_device *ddev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
+static bool ltdc_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
{
+ struct drm_device *ddev = crtc->dev;
struct ltdc_device *ldev = ddev->dev_private;
int line, vactive_start, vactive_end, vtotal;
@@ -710,6 +685,39 @@ bool ltdc_crtc_scanoutpos(struct drm_device *ddev, unsigned int pipe,
return true;
}
+static const struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = {
+ .mode_valid = ltdc_crtc_mode_valid,
+ .mode_fixup = ltdc_crtc_mode_fixup,
+ .mode_set_nofb = ltdc_crtc_mode_set_nofb,
+ .atomic_flush = ltdc_crtc_atomic_flush,
+ .atomic_enable = ltdc_crtc_atomic_enable,
+ .atomic_disable = ltdc_crtc_atomic_disable,
+ .get_scanout_position = ltdc_crtc_get_scanout_position,
+};
+
+static int ltdc_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct ltdc_device *ldev = crtc_to_ltdc(crtc);
+ struct drm_crtc_state *state = crtc->state;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (state->enable)
+ reg_set(ldev->regs, LTDC_IER, IER_LIE);
+ else
+ return -EPERM;
+
+ return 0;
+}
+
+static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct ltdc_device *ldev = crtc_to_ltdc(crtc);
+
+ DRM_DEBUG_DRIVER("\n");
+ reg_clear(ldev->regs, LTDC_IER, IER_LIE);
+}
+
static const struct drm_crtc_funcs ltdc_crtc_funcs = {
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
@@ -719,6 +727,7 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = ltdc_crtc_enable_vblank,
.disable_vblank = ltdc_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
.gamma_set = drm_atomic_helper_legacy_gamma_set,
};
@@ -1100,7 +1109,7 @@ static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
drm_encoder_helper_add(encoder, &ltdc_encoder_helper_funcs);
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret) {
drm_encoder_cleanup(encoder);
return -EINVAL;
@@ -1146,12 +1155,14 @@ static int ltdc_get_caps(struct drm_device *ddev)
ldev->caps.pad_max_freq_hz = 90000000;
if (ldev->caps.hw_version == HWVER_10200)
ldev->caps.pad_max_freq_hz = 65000000;
+ ldev->caps.nb_irq = 2;
break;
case HWVER_20101:
ldev->caps.reg_ofs = REG_OFS_4;
ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a1;
ldev->caps.non_alpha_only_l1 = false;
ldev->caps.pad_max_freq_hz = 150000000;
+ ldev->caps.nb_irq = 4;
break;
default:
return -ENODEV;
@@ -1251,13 +1262,21 @@ int ltdc_load(struct drm_device *ddev)
reg_clear(ldev->regs, LTDC_IER,
IER_LIE | IER_RRIE | IER_FUIE | IER_TERRIE);
- for (i = 0; i < MAX_IRQ; i++) {
+ ret = ltdc_get_caps(ddev);
+ if (ret) {
+ DRM_ERROR("hardware identifier (0x%08x) not supported!\n",
+ ldev->caps.hw_version);
+ goto err;
+ }
+
+ DRM_DEBUG_DRIVER("ltdc hw version 0x%08x\n", ldev->caps.hw_version);
+
+ for (i = 0; i < ldev->caps.nb_irq; i++) {
irq = platform_get_irq(pdev, i);
- if (irq == -EPROBE_DEFER)
+ if (irq < 0) {
+ ret = irq;
goto err;
-
- if (irq < 0)
- continue;
+ }
ret = devm_request_threaded_irq(dev, irq, ltdc_irq,
ltdc_irq_thread, IRQF_ONESHOT,
@@ -1268,16 +1287,6 @@ int ltdc_load(struct drm_device *ddev)
}
}
-
- ret = ltdc_get_caps(ddev);
- if (ret) {
- DRM_ERROR("hardware identifier (0x%08x) not supported!\n",
- ldev->caps.hw_version);
- goto err;
- }
-
- DRM_DEBUG_DRIVER("ltdc hw version 0x%08x\n", ldev->caps.hw_version);
-
/* Add endpoints panels or bridges if any */
for (i = 0; i < MAX_ENDPOINTS; i++) {
if (panel[i]) {
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index a1ad0ae3b006..f153b908c70e 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -19,6 +19,7 @@ struct ltdc_caps {
const u32 *pix_fmt_hw; /* supported pixel formats */
bool non_alpha_only_l1; /* non-native no-alpha formats on layer 1 */
int pad_max_freq_hz; /* max frequency supported by pad */
+ int nb_irq; /* number of hardware interrupts */
};
#define LTDC_MAX_LAYER 4
@@ -39,11 +40,6 @@ struct ltdc_device {
struct drm_atomic_state *suspend_state;
};
-bool ltdc_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
-
int ltdc_load(struct drm_device *ddev);
void ltdc_unload(struct drm_device *ddev);
void ltdc_suspend(struct drm_device *ddev);
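
ltdc now publishes its scanout-position callback through the CRTC helper funcs and points .get_vblank_timestamp at the generic helper, replacing the old drm_driver hooks. A sketch of the wiring with the hardware readout elided (example_* names illustrative):

#include <linux/ktime.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>

static bool example_get_scanout_position(struct drm_crtc *crtc,
                                         bool in_vblank_irq,
                                         int *vpos, int *hpos,
                                         ktime_t *stime, ktime_t *etime,
                                         const struct drm_display_mode *mode)
{
        /* read the current scanline from hardware here */
        *vpos = 0;
        *hpos = 0;

        return true;
}

static const struct drm_crtc_helper_funcs example_helper_funcs = {
        .get_scanout_position = example_get_scanout_position,
};

static const struct drm_crtc_funcs example_vblank_crtc_funcs = {
        .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};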
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 5ae67d526b1d..328272ff77d8 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -85,7 +85,6 @@ static int sun4i_drv_bind(struct device *dev)
}
drm_mode_config_init(drm);
- drm->mode_config.allow_fb_modifiers = true;
ret = component_bind_all(drm->dev, drm);
if (ret) {
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index 65b7a8739666..26e5c7ceb8ff 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -156,7 +156,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
}
if (bridge) {
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret) {
dev_err(drm->dev, "Couldn't attach our bridge\n");
goto err_cleanup_connector;
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index b27f16af50f5..3b23d5be3cf3 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -253,7 +253,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
}
if (rgb->bridge) {
- ret = drm_bridge_attach(encoder, rgb->bridge, NULL);
+ ret = drm_bridge_attach(encoder, rgb->bridge, NULL, 0);
if (ret) {
dev_err(drm->dev, "Couldn't attach our bridge\n");
goto err_cleanup_connector;
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index c81cdce6ed55..624437b27cdc 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -114,46 +114,71 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
}
}
+static void sun4i_tcon_setup_lvds_phy(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder)
+{
+ regmap_write(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN4I_TCON0_LVDS_ANA0_CK_EN |
+ SUN4I_TCON0_LVDS_ANA0_REG_V |
+ SUN4I_TCON0_LVDS_ANA0_REG_C |
+ SUN4I_TCON0_LVDS_ANA0_EN_MB |
+ SUN4I_TCON0_LVDS_ANA0_PD |
+ SUN4I_TCON0_LVDS_ANA0_DCHS);
+
+ udelay(2); /* delay at least 1200 ns */
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA1_REG,
+ SUN4I_TCON0_LVDS_ANA1_INIT,
+ SUN4I_TCON0_LVDS_ANA1_INIT);
+ udelay(1); /* delay at least 120 ns */
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA1_REG,
+ SUN4I_TCON0_LVDS_ANA1_UPDATE,
+ SUN4I_TCON0_LVDS_ANA1_UPDATE);
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN4I_TCON0_LVDS_ANA0_EN_MB,
+ SUN4I_TCON0_LVDS_ANA0_EN_MB);
+}
+
+static void sun6i_tcon_setup_lvds_phy(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder)
+{
+ u8 val;
+
+ regmap_write(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_C(2) |
+ SUN6I_TCON0_LVDS_ANA0_V(3) |
+ SUN6I_TCON0_LVDS_ANA0_PD(2) |
+ SUN6I_TCON0_LVDS_ANA0_EN_LDO);
+ udelay(2);
+
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_EN_MB,
+ SUN6I_TCON0_LVDS_ANA0_EN_MB);
+ udelay(2);
+
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVC,
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVC);
+
+ if (sun4i_tcon_get_pixel_depth(encoder) == 18)
+ val = 7;
+ else
+ val = 0xf;
+
+ regmap_write_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVD(0xf),
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVD(val));
+}
+
static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon,
const struct drm_encoder *encoder,
bool enabled)
{
if (enabled) {
- u8 val;
-
regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_IF_REG,
SUN4I_TCON0_LVDS_IF_EN,
SUN4I_TCON0_LVDS_IF_EN);
-
- /*
- * As their name suggest, these values only apply to the A31
- * and later SoCs. We'll have to rework this when merging
- * support for the older SoCs.
- */
- regmap_write(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
- SUN6I_TCON0_LVDS_ANA0_C(2) |
- SUN6I_TCON0_LVDS_ANA0_V(3) |
- SUN6I_TCON0_LVDS_ANA0_PD(2) |
- SUN6I_TCON0_LVDS_ANA0_EN_LDO);
- udelay(2);
-
- regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
- SUN6I_TCON0_LVDS_ANA0_EN_MB,
- SUN6I_TCON0_LVDS_ANA0_EN_MB);
- udelay(2);
-
- regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
- SUN6I_TCON0_LVDS_ANA0_EN_DRVC,
- SUN6I_TCON0_LVDS_ANA0_EN_DRVC);
-
- if (sun4i_tcon_get_pixel_depth(encoder) == 18)
- val = 7;
- else
- val = 0xf;
-
- regmap_write_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
- SUN6I_TCON0_LVDS_ANA0_EN_DRVD(0xf),
- SUN6I_TCON0_LVDS_ANA0_EN_DRVD(val));
+ if (tcon->quirks->setup_lvds_phy)
+ tcon->quirks->setup_lvds_phy(tcon, encoder);
} else {
regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_IF_REG,
SUN4I_TCON0_LVDS_IF_EN, 0);
@@ -1453,6 +1478,16 @@ static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
.dclk_min_div = 1,
};
+static const struct sun4i_tcon_quirks sun7i_a20_tcon0_quirks = {
+ .supports_lvds = true,
+ .has_channel_0 = true,
+ .has_channel_1 = true,
+ .dclk_min_div = 4,
+ /* Same display pipeline structure as A10 */
+ .set_mux = sun4i_a10_tcon_set_mux,
+ .setup_lvds_phy = sun4i_tcon_setup_lvds_phy,
+};
+
static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
@@ -1465,12 +1500,15 @@ static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
.has_channel_0 = true,
.has_lvds_alt = true,
.dclk_min_div = 1,
+ .setup_lvds_phy = sun6i_tcon_setup_lvds_phy,
+ .supports_lvds = true,
};
static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
.supports_lvds = true,
.has_channel_0 = true,
.dclk_min_div = 1,
+ .setup_lvds_phy = sun6i_tcon_setup_lvds_phy,
};
static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
@@ -1505,6 +1543,8 @@ const struct of_device_id sun4i_tcon_of_table[] = {
{ .compatible = "allwinner,sun6i-a31-tcon", .data = &sun6i_a31_quirks },
{ .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks },
{ .compatible = "allwinner,sun7i-a20-tcon", .data = &sun7i_a20_quirks },
+ { .compatible = "allwinner,sun7i-a20-tcon0", .data = &sun7i_a20_tcon0_quirks },
+ { .compatible = "allwinner,sun7i-a20-tcon1", .data = &sun7i_a20_quirks },
{ .compatible = "allwinner,sun8i-a23-tcon", .data = &sun8i_a33_quirks },
{ .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks },
{ .compatible = "allwinner,sun8i-a83t-tcon-lcd", .data = &sun8i_a83t_lcd_quirks },
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index a62ec826ae71..cfbf4e6c1679 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -193,6 +193,13 @@
#define SUN4I_TCON_MUX_CTRL_REG 0x200
#define SUN4I_TCON0_LVDS_ANA0_REG 0x220
+#define SUN4I_TCON0_LVDS_ANA0_DCHS BIT(16)
+#define SUN4I_TCON0_LVDS_ANA0_PD (BIT(20) | BIT(21))
+#define SUN4I_TCON0_LVDS_ANA0_EN_MB BIT(22)
+#define SUN4I_TCON0_LVDS_ANA0_REG_C (BIT(24) | BIT(25))
+#define SUN4I_TCON0_LVDS_ANA0_REG_V (BIT(26) | BIT(27))
+#define SUN4I_TCON0_LVDS_ANA0_CK_EN (BIT(29) | BIT(28))
+
#define SUN6I_TCON0_LVDS_ANA0_EN_MB BIT(31)
#define SUN6I_TCON0_LVDS_ANA0_EN_LDO BIT(30)
#define SUN6I_TCON0_LVDS_ANA0_EN_DRVC BIT(24)
@@ -201,6 +208,10 @@
#define SUN6I_TCON0_LVDS_ANA0_V(x) (((x) & 3) << 8)
#define SUN6I_TCON0_LVDS_ANA0_PD(x) (((x) & 3) << 4)
+#define SUN4I_TCON0_LVDS_ANA1_REG 0x224
+#define SUN4I_TCON0_LVDS_ANA1_INIT (0x1f << 26 | 0x1f << 10)
+#define SUN4I_TCON0_LVDS_ANA1_UPDATE (0x1f << 16 | 0x1f << 00)
+
#define SUN4I_TCON1_FILL_CTL_REG 0x300
#define SUN4I_TCON1_FILL_BEG0_REG 0x304
#define SUN4I_TCON1_FILL_END0_REG 0x308
@@ -228,6 +239,9 @@ struct sun4i_tcon_quirks {
/* callback to handle tcon muxing options */
int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
+ /* handler for LVDS setup routine */
+ void (*setup_lvds_phy)(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder);
};
struct sun4i_tcon {
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index a75fcb113172..059939789730 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -14,7 +14,6 @@
#include <linux/phy/phy-mipi-dphy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -27,7 +26,6 @@
#include <drm/drm_probe_helper.h>
#include "sun4i_crtc.h"
-#include "sun4i_drv.h"
#include "sun4i_tcon.h"
#include "sun6i_mipi_dsi.h"
@@ -722,10 +720,31 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
union phy_configure_opts opts = { 0 };
struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
u16 delay;
+ int err;
DRM_DEBUG_DRIVER("Enabling DSI output\n");
- pm_runtime_get_sync(dsi->dev);
+ err = regulator_enable(dsi->regulator);
+ if (err)
+ dev_warn(dsi->dev, "failed to enable VCC-DSI supply: %d\n", err);
+
+ reset_control_deassert(dsi->reset);
+ clk_prepare_enable(dsi->mod_clk);
+
+ /*
+ * Enable the DSI block.
+ */
+ regmap_write(dsi->regs, SUN6I_DSI_CTL_REG, SUN6I_DSI_CTL_EN);
+
+ regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
+ SUN6I_DSI_BASIC_CTL0_ECC_EN | SUN6I_DSI_BASIC_CTL0_CRC_EN);
+
+ regmap_write(dsi->regs, SUN6I_DSI_TRANS_START_REG, 10);
+ regmap_write(dsi->regs, SUN6I_DSI_TRANS_ZERO_REG, 0);
+
+ sun6i_dsi_inst_init(dsi, dsi->device);
+
+ regmap_write(dsi->regs, SUN6I_DSI_DEBUG_DATA_REG, 0xff);
delay = sun6i_dsi_get_video_start_delay(dsi, mode);
regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL1_REG,
@@ -749,7 +768,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
phy_configure(dsi->dphy, &opts);
phy_power_on(dsi->dphy);
- if (!IS_ERR(dsi->panel))
+ if (dsi->panel)
drm_panel_prepare(dsi->panel);
/*
@@ -764,7 +783,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
* ordering on the panels I've tested it with, so I guess this
* will do for now, until that IP is better understood.
*/
- if (!IS_ERR(dsi->panel))
+ if (dsi->panel)
drm_panel_enable(dsi->panel);
sun6i_dsi_start(dsi, DSI_START_HSC);
@@ -780,7 +799,7 @@ static void sun6i_dsi_encoder_disable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Disabling DSI output\n");
- if (!IS_ERR(dsi->panel)) {
+ if (dsi->panel) {
drm_panel_disable(dsi->panel);
drm_panel_unprepare(dsi->panel);
}
@@ -788,7 +807,9 @@ static void sun6i_dsi_encoder_disable(struct drm_encoder *encoder)
phy_power_off(dsi->dphy);
phy_exit(dsi->dphy);
- pm_runtime_put(dsi->dev);
+ clk_disable_unprepare(dsi->mod_clk);
+ reset_control_assert(dsi->reset);
+ regulator_disable(dsi->regulator);
}
static int sun6i_dsi_get_modes(struct drm_connector *connector)
@@ -805,7 +826,10 @@ static struct drm_connector_helper_funcs sun6i_dsi_connector_helper_funcs = {
static enum drm_connector_status
sun6i_dsi_connector_detect(struct drm_connector *connector, bool force)
{
- return connector_status_connected;
+ struct sun6i_dsi *dsi = connector_to_sun6i_dsi(connector);
+
+ return dsi->panel ? connector_status_connected :
+ connector_status_disconnected;
}
static const struct drm_connector_funcs sun6i_dsi_connector_funcs = {
@@ -942,11 +966,18 @@ static int sun6i_dsi_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
+ struct drm_panel *panel = of_drm_find_panel(device->dev.of_node);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
+ if (!dsi->drm || !dsi->drm->registered)
+ return -EPROBE_DEFER;
+
+ dsi->panel = panel;
dsi->device = device;
- dsi->panel = of_drm_find_panel(device->dev.of_node);
- if (IS_ERR(dsi->panel))
- return PTR_ERR(dsi->panel);
+
+ drm_panel_attach(dsi->panel, &dsi->connector);
+ drm_kms_helper_hotplug_event(dsi->drm);
dev_info(host->dev, "Attached device %s\n", device->name);
@@ -957,10 +988,14 @@ static int sun6i_dsi_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
+ struct drm_panel *panel = dsi->panel;
dsi->panel = NULL;
dsi->device = NULL;
+ drm_panel_detach(panel);
+ drm_kms_helper_hotplug_event(dsi->drm);
+
return 0;
}
@@ -1022,15 +1057,9 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master,
void *data)
{
struct drm_device *drm = data;
- struct sun4i_drv *drv = drm->dev_private;
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
int ret;
- if (!dsi->panel)
- return -EPROBE_DEFER;
-
- dsi->drv = drv;
-
drm_encoder_helper_add(&dsi->encoder,
&sun6i_dsi_enc_helper_funcs);
ret = drm_encoder_init(drm,
@@ -1056,7 +1085,8 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master,
}
drm_connector_attach_encoder(&dsi->connector, &dsi->encoder);
- drm_panel_attach(dsi->panel, &dsi->connector);
+
+ dsi->drm = drm;
return 0;
@@ -1070,7 +1100,7 @@ static void sun6i_dsi_unbind(struct device *dev, struct device *master,
{
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
- drm_panel_detach(dsi->panel);
+ dsi->drm = NULL;
}
static const struct component_ops sun6i_dsi_ops = {
@@ -1157,12 +1187,10 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
goto err_unprotect_clk;
}
- pm_runtime_enable(dev);
-
ret = mipi_dsi_host_register(&dsi->host);
if (ret) {
dev_err(dev, "Couldn't register MIPI-DSI host\n");
- goto err_pm_disable;
+ goto err_unprotect_clk;
}
ret = component_add(&pdev->dev, &sun6i_dsi_ops);
@@ -1175,8 +1203,6 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
err_remove_dsi_host:
mipi_dsi_host_unregister(&dsi->host);
-err_pm_disable:
- pm_runtime_disable(dev);
err_unprotect_clk:
clk_rate_exclusive_put(dsi->mod_clk);
err_attach_clk:
@@ -1192,7 +1218,6 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
component_del(&pdev->dev, &sun6i_dsi_ops);
mipi_dsi_host_unregister(&dsi->host);
- pm_runtime_disable(dev);
clk_rate_exclusive_put(dsi->mod_clk);
if (!IS_ERR(dsi->bus_clk))
@@ -1201,59 +1226,6 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused sun6i_dsi_runtime_resume(struct device *dev)
-{
- struct sun6i_dsi *dsi = dev_get_drvdata(dev);
- int err;
-
- err = regulator_enable(dsi->regulator);
- if (err) {
- dev_err(dsi->dev, "failed to enable VCC-DSI supply: %d\n", err);
- return err;
- }
-
- reset_control_deassert(dsi->reset);
- clk_prepare_enable(dsi->mod_clk);
-
- /*
- * Enable the DSI block.
- *
- * Some part of it can only be done once we get a number of
- * lanes, see sun6i_dsi_inst_init
- */
- regmap_write(dsi->regs, SUN6I_DSI_CTL_REG, SUN6I_DSI_CTL_EN);
-
- regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
- SUN6I_DSI_BASIC_CTL0_ECC_EN | SUN6I_DSI_BASIC_CTL0_CRC_EN);
-
- regmap_write(dsi->regs, SUN6I_DSI_TRANS_START_REG, 10);
- regmap_write(dsi->regs, SUN6I_DSI_TRANS_ZERO_REG, 0);
-
- if (dsi->device)
- sun6i_dsi_inst_init(dsi, dsi->device);
-
- regmap_write(dsi->regs, SUN6I_DSI_DEBUG_DATA_REG, 0xff);
-
- return 0;
-}
-
-static int __maybe_unused sun6i_dsi_runtime_suspend(struct device *dev)
-{
- struct sun6i_dsi *dsi = dev_get_drvdata(dev);
-
- clk_disable_unprepare(dsi->mod_clk);
- reset_control_assert(dsi->reset);
- regulator_disable(dsi->regulator);
-
- return 0;
-}
-
-static const struct dev_pm_ops sun6i_dsi_pm_ops = {
- SET_RUNTIME_PM_OPS(sun6i_dsi_runtime_suspend,
- sun6i_dsi_runtime_resume,
- NULL)
-};
-
static const struct of_device_id sun6i_dsi_of_table[] = {
{ .compatible = "allwinner,sun6i-a31-mipi-dsi" },
{ .compatible = "allwinner,sun50i-a64-mipi-dsi" },
@@ -1267,7 +1239,6 @@ static struct platform_driver sun6i_dsi_platform_driver = {
.driver = {
.name = "sun6i-mipi-dsi",
.of_match_table = sun6i_dsi_of_table,
- .pm = &sun6i_dsi_pm_ops,
},
};
module_platform_driver(sun6i_dsi_platform_driver);
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
index 3f4846f581ef..c863900ae3b4 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
@@ -28,8 +28,8 @@ struct sun6i_dsi {
struct phy *dphy;
struct device *dev;
- struct sun4i_drv *drv;
struct mipi_dsi_device *device;
+ struct drm_device *drm;
struct drm_panel *panel;
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 7c24f8f832a5..4a64f7ae437a 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -107,48 +107,128 @@ static const struct de2_fmt_info de2_formats[] = {
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_XRGB4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_ABGR4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_XBGR4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_RGBA4444,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_RGBX4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_BGRA4444,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_BGRX4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_ARGB1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_XRGB1555,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_ABGR1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_XBGR1555,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_RGBA5551,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_RGBX5551,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_BGRA5551,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_BGRX5551,
+ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_ARGB2101010,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB2101010,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_ABGR2101010,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR2101010,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_RGBA1010102,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA1010102,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_BGRA1010102,
+ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA1010102,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_UYVY,
.de2_fmt = SUN8I_MIXER_FBFMT_UYVY,
.rgb = false,
@@ -197,12 +277,6 @@ static const struct de2_fmt_info de2_formats[] = {
.csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
- .drm_fmt = DRM_FORMAT_YUV444,
- .de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
- },
- {
.drm_fmt = DRM_FORMAT_YUV422,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
.rgb = false,
@@ -221,12 +295,6 @@ static const struct de2_fmt_info de2_formats[] = {
.csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
- .drm_fmt = DRM_FORMAT_YVU444,
- .de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_YVU2RGB,
- },
- {
.drm_fmt = DRM_FORMAT_YVU422,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
.rgb = false,
@@ -244,6 +312,18 @@ static const struct de2_fmt_info de2_formats[] = {
.rgb = false,
.csc = SUN8I_CSC_MODE_YVU2RGB,
},
+ {
+ .drm_fmt = DRM_FORMAT_P010,
+ .de2_fmt = SUN8I_MIXER_FBFMT_P010_YUV,
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_P210,
+ .de2_fmt = SUN8I_MIXER_FBFMT_P210_YUV,
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
};
const struct de2_fmt_info *sun8i_mixer_format_info(u32 format)
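
sun8i_mixer_format_info() itself is outside this hunk; assuming it is the usual linear scan over de2_formats, a hedged reconstruction looks like this (not guaranteed to match the in-tree body):

#include <linux/kernel.h>

const struct de2_fmt_info *example_format_info(u32 format)
{
        unsigned int i;

        /* first match wins; the X-channel aliases above map to the same
         * hardware format code as their alpha counterparts */
        for (i = 0; i < ARRAY_SIZE(de2_formats); i++)
                if (de2_formats[i].drm_fmt == format)
                        return &de2_formats[i];

        return NULL;
}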
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index c6cc94057faf..345b28b0a80a 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -93,6 +93,10 @@
#define SUN8I_MIXER_FBFMT_ABGR1555 17
#define SUN8I_MIXER_FBFMT_RGBA5551 18
#define SUN8I_MIXER_FBFMT_BGRA5551 19
+#define SUN8I_MIXER_FBFMT_ARGB2101010 20
+#define SUN8I_MIXER_FBFMT_ABGR2101010 21
+#define SUN8I_MIXER_FBFMT_RGBA1010102 22
+#define SUN8I_MIXER_FBFMT_BGRA1010102 23
#define SUN8I_MIXER_FBFMT_YUYV 0
#define SUN8I_MIXER_FBFMT_UYVY 1
@@ -109,6 +113,13 @@
/* format 12 is semi-planar YUV411 UVUV */
/* format 13 is semi-planar YUV411 VUVU */
#define SUN8I_MIXER_FBFMT_YUV411 14
+/* format 15 doesn't exist */
+/* format 16 is P010 YVU */
+#define SUN8I_MIXER_FBFMT_P010_YUV 17
+/* format 18 is P210 YVU */
+#define SUN8I_MIXER_FBFMT_P210_YUV 19
+/* format 20 is packed YVU444 10-bit */
+/* format 21 is packed YUV444 10-bit */
/*
* Sub-engines listed below are unused for now. The EN registers are here only
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 42d445d23773..b8398ca18b0f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -398,24 +398,66 @@ static const struct drm_plane_funcs sun8i_vi_layer_funcs = {
};
/*
- * While all RGB formats are supported, VI planes don't support
- * alpha blending, so there is no point having formats with alpha
- * channel if their opaque analog exist.
+ * While DE2 VI layer supports same RGB formats as UI layer, alpha
+ * channel is ignored. This structure lists all unique variants
+ * where alpha channel is replaced with "don't care" (X) channel.
*/
static const u32 sun8i_vi_layer_formats[] = {
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_BGRX4444,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_XRGB8888,
+
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_YUV411,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YVU411,
+ DRM_FORMAT_YVU420,
+ DRM_FORMAT_YVU422,
+};
+
+static const u32 sun8i_vi_layer_de3_formats[] = {
DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_ABGR2101010,
DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ARGB2101010,
DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB8888,
DRM_FORMAT_BGR565,
DRM_FORMAT_BGR888,
+ DRM_FORMAT_BGRA1010102,
DRM_FORMAT_BGRA5551,
DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGBA1010102,
DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_RGBA8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
@@ -424,6 +466,8 @@ static const u32 sun8i_vi_layer_formats[] = {
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
DRM_FORMAT_NV61,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P210,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_YUYV,
@@ -431,11 +475,9 @@ static const u32 sun8i_vi_layer_formats[] = {
DRM_FORMAT_YUV411,
DRM_FORMAT_YUV420,
DRM_FORMAT_YUV422,
- DRM_FORMAT_YUV444,
DRM_FORMAT_YVU411,
DRM_FORMAT_YVU420,
DRM_FORMAT_YVU422,
- DRM_FORMAT_YVU444,
};
struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
@@ -443,19 +485,27 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
int index)
{
u32 supported_encodings, supported_ranges;
+ unsigned int plane_cnt, format_count;
struct sun8i_vi_layer *layer;
- unsigned int plane_cnt;
+ const u32 *formats;
int ret;
layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
if (!layer)
return ERR_PTR(-ENOMEM);
+ if (mixer->cfg->is_de3) {
+ formats = sun8i_vi_layer_de3_formats;
+ format_count = ARRAY_SIZE(sun8i_vi_layer_de3_formats);
+ } else {
+ formats = sun8i_vi_layer_formats;
+ format_count = ARRAY_SIZE(sun8i_vi_layer_formats);
+ }
+
/* possible crtcs are set later */
ret = drm_universal_plane_init(drm, &layer->plane, 0,
&sun8i_vi_layer_funcs,
- sun8i_vi_layer_formats,
- ARRAY_SIZE(sun8i_vi_layer_formats),
+ formats, format_count,
NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't initialize layer\n");
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 7c70fd31a4c2..1a7b08f35776 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -2503,7 +2503,6 @@ static int tegra_dc_couple(struct tegra_dc *dc)
static int tegra_dc_probe(struct platform_device *pdev)
{
- struct resource *regs;
struct tegra_dc *dc;
int err;
@@ -2560,8 +2559,7 @@ static int tegra_dc_probe(struct platform_device *pdev)
tegra_powergate_power_off(dc->powergate);
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dc->regs = devm_ioremap_resource(&pdev->dev, regs);
+ dc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dc->regs))
return PTR_ERR(dc->regs);
@@ -2573,7 +2571,13 @@ static int tegra_dc_probe(struct platform_device *pdev)
err = tegra_dc_rgb_probe(dc);
if (err < 0 && err != -ENODEV) {
- dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err);
+ const char *level = KERN_ERR;
+
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, dc->dev, "failed to probe RGB output: %d\n",
+ err);
return err;
}
@@ -2588,10 +2592,16 @@ static int tegra_dc_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
- return err;
+ goto disable_pm;
}
return 0;
+
+disable_pm:
+ pm_runtime_disable(&pdev->dev);
+ tegra_dc_rgb_remove(dc);
+
+ return err;
}
static int tegra_dc_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 84f0e01e3428..b8a328f53862 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -314,19 +314,13 @@ static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
struct drm_device *drm = fbdev->base.dev;
int err;
- err = drm_fb_helper_init(drm, &fbdev->base, max_connectors);
+ err = drm_fb_helper_init(drm, &fbdev->base);
if (err < 0) {
dev_err(drm->dev, "failed to initialize DRM FB helper: %d\n",
err);
return err;
}
- err = drm_fb_helper_single_add_all_connectors(&fbdev->base);
- if (err < 0) {
- dev_err(drm->dev, "failed to add connectors: %d\n", err);
- goto fini;
- }
-
err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
if (err < 0) {
dev_err(drm->dev, "failed to set initial configuration: %d\n",
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 6f117628f257..38252c0f068d 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1648,6 +1648,7 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data)
static int tegra_hdmi_probe(struct platform_device *pdev)
{
+ const char *level = KERN_ERR;
struct tegra_hdmi *hdmi;
struct resource *regs;
int err;
@@ -1686,21 +1687,36 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
}
hdmi->hdmi = devm_regulator_get(&pdev->dev, "hdmi");
- if (IS_ERR(hdmi->hdmi)) {
- dev_err(&pdev->dev, "failed to get HDMI regulator\n");
- return PTR_ERR(hdmi->hdmi);
+ err = PTR_ERR_OR_ZERO(hdmi->hdmi);
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, &pdev->dev,
+ "failed to get HDMI regulator: %d\n", err);
+ return err;
}
hdmi->pll = devm_regulator_get(&pdev->dev, "pll");
- if (IS_ERR(hdmi->pll)) {
- dev_err(&pdev->dev, "failed to get PLL regulator\n");
- return PTR_ERR(hdmi->pll);
+ err = PTR_ERR_OR_ZERO(hdmi->pll);
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, &pdev->dev,
+ "failed to get PLL regulator: %d\n", err);
+ return err;
}
hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(hdmi->vdd)) {
- dev_err(&pdev->dev, "failed to get VDD regulator\n");
- return PTR_ERR(hdmi->vdd);
+ err = PTR_ERR_OR_ZERO(hdmi->vdd);
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, &pdev->dev,
+ "failed to get VDD regulator: %d\n", err);
+ return err;
}
hdmi->output.dev = &pdev->dev;
diff --git a/drivers/gpu/drm/tidss/Kconfig b/drivers/gpu/drm/tidss/Kconfig
new file mode 100644
index 000000000000..f790a5215302
--- /dev/null
+++ b/drivers/gpu/drm/tidss/Kconfig
@@ -0,0 +1,14 @@
+config DRM_TIDSS
+ tristate "DRM Support for TI Keystone"
+ depends on DRM && OF
+ depends on ARM || ARM64 || COMPILE_TEST
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ help
+ The TI Keystone family of SoCs introduced a new generation of
+ Display SubSystem. There are currently three Keystone family
+ SoCs released with DSS, each with a somewhat different version
+ of it. The SoCs are 66AK2Gx, AM65x, and J721E. Set this to Y
+ or M to add display support for TI Keystone family
+ platforms.
diff --git a/drivers/gpu/drm/tidss/Makefile b/drivers/gpu/drm/tidss/Makefile
new file mode 100644
index 000000000000..312645271014
--- /dev/null
+++ b/drivers/gpu/drm/tidss/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
+tidss-y := tidss_crtc.o \
+ tidss_drv.o \
+ tidss_encoder.o \
+ tidss_kms.o \
+ tidss_irq.o \
+ tidss_plane.o \
+ tidss_scale_coefs.o \
+ tidss_dispc.o
+
+obj-$(CONFIG_DRM_TIDSS) += tidss.o
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
new file mode 100644
index 000000000000..d4ce9bab8c7e
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_irq.h"
+#include "tidss_plane.h"
+
+/* Page flip and frame done IRQs */
+
+static void tidss_crtc_finish_page_flip(struct tidss_crtc *tcrtc)
+{
+ struct drm_device *ddev = tcrtc->crtc.dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+ bool busy;
+
+ spin_lock_irqsave(&ddev->event_lock, flags);
+
+ /*
+ * New settings are taken into use at VFP, and the GO bit is cleared
+ * at the same time. This happens before the vertical blank interrupt.
+ * So there is a small chance that the driver sets the GO bit after
+ * VFP but before vblank, and we have to check for that case here.
+ */
+ busy = dispc_vp_go_busy(tidss->dispc, tcrtc->hw_videoport);
+ if (busy) {
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+ return;
+ }
+
+ event = tcrtc->event;
+ tcrtc->event = NULL;
+
+ if (!event) {
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+ return;
+ }
+
+ drm_crtc_send_vblank_event(&tcrtc->crtc, event);
+
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+
+ drm_crtc_vblank_put(&tcrtc->crtc);
+}
+
+void tidss_crtc_vblank_irq(struct drm_crtc *crtc)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+
+ drm_crtc_handle_vblank(crtc);
+
+ tidss_crtc_finish_page_flip(tcrtc);
+}
+
+void tidss_crtc_framedone_irq(struct drm_crtc *crtc)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+
+ complete(&tcrtc->framedone_completion);
+}
+
+void tidss_crtc_error_irq(struct drm_crtc *crtc, u64 irqstatus)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+
+ dev_err_ratelimited(crtc->dev->dev, "CRTC%u SYNC LOST: (irq %llx)\n",
+ tcrtc->hw_videoport, irqstatus);
+}
+
+/* drm_crtc_helper_funcs */
+
+static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct dispc_device *dispc = tidss->dispc;
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ u32 hw_videoport = tcrtc->hw_videoport;
+ const struct drm_display_mode *mode;
+ enum drm_mode_status ok;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ if (!state->enable)
+ return 0;
+
+ mode = &state->adjusted_mode;
+
+ ok = dispc_vp_mode_valid(dispc, hw_videoport, mode);
+ if (ok != MODE_OK) {
+ dev_dbg(ddev->dev, "%s: bad mode: %ux%u pclk %u kHz\n",
+ __func__, mode->hdisplay, mode->vdisplay, mode->clock);
+ return -EINVAL;
+ }
+
+ return dispc_vp_bus_check(dispc, hw_videoport, state);
+}
+
+/*
+ * This needs all affected planes to be present in the atomic
+ * state. The untouched planes are added to the state in
+ * tidss_atomic_check().
+ */
+static void tidss_crtc_position_planes(struct tidss_device *tidss,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state,
+ bool newmodeset)
+{
+ struct drm_atomic_state *ostate = old_state->state;
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ struct drm_crtc_state *cstate = crtc->state;
+ int layer;
+
+ if (!newmodeset && !cstate->zpos_changed &&
+ !to_tidss_crtc_state(cstate)->plane_pos_changed)
+ return;
+
+ for (layer = 0; layer < tidss->feat->num_planes; layer++) {
+ struct drm_plane_state *pstate;
+ struct drm_plane *plane;
+ bool layer_active = false;
+ int i;
+
+ for_each_new_plane_in_state(ostate, plane, pstate, i) {
+ if (pstate->crtc != crtc || !pstate->visible)
+ continue;
+
+ if (pstate->normalized_zpos == layer) {
+ layer_active = true;
+ break;
+ }
+ }
+
+ if (layer_active) {
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+
+ dispc_ovr_set_plane(tidss->dispc, tplane->hw_plane_id,
+ tcrtc->hw_videoport,
+ pstate->crtc_x, pstate->crtc_y,
+ layer);
+ }
+ dispc_ovr_enable_layer(tidss->dispc, tcrtc->hw_videoport, layer,
+ layer_active);
+ }
+}
+
+static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ unsigned long flags;
+
+ dev_dbg(ddev->dev,
+ "%s: %s enabled %d, needs modeset %d, event %p\n", __func__,
+ crtc->name, crtc->state->enable,
+ drm_atomic_crtc_needs_modeset(crtc->state), crtc->state->event);
+
+ /* There is nothing to do if CRTC is not going to be enabled. */
+ if (!crtc->state->enable)
+ return;
+
+ /*
+ * Flush CRTC changes with the GO bit only if a new modeset is not
+ * coming, so the CRTC stays enabled throughout the commit.
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc->state))
+ return;
+
+ /* If the GO bit is stuck we'd better quit here. */
+ if (WARN_ON(dispc_vp_go_busy(tidss->dispc, tcrtc->hw_videoport)))
+ return;
+
+ /* We should have an event if the CRTC is enabled throughout this commit. */
+ if (WARN_ON(!crtc->state->event))
+ return;
+
+ /* Write vp properties to HW if needed. */
+ dispc_vp_setup(tidss->dispc, tcrtc->hw_videoport, crtc->state, false);
+
+ /* Update plane positions if needed. */
+ tidss_crtc_position_planes(tidss, crtc, old_crtc_state, false);
+
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&ddev->event_lock, flags);
+ dispc_vp_go(tidss->dispc, tcrtc->hw_videoport);
+
+ WARN_ON(tcrtc->event);
+
+ tcrtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+}
+
+static void tidss_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ unsigned long flags;
+ int r;
+
+ dev_dbg(ddev->dev, "%s, event %p\n", __func__, crtc->state->event);
+
+ tidss_runtime_get(tidss);
+
+ r = dispc_vp_set_clk_rate(tidss->dispc, tcrtc->hw_videoport,
+ mode->clock * 1000);
+ if (r != 0)
+ return;
+
+ r = dispc_vp_enable_clk(tidss->dispc, tcrtc->hw_videoport);
+ if (r != 0)
+ return;
+
+ dispc_vp_setup(tidss->dispc, tcrtc->hw_videoport, crtc->state, true);
+ tidss_crtc_position_planes(tidss, crtc, old_state, true);
+
+ /* Turn vertical blanking interrupt reporting on. */
+ drm_crtc_vblank_on(crtc);
+
+ dispc_vp_prepare(tidss->dispc, tcrtc->hw_videoport, crtc->state);
+
+ dispc_vp_enable(tidss->dispc, tcrtc->hw_videoport, crtc->state);
+
+ spin_lock_irqsave(&ddev->event_lock, flags);
+
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+}
+
+static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ unsigned long flags;
+
+ dev_dbg(ddev->dev, "%s, event %p\n", __func__, crtc->state->event);
+
+ reinit_completion(&tcrtc->framedone_completion);
+
+ dispc_vp_disable(tidss->dispc, tcrtc->hw_videoport);
+
+ if (!wait_for_completion_timeout(&tcrtc->framedone_completion,
+ msecs_to_jiffies(500)))
+ dev_err(tidss->dev, "Timeout waiting for framedone on crtc %d\n",
+ tcrtc->hw_videoport);
+
+ dispc_vp_unprepare(tidss->dispc, tcrtc->hw_videoport);
+
+ spin_lock_irqsave(&ddev->event_lock, flags);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+
+ drm_crtc_vblank_off(crtc);
+
+ dispc_vp_disable_clk(tidss->dispc, tcrtc->hw_videoport);
+
+ tidss_runtime_put(tidss);
+}
+
+static
+enum drm_mode_status tidss_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+
+ return dispc_vp_mode_valid(tidss->dispc, tcrtc->hw_videoport, mode);
+}
+
+static const struct drm_crtc_helper_funcs tidss_crtc_helper_funcs = {
+ .atomic_check = tidss_crtc_atomic_check,
+ .atomic_flush = tidss_crtc_atomic_flush,
+ .atomic_enable = tidss_crtc_atomic_enable,
+ .atomic_disable = tidss_crtc_atomic_disable,
+
+ .mode_valid = tidss_crtc_mode_valid,
+};
+
+/* drm_crtc_funcs */
+
+static int tidss_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ tidss_runtime_get(tidss);
+
+ tidss_irq_enable_vblank(crtc);
+
+ return 0;
+}
+
+static void tidss_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ tidss_irq_disable_vblank(crtc);
+
+ tidss_runtime_put(tidss);
+}
+
+static void tidss_crtc_reset(struct drm_crtc *crtc)
+{
+ struct tidss_crtc_state *tcrtc;
+
+ if (crtc->state)
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+
+ kfree(crtc->state);
+
+ tcrtc = kzalloc(sizeof(*tcrtc), GFP_KERNEL);
+ if (!tcrtc) {
+ crtc->state = NULL;
+ return;
+ }
+
+ crtc->state = &tcrtc->base;
+ crtc->state->crtc = crtc;
+}
+
+static struct drm_crtc_state *tidss_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct tidss_crtc_state *state, *current_state;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ current_state = to_tidss_crtc_state(crtc->state);
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+ state->plane_pos_changed = false;
+
+ state->bus_format = current_state->bus_format;
+ state->bus_flags = current_state->bus_flags;
+
+ return &state->base;
+}
+
+static const struct drm_crtc_funcs tidss_crtc_funcs = {
+ .reset = tidss_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = tidss_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = tidss_crtc_enable_vblank,
+ .disable_vblank = tidss_crtc_disable_vblank,
+};
+
+struct tidss_crtc *tidss_crtc_create(struct tidss_device *tidss,
+ u32 hw_videoport,
+ struct drm_plane *primary)
+{
+ struct tidss_crtc *tcrtc;
+ struct drm_crtc *crtc;
+ unsigned int gamma_lut_size = 0;
+ bool has_ctm = tidss->feat->vp_feat.color.has_ctm;
+ int ret;
+
+ tcrtc = devm_kzalloc(tidss->dev, sizeof(*tcrtc), GFP_KERNEL);
+ if (!tcrtc)
+ return ERR_PTR(-ENOMEM);
+
+ tcrtc->hw_videoport = hw_videoport;
+ init_completion(&tcrtc->framedone_completion);
+
+ crtc = &tcrtc->crtc;
+
+ ret = drm_crtc_init_with_planes(&tidss->ddev, crtc, primary,
+ NULL, &tidss_crtc_funcs, NULL);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ drm_crtc_helper_add(crtc, &tidss_crtc_helper_funcs);
+
+ /*
+ * The dispc gamma functions adapt to whatever size we ask for,
+ * no matter what the HW supports. The X server assumes
+ * 256-element gamma tables, so let's use that.
+ */
+ if (tidss->feat->vp_feat.color.gamma_size)
+ gamma_lut_size = 256;
+
+ drm_crtc_enable_color_mgmt(crtc, 0, has_ctm, gamma_lut_size);
+ if (gamma_lut_size)
+ drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
+
+ return tcrtc;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.h b/drivers/gpu/drm/tidss/tidss_crtc.h
new file mode 100644
index 000000000000..09e773666228
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_crtc.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#ifndef __TIDSS_CRTC_H__
+#define __TIDSS_CRTC_H__
+
+#include <linux/completion.h>
+#include <linux/wait.h>
+
+#include <drm/drm_crtc.h>
+
+#define to_tidss_crtc(c) container_of((c), struct tidss_crtc, crtc)
+
+struct tidss_device;
+
+struct tidss_crtc {
+ struct drm_crtc crtc;
+
+ u32 hw_videoport;
+
+ struct drm_pending_vblank_event *event;
+
+ struct completion framedone_completion;
+};
+
+#define to_tidss_crtc_state(x) container_of(x, struct tidss_crtc_state, base)
+
+struct tidss_crtc_state {
+ /* Must be first. */
+ struct drm_crtc_state base;
+
+ bool plane_pos_changed;
+
+ u32 bus_format;
+ u32 bus_flags;
+};
+
+void tidss_crtc_vblank_irq(struct drm_crtc *crtc);
+void tidss_crtc_framedone_irq(struct drm_crtc *crtc);
+void tidss_crtc_error_irq(struct drm_crtc *crtc, u64 irqstatus);
+
+struct tidss_crtc *tidss_crtc_create(struct tidss_device *tidss,
+ u32 hw_videoport,
+ struct drm_plane *primary);
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
new file mode 100644
index 000000000000..29f42768e294
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -0,0 +1,2753 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <[email protected]>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_panel.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_irq.h"
+#include "tidss_plane.h"
+
+#include "tidss_dispc_regs.h"
+#include "tidss_scale_coefs.h"
+
+static const u16 tidss_k2g_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
+ [DSS_REVISION_OFF] = 0x00,
+ [DSS_SYSCONFIG_OFF] = 0x04,
+ [DSS_SYSSTATUS_OFF] = 0x08,
+ [DISPC_IRQ_EOI_OFF] = 0x20,
+ [DISPC_IRQSTATUS_RAW_OFF] = 0x24,
+ [DISPC_IRQSTATUS_OFF] = 0x28,
+ [DISPC_IRQENABLE_SET_OFF] = 0x2c,
+ [DISPC_IRQENABLE_CLR_OFF] = 0x30,
+
+ [DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF] = 0x40,
+ [DISPC_GLOBAL_BUFFER_OFF] = 0x44,
+
+ [DISPC_DBG_CONTROL_OFF] = 0x4c,
+ [DISPC_DBG_STATUS_OFF] = 0x50,
+
+ [DISPC_CLKGATING_DISABLE_OFF] = 0x54,
+};
+
+const struct dispc_features dispc_k2g_feats = {
+ .min_pclk_khz = 4375,
+
+ .max_pclk_khz = {
+ [DISPC_VP_DPI] = 150000,
+ },
+
+ /*
+ * XXX According to the TRM, RGB input buffer widths up to 2560
+ * should work with 3 taps, but in practice it only works up to 1280.
+ */
+ .scaling = {
+ .in_width_max_5tap_rgb = 1280,
+ .in_width_max_3tap_rgb = 1280,
+ .in_width_max_5tap_yuv = 2560,
+ .in_width_max_3tap_yuv = 2560,
+ .upscale_limit = 16,
+ .downscale_limit_5tap = 4,
+ .downscale_limit_3tap = 2,
+ /*
+ * The max supported pixel inc value is 255. The value
+ * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+ * The maximum bpp of all formats supported by the HW
+ * is 8. So the maximum supported xinc value is 32,
+ * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+ */
+ .xinc_max = 32,
+ },
+
+ .subrev = DISPC_K2G,
+
+ .common = "common",
+
+ .common_regs = tidss_k2g_common_regs,
+
+ .num_vps = 1,
+ .vp_name = { "vp1" },
+ .ovr_name = { "ovr1" },
+ .vpclk_name = { "vp1" },
+ .vp_bus_type = { DISPC_VP_DPI },
+
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 256,
+ .gamma_type = TIDSS_GAMMA_8BIT,
+ },
+ },
+
+ .num_planes = 1,
+ .vid_name = { "vid1" },
+ .vid_lite = { false },
+ .vid_order = { 0 },
+};
+
+static const u16 tidss_am65x_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
+ [DSS_REVISION_OFF] = 0x4,
+ [DSS_SYSCONFIG_OFF] = 0x8,
+ [DSS_SYSSTATUS_OFF] = 0x20,
+ [DISPC_IRQ_EOI_OFF] = 0x24,
+ [DISPC_IRQSTATUS_RAW_OFF] = 0x28,
+ [DISPC_IRQSTATUS_OFF] = 0x2c,
+ [DISPC_IRQENABLE_SET_OFF] = 0x30,
+ [DISPC_IRQENABLE_CLR_OFF] = 0x40,
+ [DISPC_VID_IRQENABLE_OFF] = 0x44,
+ [DISPC_VID_IRQSTATUS_OFF] = 0x58,
+ [DISPC_VP_IRQENABLE_OFF] = 0x70,
+ [DISPC_VP_IRQSTATUS_OFF] = 0x7c,
+
+ [WB_IRQENABLE_OFF] = 0x88,
+ [WB_IRQSTATUS_OFF] = 0x8c,
+
+ [DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF] = 0x90,
+ [DISPC_GLOBAL_OUTPUT_ENABLE_OFF] = 0x94,
+ [DISPC_GLOBAL_BUFFER_OFF] = 0x98,
+ [DSS_CBA_CFG_OFF] = 0x9c,
+ [DISPC_DBG_CONTROL_OFF] = 0xa0,
+ [DISPC_DBG_STATUS_OFF] = 0xa4,
+ [DISPC_CLKGATING_DISABLE_OFF] = 0xa8,
+ [DISPC_SECURE_DISABLE_OFF] = 0xac,
+};
+
+const struct dispc_features dispc_am65x_feats = {
+ .max_pclk_khz = {
+ [DISPC_VP_DPI] = 165000,
+ [DISPC_VP_OLDI] = 165000,
+ },
+
+ .scaling = {
+ .in_width_max_5tap_rgb = 1280,
+ .in_width_max_3tap_rgb = 2560,
+ .in_width_max_5tap_yuv = 2560,
+ .in_width_max_3tap_yuv = 4096,
+ .upscale_limit = 16,
+ .downscale_limit_5tap = 4,
+ .downscale_limit_3tap = 2,
+ /*
+ * The max supported pixel inc value is 255. The value
+ * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+ * The maximum bpp of all formats supported by the HW
+ * is 8. So the maximum supported xinc value is 32,
+ * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+ */
+ .xinc_max = 32,
+ },
+
+ .subrev = DISPC_AM65X,
+
+ .common = "common",
+ .common_regs = tidss_am65x_common_regs,
+
+ .num_vps = 2,
+ .vp_name = { "vp1", "vp2" },
+ .ovr_name = { "ovr1", "ovr2" },
+ .vpclk_name = { "vp1", "vp2" },
+ .vp_bus_type = { DISPC_VP_OLDI, DISPC_VP_DPI },
+
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 256,
+ .gamma_type = TIDSS_GAMMA_8BIT,
+ },
+ },
+
+ .num_planes = 2,
+ /* note: vid is plane_id 0 and vidl1 is plane_id 1 */
+ .vid_name = { "vid", "vidl1" },
+ .vid_lite = { false, true, },
+ .vid_order = { 1, 0 },
+
+ .errata = {
+ .i2000 = true,
+ },
+};
+
+static const u16 tidss_j721e_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
+ [DSS_REVISION_OFF] = 0x4,
+ [DSS_SYSCONFIG_OFF] = 0x8,
+ [DSS_SYSSTATUS_OFF] = 0x20,
+ [DISPC_IRQ_EOI_OFF] = 0x80,
+ [DISPC_IRQSTATUS_RAW_OFF] = 0x28,
+ [DISPC_IRQSTATUS_OFF] = 0x2c,
+ [DISPC_IRQENABLE_SET_OFF] = 0x30,
+ [DISPC_IRQENABLE_CLR_OFF] = 0x34,
+ [DISPC_VID_IRQENABLE_OFF] = 0x38,
+ [DISPC_VID_IRQSTATUS_OFF] = 0x48,
+ [DISPC_VP_IRQENABLE_OFF] = 0x58,
+ [DISPC_VP_IRQSTATUS_OFF] = 0x68,
+
+ [WB_IRQENABLE_OFF] = 0x78,
+ [WB_IRQSTATUS_OFF] = 0x7c,
+
+ [DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF] = 0x98,
+ [DISPC_GLOBAL_OUTPUT_ENABLE_OFF] = 0x9c,
+ [DISPC_GLOBAL_BUFFER_OFF] = 0xa0,
+ [DSS_CBA_CFG_OFF] = 0xa4,
+ [DISPC_DBG_CONTROL_OFF] = 0xa8,
+ [DISPC_DBG_STATUS_OFF] = 0xac,
+ [DISPC_CLKGATING_DISABLE_OFF] = 0xb0,
+ [DISPC_SECURE_DISABLE_OFF] = 0x90,
+
+ [FBDC_REVISION_1_OFF] = 0xb8,
+ [FBDC_REVISION_2_OFF] = 0xbc,
+ [FBDC_REVISION_3_OFF] = 0xc0,
+ [FBDC_REVISION_4_OFF] = 0xc4,
+ [FBDC_REVISION_5_OFF] = 0xc8,
+ [FBDC_REVISION_6_OFF] = 0xcc,
+ [FBDC_COMMON_CONTROL_OFF] = 0xd0,
+ [FBDC_CONSTANT_COLOR_0_OFF] = 0xd4,
+ [FBDC_CONSTANT_COLOR_1_OFF] = 0xd8,
+ [DISPC_CONNECTIONS_OFF] = 0xe4,
+ [DISPC_MSS_VP1_OFF] = 0xe8,
+ [DISPC_MSS_VP3_OFF] = 0xec,
+};
+
+const struct dispc_features dispc_j721e_feats = {
+ .max_pclk_khz = {
+ [DISPC_VP_DPI] = 170000,
+ [DISPC_VP_INTERNAL] = 600000,
+ },
+
+ .scaling = {
+ .in_width_max_5tap_rgb = 2048,
+ .in_width_max_3tap_rgb = 4096,
+ .in_width_max_5tap_yuv = 4096,
+ .in_width_max_3tap_yuv = 4096,
+ .upscale_limit = 16,
+ .downscale_limit_5tap = 4,
+ .downscale_limit_3tap = 2,
+ /*
+ * The max supported pixel inc value is 255. The value
+ * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+ * The maximum bpp of all formats supported by the HW
+ * is 8. So the maximum supported xinc value is 32,
+ * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+ */
+ .xinc_max = 32,
+ },
+
+ .subrev = DISPC_J721E,
+
+ .common = "common_m",
+ .common_regs = tidss_j721e_common_regs,
+
+ .num_vps = 4,
+ .vp_name = { "vp1", "vp2", "vp3", "vp4" },
+ .ovr_name = { "ovr1", "ovr2", "ovr3", "ovr4" },
+ .vpclk_name = { "vp1", "vp2", "vp3", "vp4" },
+ /* Currently hard coded VP routing (see dispc_initial_config()) */
+ .vp_bus_type = { DISPC_VP_INTERNAL, DISPC_VP_DPI,
+ DISPC_VP_INTERNAL, DISPC_VP_DPI, },
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 1024,
+ .gamma_type = TIDSS_GAMMA_10BIT,
+ },
+ },
+ .num_planes = 4,
+ .vid_name = { "vid1", "vidl1", "vid2", "vidl2" },
+ .vid_lite = { 0, 1, 0, 1, },
+ .vid_order = { 1, 3, 0, 2 },
+};
+
+static const u16 *dispc_common_regmap;
+
+struct dss_vp_data {
+ u32 *gamma_table;
+};
+
+struct dispc_device {
+ struct tidss_device *tidss;
+ struct device *dev;
+
+ void __iomem *base_common;
+ void __iomem *base_vid[TIDSS_MAX_PLANES];
+ void __iomem *base_ovr[TIDSS_MAX_PORTS];
+ void __iomem *base_vp[TIDSS_MAX_PORTS];
+
+ struct regmap *oldi_io_ctrl;
+
+ struct clk *vp_clk[TIDSS_MAX_PORTS];
+
+ const struct dispc_features *feat;
+
+ struct clk *fclk;
+
+ bool is_enabled;
+
+ struct dss_vp_data vp_data[TIDSS_MAX_PORTS];
+
+ u32 *fourccs;
+ u32 num_fourccs;
+
+ u32 memory_bandwidth_limit;
+};
+
+static void dispc_write(struct dispc_device *dispc, u16 reg, u32 val)
+{
+ iowrite32(val, dispc->base_common + reg);
+}
+
+static u32 dispc_read(struct dispc_device *dispc, u16 reg)
+{
+ return ioread32(dispc->base_common + reg);
+}
+
+static
+void dispc_vid_write(struct dispc_device *dispc, u32 hw_plane, u16 reg, u32 val)
+{
+ void __iomem *base = dispc->base_vid[hw_plane];
+
+ iowrite32(val, base + reg);
+}
+
+static u32 dispc_vid_read(struct dispc_device *dispc, u32 hw_plane, u16 reg)
+{
+ void __iomem *base = dispc->base_vid[hw_plane];
+
+ return ioread32(base + reg);
+}
+
+static void dispc_ovr_write(struct dispc_device *dispc, u32 hw_videoport,
+ u16 reg, u32 val)
+{
+ void __iomem *base = dispc->base_ovr[hw_videoport];
+
+ iowrite32(val, base + reg);
+}
+
+static u32 dispc_ovr_read(struct dispc_device *dispc, u32 hw_videoport, u16 reg)
+{
+ void __iomem *base = dispc->base_ovr[hw_videoport];
+
+ return ioread32(base + reg);
+}
+
+static void dispc_vp_write(struct dispc_device *dispc, u32 hw_videoport,
+ u16 reg, u32 val)
+{
+ void __iomem *base = dispc->base_vp[hw_videoport];
+
+ iowrite32(val, base + reg);
+}
+
+static u32 dispc_vp_read(struct dispc_device *dispc, u32 hw_videoport, u16 reg)
+{
+ void __iomem *base = dispc->base_vp[hw_videoport];
+
+ return ioread32(base + reg);
+}
+
+/*
+ * The TRM gives bitfields as start:end, where start is the higher bit
+ * number, for example 7:0.
+ */
+
+static u32 FLD_MASK(u32 start, u32 end)
+{
+ return ((1 << (start - end + 1)) - 1) << end;
+}
+
+static u32 FLD_VAL(u32 val, u32 start, u32 end)
+{
+ return (val << end) & FLD_MASK(start, end);
+}
+
+static u32 FLD_GET(u32 val, u32 start, u32 end)
+{
+ return (val & FLD_MASK(start, end)) >> end;
+}
+
+static u32 FLD_MOD(u32 orig, u32 val, u32 start, u32 end)
+{
+ return (orig & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end);
+}
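+
+/*
+ * For illustration (register values made up for the example):
+ * FLD_MASK(10, 8) is 0x700, FLD_VAL(5, 10, 8) is 0x500,
+ * FLD_GET(0x1234, 7, 4) is 0x3, and FLD_MOD(0x1234, 0xf, 7, 4)
+ * is 0x12f4.
+ */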
+
+static u32 REG_GET(struct dispc_device *dispc, u32 idx, u32 start, u32 end)
+{
+ return FLD_GET(dispc_read(dispc, idx), start, end);
+}
+
+static void REG_FLD_MOD(struct dispc_device *dispc, u32 idx, u32 val,
+ u32 start, u32 end)
+{
+ dispc_write(dispc, idx, FLD_MOD(dispc_read(dispc, idx), val,
+ start, end));
+}
+
+static u32 VID_REG_GET(struct dispc_device *dispc, u32 hw_plane, u32 idx,
+ u32 start, u32 end)
+{
+ return FLD_GET(dispc_vid_read(dispc, hw_plane, idx), start, end);
+}
+
+static void VID_REG_FLD_MOD(struct dispc_device *dispc, u32 hw_plane, u32 idx,
+ u32 val, u32 start, u32 end)
+{
+ dispc_vid_write(dispc, hw_plane, idx,
+ FLD_MOD(dispc_vid_read(dispc, hw_plane, idx),
+ val, start, end));
+}
+
+static u32 VP_REG_GET(struct dispc_device *dispc, u32 vp, u32 idx,
+ u32 start, u32 end)
+{
+ return FLD_GET(dispc_vp_read(dispc, vp, idx), start, end);
+}
+
+static void VP_REG_FLD_MOD(struct dispc_device *dispc, u32 vp, u32 idx, u32 val,
+ u32 start, u32 end)
+{
+ dispc_vp_write(dispc, vp, idx, FLD_MOD(dispc_vp_read(dispc, vp, idx),
+ val, start, end));
+}
+
+__maybe_unused
+static u32 OVR_REG_GET(struct dispc_device *dispc, u32 ovr, u32 idx,
+ u32 start, u32 end)
+{
+ return FLD_GET(dispc_ovr_read(dispc, ovr, idx), start, end);
+}
+
+static void OVR_REG_FLD_MOD(struct dispc_device *dispc, u32 ovr, u32 idx,
+ u32 val, u32 start, u32 end)
+{
+ dispc_ovr_write(dispc, ovr, idx,
+ FLD_MOD(dispc_ovr_read(dispc, ovr, idx),
+ val, start, end));
+}
+
+static dispc_irq_t dispc_vp_irq_from_raw(u32 stat, u32 hw_videoport)
+{
+ dispc_irq_t vp_stat = 0;
+
+ if (stat & BIT(0))
+ vp_stat |= DSS_IRQ_VP_FRAME_DONE(hw_videoport);
+ if (stat & BIT(1))
+ vp_stat |= DSS_IRQ_VP_VSYNC_EVEN(hw_videoport);
+ if (stat & BIT(2))
+ vp_stat |= DSS_IRQ_VP_VSYNC_ODD(hw_videoport);
+ if (stat & BIT(4))
+ vp_stat |= DSS_IRQ_VP_SYNC_LOST(hw_videoport);
+
+ return vp_stat;
+}
+
+static u32 dispc_vp_irq_to_raw(dispc_irq_t vpstat, u32 hw_videoport)
+{
+ u32 stat = 0;
+
+ if (vpstat & DSS_IRQ_VP_FRAME_DONE(hw_videoport))
+ stat |= BIT(0);
+ if (vpstat & DSS_IRQ_VP_VSYNC_EVEN(hw_videoport))
+ stat |= BIT(1);
+ if (vpstat & DSS_IRQ_VP_VSYNC_ODD(hw_videoport))
+ stat |= BIT(2);
+ if (vpstat & DSS_IRQ_VP_SYNC_LOST(hw_videoport))
+ stat |= BIT(4);
+
+ return stat;
+}
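+
+/*
+ * Note: the _from_raw and _to_raw helpers are exact inverses over the
+ * bits they handle, e.g. dispc_vp_irq_to_raw() of a _from_raw() result
+ * preserves bits 0-2 and 4 of the raw status.
+ */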
+
+static dispc_irq_t dispc_vid_irq_from_raw(u32 stat, u32 hw_plane)
+{
+ dispc_irq_t vid_stat = 0;
+
+ if (stat & BIT(0))
+ vid_stat |= DSS_IRQ_PLANE_FIFO_UNDERFLOW(hw_plane);
+
+ return vid_stat;
+}
+
+static u32 dispc_vid_irq_to_raw(dispc_irq_t vidstat, u32 hw_plane)
+{
+ u32 stat = 0;
+
+ if (vidstat & DSS_IRQ_PLANE_FIFO_UNDERFLOW(hw_plane))
+ stat |= BIT(0);
+
+ return stat;
+}
+
+static dispc_irq_t dispc_k2g_vp_read_irqstatus(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 stat = dispc_vp_read(dispc, hw_videoport, DISPC_VP_K2G_IRQSTATUS);
+
+ return dispc_vp_irq_from_raw(stat, hw_videoport);
+}
+
+static void dispc_k2g_vp_write_irqstatus(struct dispc_device *dispc,
+ u32 hw_videoport, dispc_irq_t vpstat)
+{
+ u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_K2G_IRQSTATUS, stat);
+}
+
+static dispc_irq_t dispc_k2g_vid_read_irqstatus(struct dispc_device *dispc,
+ u32 hw_plane)
+{
+ u32 stat = dispc_vid_read(dispc, hw_plane, DISPC_VID_K2G_IRQSTATUS);
+
+ return dispc_vid_irq_from_raw(stat, hw_plane);
+}
+
+static void dispc_k2g_vid_write_irqstatus(struct dispc_device *dispc,
+ u32 hw_plane, dispc_irq_t vidstat)
+{
+ u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_K2G_IRQSTATUS, stat);
+}
+
+static dispc_irq_t dispc_k2g_vp_read_irqenable(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 stat = dispc_vp_read(dispc, hw_videoport, DISPC_VP_K2G_IRQENABLE);
+
+ return dispc_vp_irq_from_raw(stat, hw_videoport);
+}
+
+static void dispc_k2g_vp_set_irqenable(struct dispc_device *dispc,
+ u32 hw_videoport, dispc_irq_t vpstat)
+{
+ u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_K2G_IRQENABLE, stat);
+}
+
+static dispc_irq_t dispc_k2g_vid_read_irqenable(struct dispc_device *dispc,
+ u32 hw_plane)
+{
+ u32 stat = dispc_vid_read(dispc, hw_plane, DISPC_VID_K2G_IRQENABLE);
+
+ return dispc_vid_irq_from_raw(stat, hw_plane);
+}
+
+static void dispc_k2g_vid_set_irqenable(struct dispc_device *dispc,
+ u32 hw_plane, dispc_irq_t vidstat)
+{
+ u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_K2G_IRQENABLE, stat);
+}
+
+static void dispc_k2g_clear_irqstatus(struct dispc_device *dispc,
+ dispc_irq_t mask)
+{
+ dispc_k2g_vp_write_irqstatus(dispc, 0, mask);
+ dispc_k2g_vid_write_irqstatus(dispc, 0, mask);
+}
+
+static
+dispc_irq_t dispc_k2g_read_and_clear_irqstatus(struct dispc_device *dispc)
+{
+ dispc_irq_t stat = 0;
+
+ /* always clear the top level irqstatus */
+ dispc_write(dispc, DISPC_IRQSTATUS,
+ dispc_read(dispc, DISPC_IRQSTATUS));
+
+ stat |= dispc_k2g_vp_read_irqstatus(dispc, 0);
+ stat |= dispc_k2g_vid_read_irqstatus(dispc, 0);
+
+ dispc_k2g_clear_irqstatus(dispc, stat);
+
+ return stat;
+}
+
+static dispc_irq_t dispc_k2g_read_irqenable(struct dispc_device *dispc)
+{
+ dispc_irq_t stat = 0;
+
+ stat |= dispc_k2g_vp_read_irqenable(dispc, 0);
+ stat |= dispc_k2g_vid_read_irqenable(dispc, 0);
+
+ return stat;
+}
+
+static
+void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
+{
+ dispc_irq_t old_mask = dispc_k2g_read_irqenable(dispc);
+
+ /* clear the irqstatus for newly enabled irqs */
+ dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & mask);
+
+ dispc_k2g_vp_set_irqenable(dispc, 0, mask);
+ dispc_k2g_vid_set_irqenable(dispc, 0, mask);
+
+ dispc_write(dispc, DISPC_IRQENABLE_SET, (1 << 0) | (1 << 7));
+
+ /* flush posted write */
+ dispc_k2g_read_irqenable(dispc);
+}
+
+static dispc_irq_t dispc_k3_vp_read_irqstatus(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 stat = dispc_read(dispc, DISPC_VP_IRQSTATUS(hw_videoport));
+
+ return dispc_vp_irq_from_raw(stat, hw_videoport);
+}
+
+static void dispc_k3_vp_write_irqstatus(struct dispc_device *dispc,
+ u32 hw_videoport, dispc_irq_t vpstat)
+{
+ u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);
+
+ dispc_write(dispc, DISPC_VP_IRQSTATUS(hw_videoport), stat);
+}
+
+static dispc_irq_t dispc_k3_vid_read_irqstatus(struct dispc_device *dispc,
+ u32 hw_plane)
+{
+ u32 stat = dispc_read(dispc, DISPC_VID_IRQSTATUS(hw_plane));
+
+ return dispc_vid_irq_from_raw(stat, hw_plane);
+}
+
+static void dispc_k3_vid_write_irqstatus(struct dispc_device *dispc,
+ u32 hw_plane, dispc_irq_t vidstat)
+{
+ u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
+
+ dispc_write(dispc, DISPC_VID_IRQSTATUS(hw_plane), stat);
+}
+
+static dispc_irq_t dispc_k3_vp_read_irqenable(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 stat = dispc_read(dispc, DISPC_VP_IRQENABLE(hw_videoport));
+
+ return dispc_vp_irq_from_raw(stat, hw_videoport);
+}
+
+static void dispc_k3_vp_set_irqenable(struct dispc_device *dispc,
+ u32 hw_videoport, dispc_irq_t vpstat)
+{
+ u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);
+
+ dispc_write(dispc, DISPC_VP_IRQENABLE(hw_videoport), stat);
+}
+
+static dispc_irq_t dispc_k3_vid_read_irqenable(struct dispc_device *dispc,
+ u32 hw_plane)
+{
+ u32 stat = dispc_read(dispc, DISPC_VID_IRQENABLE(hw_plane));
+
+ return dispc_vid_irq_from_raw(stat, hw_plane);
+}
+
+static void dispc_k3_vid_set_irqenable(struct dispc_device *dispc,
+ u32 hw_plane, dispc_irq_t vidstat)
+{
+ u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
+
+ dispc_write(dispc, DISPC_VID_IRQENABLE(hw_plane), stat);
+}
+
+static
+void dispc_k3_clear_irqstatus(struct dispc_device *dispc, dispc_irq_t clearmask)
+{
+ unsigned int i;
+ u32 top_clear = 0;
+
+ for (i = 0; i < dispc->feat->num_vps; ++i) {
+ if (clearmask & DSS_IRQ_VP_MASK(i)) {
+ dispc_k3_vp_write_irqstatus(dispc, i, clearmask);
+ top_clear |= BIT(i);
+ }
+ }
+ for (i = 0; i < dispc->feat->num_planes; ++i) {
+ if (clearmask & DSS_IRQ_PLANE_MASK(i)) {
+ dispc_k3_vid_write_irqstatus(dispc, i, clearmask);
+ top_clear |= BIT(4 + i);
+ }
+ }
+ if (dispc->feat->subrev == DISPC_K2G)
+ return;
+
+ dispc_write(dispc, DISPC_IRQSTATUS, top_clear);
+
+ /* Flush posted writes */
+ dispc_read(dispc, DISPC_IRQSTATUS);
+}
+
+static
+dispc_irq_t dispc_k3_read_and_clear_irqstatus(struct dispc_device *dispc)
+{
+ dispc_irq_t status = 0;
+ unsigned int i;
+
+ for (i = 0; i < dispc->feat->num_vps; ++i)
+ status |= dispc_k3_vp_read_irqstatus(dispc, i);
+
+ for (i = 0; i < dispc->feat->num_planes; ++i)
+ status |= dispc_k3_vid_read_irqstatus(dispc, i);
+
+ dispc_k3_clear_irqstatus(dispc, status);
+
+ return status;
+}
+
+static dispc_irq_t dispc_k3_read_irqenable(struct dispc_device *dispc)
+{
+ dispc_irq_t enable = 0;
+ unsigned int i;
+
+ for (i = 0; i < dispc->feat->num_vps; ++i)
+ enable |= dispc_k3_vp_read_irqenable(dispc, i);
+
+ for (i = 0; i < dispc->feat->num_planes; ++i)
+ enable |= dispc_k3_vid_read_irqenable(dispc, i);
+
+ return enable;
+}
+
+static void dispc_k3_set_irqenable(struct dispc_device *dispc,
+ dispc_irq_t mask)
+{
+ unsigned int i;
+ u32 main_enable = 0, main_disable = 0;
+ dispc_irq_t old_mask;
+
+ old_mask = dispc_k3_read_irqenable(dispc);
+
+ /* clear the irqstatus for newly enabled irqs */
+ dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & mask);
+
+ for (i = 0; i < dispc->feat->num_vps; ++i) {
+ dispc_k3_vp_set_irqenable(dispc, i, mask);
+ if (mask & DSS_IRQ_VP_MASK(i))
+ main_enable |= BIT(i); /* VP IRQ */
+ else
+ main_disable |= BIT(i); /* VP IRQ */
+ }
+
+ for (i = 0; i < dispc->feat->num_planes; ++i) {
+ dispc_k3_vid_set_irqenable(dispc, i, mask);
+ if (mask & DSS_IRQ_PLANE_MASK(i))
+ main_enable |= BIT(i + 4); /* VID IRQ */
+ else
+ main_disable |= BIT(i + 4); /* VID IRQ */
+ }
+
+ if (main_enable)
+ dispc_write(dispc, DISPC_IRQENABLE_SET, main_enable);
+
+ if (main_disable)
+ dispc_write(dispc, DISPC_IRQENABLE_CLR, main_disable);
+
+ /* Flush posted writes */
+ dispc_read(dispc, DISPC_IRQENABLE_SET);
+}
+
+dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc)
+{
+ switch (dispc->feat->subrev) {
+ case DISPC_K2G:
+ return dispc_k2g_read_and_clear_irqstatus(dispc);
+ case DISPC_AM65X:
+ case DISPC_J721E:
+ return dispc_k3_read_and_clear_irqstatus(dispc);
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
+void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
+{
+ switch (dispc->feat->subrev) {
+ case DISPC_K2G:
+ dispc_k2g_set_irqenable(dispc, mask);
+ break;
+ case DISPC_AM65X:
+ case DISPC_J721E:
+ dispc_k3_set_irqenable(dispc, mask);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+enum dispc_oldi_mode_reg_val { SPWG_18 = 0, JEIDA_24 = 1, SPWG_24 = 2 };
+
+struct dispc_bus_format {
+ u32 bus_fmt;
+ u32 data_width;
+ bool is_oldi_fmt;
+ enum dispc_oldi_mode_reg_val oldi_mode_reg_val;
+};
+
+static const struct dispc_bus_format dispc_bus_formats[] = {
+ { MEDIA_BUS_FMT_RGB444_1X12, 12, false, 0 },
+ { MEDIA_BUS_FMT_RGB565_1X16, 16, false, 0 },
+ { MEDIA_BUS_FMT_RGB666_1X18, 18, false, 0 },
+ { MEDIA_BUS_FMT_RGB888_1X24, 24, false, 0 },
+ { MEDIA_BUS_FMT_RGB101010_1X30, 30, false, 0 },
+ { MEDIA_BUS_FMT_RGB121212_1X36, 36, false, 0 },
+ { MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, 18, true, SPWG_18 },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 24, true, SPWG_24 },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, 24, true, JEIDA_24 },
+};
+
+static const
+struct dispc_bus_format *dispc_vp_find_bus_fmt(struct dispc_device *dispc,
+ u32 hw_videoport,
+ u32 bus_fmt, u32 bus_flags)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dispc_bus_formats); ++i) {
+ if (dispc_bus_formats[i].bus_fmt == bus_fmt)
+ return &dispc_bus_formats[i];
+ }
+
+ return NULL;
+}
+
+int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state)
+{
+ const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
+ const struct dispc_bus_format *fmt;
+
+ fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format,
+ tstate->bus_flags);
+ if (!fmt) {
+ dev_dbg(dispc->dev, "%s: Unsupported bus format: %u\n",
+ __func__, tstate->bus_format);
+ return -EINVAL;
+ }
+
+ if (dispc->feat->vp_bus_type[hw_videoport] != DISPC_VP_OLDI &&
+ fmt->is_oldi_fmt) {
+ dev_dbg(dispc->dev, "%s: %s is not OLDI-port\n",
+ __func__, dispc->feat->vp_name[hw_videoport]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dispc_oldi_tx_power(struct dispc_device *dispc, bool power)
+{
+ u32 val = power ? 0 : OLDI_PWRDN_TX;
+
+ if (WARN_ON(!dispc->oldi_io_ctrl))
+ return;
+
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT0_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT1_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT2_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT3_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_CLK_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+}
+
+static void dispc_set_num_datalines(struct dispc_device *dispc,
+ u32 hw_videoport, int num_lines)
+{
+ int v;
+
+ switch (num_lines) {
+ case 12:
+ v = 0; break;
+ case 16:
+ v = 1; break;
+ case 18:
+ v = 2; break;
+ case 24:
+ v = 3; break;
+ case 30:
+ v = 4; break;
+ case 36:
+ v = 5; break;
+ default:
+ WARN_ON(1);
+ v = 3;
+ }
+
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, v, 10, 8);
+}
+
+static void dispc_enable_oldi(struct dispc_device *dispc, u32 hw_videoport,
+ const struct dispc_bus_format *fmt)
+{
+ u32 oldi_cfg = 0;
+ u32 oldi_reset_bit = BIT(5 + hw_videoport);
+ int count = 0;
+
+ /*
+ * For the moment DUALMODESYNC, MASTERSLAVE, MODE, and SRC
+ * bits of DISPC_VP_DSS_OLDI_CFG are set statically to 0.
+ */
+
+ if (fmt->data_width == 24)
+ oldi_cfg |= BIT(8); /* MSB */
+ else if (fmt->data_width != 18)
+ dev_warn(dispc->dev, "%s: %d port width not supported\n",
+ __func__, fmt->data_width);
+
+ oldi_cfg |= BIT(7); /* DEPOL */
+
+ oldi_cfg = FLD_MOD(oldi_cfg, fmt->oldi_mode_reg_val, 3, 1);
+
+ oldi_cfg |= BIT(12); /* SOFTRST */
+
+ oldi_cfg |= BIT(0); /* ENABLE */
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, oldi_cfg);
+
+ while (!(oldi_reset_bit & dispc_read(dispc, DSS_SYSSTATUS)) &&
+ count < 10000)
+ count++;
+
+ if (!(oldi_reset_bit & dispc_read(dispc, DSS_SYSSTATUS)))
+ dev_warn(dispc->dev, "%s: timeout waiting OLDI reset done\n",
+ __func__);
+}
+
+void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state)
+{
+ const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
+ const struct dispc_bus_format *fmt;
+
+ fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format,
+ tstate->bus_flags);
+
+ if (WARN_ON(!fmt))
+ return;
+
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) {
+ dispc_oldi_tx_power(dispc, true);
+
+ dispc_enable_oldi(dispc, hw_videoport, fmt);
+ }
+}
+
+void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state)
+{
+ const struct drm_display_mode *mode = &state->adjusted_mode;
+ const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
+ bool align, onoff, rf, ieo, ipc, ihs, ivs;
+ const struct dispc_bus_format *fmt;
+ u32 hsw, hfp, hbp, vsw, vfp, vbp;
+
+ fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format,
+ tstate->bus_flags);
+
+ if (WARN_ON(!fmt))
+ return;
+
+ dispc_set_num_datalines(dispc, hw_videoport, fmt->data_width);
+
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsw = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ vfp = mode->vsync_start - mode->vdisplay;
+ vsw = mode->vsync_end - mode->vsync_start;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_H,
+ FLD_VAL(hsw - 1, 7, 0) |
+ FLD_VAL(hfp - 1, 19, 8) |
+ FLD_VAL(hbp - 1, 31, 20));
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_V,
+ FLD_VAL(vsw - 1, 7, 0) |
+ FLD_VAL(vfp, 19, 8) |
+ FLD_VAL(vbp, 31, 20));
+
+ ivs = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
+
+ ihs = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
+
+ ieo = !!(tstate->bus_flags & DRM_BUS_FLAG_DE_LOW);
+
+ ipc = !!(tstate->bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE);
+
+ /* always use the 'rf' setting */
+ onoff = true;
+
+ rf = !!(tstate->bus_flags & DRM_BUS_FLAG_SYNC_POSEDGE);
+
+ /* always use aligned syncs */
+ align = true;
+
+ /* always use DE_HIGH for OLDI */
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI)
+ ieo = false;
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_POL_FREQ,
+ FLD_VAL(align, 18, 18) |
+ FLD_VAL(onoff, 17, 17) |
+ FLD_VAL(rf, 16, 16) |
+ FLD_VAL(ieo, 15, 15) |
+ FLD_VAL(ipc, 14, 14) |
+ FLD_VAL(ihs, 13, 13) |
+ FLD_VAL(ivs, 12, 12));
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_SIZE_SCREEN,
+ FLD_VAL(mode->hdisplay - 1, 11, 0) |
+ FLD_VAL(mode->vdisplay - 1, 27, 16));
+
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 0, 0);
+}
+
+void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport)
+{
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 0, 0, 0);
+}
+
+void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport)
+{
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) {
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, 0);
+
+ dispc_oldi_tx_power(dispc, false);
+ }
+}
+
+bool dispc_vp_go_busy(struct dispc_device *dispc, u32 hw_videoport)
+{
+ return VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5);
+}
+
+void dispc_vp_go(struct dispc_device *dispc, u32 hw_videoport)
+{
+ WARN_ON(VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5));
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 5, 5);
+}
+
+enum c8_to_c12_mode { C8_TO_C12_REPLICATE, C8_TO_C12_MAX, C8_TO_C12_MIN };
+
+static u16 c8_to_c12(u8 c8, enum c8_to_c12_mode mode)
+{
+ u16 c12;
+
+ c12 = c8 << 4;
+
+ switch (mode) {
+ case C8_TO_C12_REPLICATE:
+ /* Copy the 4 MSBs of c8 into the 4 LSBs for full-scale c12 */
+ c12 |= c8 >> 4;
+ break;
+ case C8_TO_C12_MAX:
+ c12 |= 0xF;
+ break;
+ default:
+ case C8_TO_C12_MIN:
+ break;
+ }
+
+ return c12;
+}
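+
+/*
+ * Example (illustrative values): c8_to_c12(0xab, C8_TO_C12_REPLICATE)
+ * yields 0xaba; replicating the 4 MSBs into the new LSBs maps 0x00 to
+ * 0x000 and 0xff to 0xfff, preserving full scale.
+ */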
+
+static u64 argb8888_to_argb12121212(u32 argb8888, enum c8_to_c12_mode m)
+{
+ u8 a, r, g, b;
+ u64 v;
+
+ a = (argb8888 >> 24) & 0xff;
+ r = (argb8888 >> 16) & 0xff;
+ g = (argb8888 >> 8) & 0xff;
+ b = (argb8888 >> 0) & 0xff;
+
+ v = ((u64)c8_to_c12(a, m) << 36) | ((u64)c8_to_c12(r, m) << 24) |
+ ((u64)c8_to_c12(g, m) << 12) | (u64)c8_to_c12(b, m);
+
+ return v;
+}
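+
+/*
+ * Illustrative sanity check: with C8_TO_C12_REPLICATE,
+ * argb8888_to_argb12121212(0xffffffff, m) is 0xffffffffffff, i.e. all
+ * four 12-bit components fully saturated.
+ */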
+
+static void dispc_vp_set_default_color(struct dispc_device *dispc,
+ u32 hw_videoport, u32 default_color)
+{
+ u64 v;
+
+ v = argb8888_to_argb12121212(default_color, C8_TO_C12_REPLICATE);
+
+ dispc_ovr_write(dispc, hw_videoport,
+ DISPC_OVR_DEFAULT_COLOR, v & 0xffffffff);
+ dispc_ovr_write(dispc, hw_videoport,
+ DISPC_OVR_DEFAULT_COLOR2, (v >> 32) & 0xffff);
+}
+
+enum drm_mode_status dispc_vp_mode_valid(struct dispc_device *dispc,
+ u32 hw_videoport,
+ const struct drm_display_mode *mode)
+{
+ u32 hsw, hfp, hbp, vsw, vfp, vbp;
+ enum dispc_vp_bus_type bus_type;
+ int max_pclk;
+
+ bus_type = dispc->feat->vp_bus_type[hw_videoport];
+
+ max_pclk = dispc->feat->max_pclk_khz[bus_type];
+
+ if (WARN_ON(max_pclk == 0))
+ return MODE_BAD;
+
+ if (mode->clock < dispc->feat->min_pclk_khz)
+ return MODE_CLOCK_LOW;
+
+ if (mode->clock > max_pclk)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->hdisplay > 4096)
+ return MODE_BAD;
+
+ if (mode->vdisplay > 4096)
+ return MODE_BAD;
+
+ /* TODO: add interlace support */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ /*
+ * Enforce that the output width is divisible by 2. Strictly this
+ * is only needed in the following cases:
+ * - YUV output selected (BT656, BT1120)
+ * - Dithering enabled
+ * - TDM with TDMCycleFormat == 3
+ * But for simplicity we enforce it always.
+ */
+ if ((mode->hdisplay % 2) != 0)
+ return MODE_BAD_HVALUE;
+
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsw = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ vfp = mode->vsync_start - mode->vdisplay;
+ vsw = mode->vsync_end - mode->vsync_start;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ if (hsw < 1 || hsw > 256 ||
+ hfp < 1 || hfp > 4096 ||
+ hbp < 1 || hbp > 4096)
+ return MODE_BAD_HVALUE;
+
+ if (vsw < 1 || vsw > 256 ||
+ vfp > 4095 || vbp > 4095)
+ return MODE_BAD_VVALUE;
+
+ if (dispc->memory_bandwidth_limit) {
+ const unsigned int bpp = 4;
+ u64 bandwidth;
+
+ bandwidth = 1000 * mode->clock;
+ bandwidth = bandwidth * mode->hdisplay * mode->vdisplay * bpp;
+ bandwidth = div_u64(bandwidth, mode->htotal * mode->vtotal);
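+
+ /*
+ * The above works out to (pixels per frame) * bpp * (refresh
+ * rate), e.g. a 1920x1080 mode refreshing at 60 Hz needs
+ * roughly 1920 * 1080 * 4 * 60, about 498 MB/s (figures for
+ * illustration only).
+ */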
+
+ if (dispc->memory_bandwidth_limit < bandwidth)
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
+
+int dispc_vp_enable_clk(struct dispc_device *dispc, u32 hw_videoport)
+{
+ int ret = clk_prepare_enable(dispc->vp_clk[hw_videoport]);
+
+ if (ret)
+ dev_err(dispc->dev, "%s: enabling clk failed: %d\n", __func__,
+ ret);
+
+ return ret;
+}
+
+void dispc_vp_disable_clk(struct dispc_device *dispc, u32 hw_videoport)
+{
+ clk_disable_unprepare(dispc->vp_clk[hw_videoport]);
+}
+
+/*
+ * Calculate the percentage difference between the requested pixel clock rate
+ * and the effective rate resulting from calculating the clock divider value.
+ */
+static
+unsigned int dispc_pclk_diff(unsigned long rate, unsigned long real_rate)
+{
+ int r = rate / 100, rr = real_rate / 100;
+
+ return (unsigned int)(abs(((rr - r) * 100) / r));
+}
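+
+/*
+ * For example (illustrative numbers): a requested rate of 148500000 Hz
+ * and a real rate of 154000000 Hz give
+ * abs((1540000 - 1485000) * 100 / 1485000) = 3, i.e. a 3% difference
+ * after integer truncation.
+ */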
+
+int dispc_vp_set_clk_rate(struct dispc_device *dispc, u32 hw_videoport,
+ unsigned long rate)
+{
+ int r;
+ unsigned long new_rate;
+
+ r = clk_set_rate(dispc->vp_clk[hw_videoport], rate);
+ if (r) {
+ dev_err(dispc->dev, "vp%d: failed to set clk rate to %lu\n",
+ hw_videoport, rate);
+ return r;
+ }
+
+ new_rate = clk_get_rate(dispc->vp_clk[hw_videoport]);
+
+ if (dispc_pclk_diff(rate, new_rate) > 5)
+ dev_warn(dispc->dev,
+ "vp%d: Clock rate %lu differs over 5%% from requested %lu\n",
+ hw_videoport, new_rate, rate);
+
+ dev_dbg(dispc->dev, "vp%d: new rate %lu Hz (requested %lu Hz)\n",
+ hw_videoport, clk_get_rate(dispc->vp_clk[hw_videoport]), rate);
+
+ return 0;
+}
+
+/* OVR */
+static void dispc_k2g_ovr_set_plane(struct dispc_device *dispc,
+ u32 hw_plane, u32 hw_videoport,
+ u32 x, u32 y, u32 layer)
+{
+ /* On K2G there is only one plane and no need for an OVR */
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_K2G_POSITION,
+ x | (y << 16));
+}
+
+static void dispc_am65x_ovr_set_plane(struct dispc_device *dispc,
+ u32 hw_plane, u32 hw_videoport,
+ u32 x, u32 y, u32 layer)
+{
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+ hw_plane, 4, 1);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+ x, 17, 6);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+ y, 30, 19);
+}
+
+static void dispc_j721e_ovr_set_plane(struct dispc_device *dispc,
+ u32 hw_plane, u32 hw_videoport,
+ u32 x, u32 y, u32 layer)
+{
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+ hw_plane, 4, 1);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
+ x, 13, 0);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
+ y, 29, 16);
+}
+
+void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
+ u32 hw_videoport, u32 x, u32 y, u32 layer)
+{
+ switch (dispc->feat->subrev) {
+ case DISPC_K2G:
+ dispc_k2g_ovr_set_plane(dispc, hw_plane, hw_videoport,
+ x, y, layer);
+ break;
+ case DISPC_AM65X:
+ dispc_am65x_ovr_set_plane(dispc, hw_plane, hw_videoport,
+ x, y, layer);
+ break;
+ case DISPC_J721E:
+ dispc_j721e_ovr_set_plane(dispc, hw_plane, hw_videoport,
+ x, y, layer);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+void dispc_ovr_enable_layer(struct dispc_device *dispc,
+ u32 hw_videoport, u32 layer, bool enable)
+{
+ if (dispc->feat->subrev == DISPC_K2G)
+ return;
+
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+ !!enable, 0, 0);
+}
+
+/* CSC */
+enum csc_ctm {
+ CSC_RR, CSC_RG, CSC_RB,
+ CSC_GR, CSC_GG, CSC_GB,
+ CSC_BR, CSC_BG, CSC_BB,
+};
+
+enum csc_yuv2rgb {
+ CSC_RY, CSC_RCB, CSC_RCR,
+ CSC_GY, CSC_GCB, CSC_GCR,
+ CSC_BY, CSC_BCB, CSC_BCR,
+};
+
+enum csc_rgb2yuv {
+ CSC_YR, CSC_YG, CSC_YB,
+ CSC_CBR, CSC_CBG, CSC_CBB,
+ CSC_CRR, CSC_CRG, CSC_CRB,
+};
+
+struct dispc_csc_coef {
+ void (*to_regval)(const struct dispc_csc_coef *csc, u32 *regval);
+ int m[9];
+ int preoffset[3];
+ int postoffset[3];
+ enum { CLIP_LIMITED_RANGE = 0, CLIP_FULL_RANGE = 1, } cliping;
+ const char *name;
+};
+
+#define DISPC_CSC_REGVAL_LEN 8
+
+static
+void dispc_csc_offset_regval(const struct dispc_csc_coef *csc, u32 *regval)
+{
+#define OVAL(x, y) (FLD_VAL(x, 15, 3) | FLD_VAL(y, 31, 19))
+ regval[5] = OVAL(csc->preoffset[0], csc->preoffset[1]);
+ regval[6] = OVAL(csc->preoffset[2], csc->postoffset[0]);
+ regval[7] = OVAL(csc->postoffset[1], csc->postoffset[2]);
+#undef OVAL
+}
+
+#define CVAL(x, y) (FLD_VAL(x, 10, 0) | FLD_VAL(y, 26, 16))
+static
+void dispc_csc_yuv2rgb_regval(const struct dispc_csc_coef *csc, u32 *regval)
+{
+ regval[0] = CVAL(csc->m[CSC_RY], csc->m[CSC_RCR]);
+ regval[1] = CVAL(csc->m[CSC_RCB], csc->m[CSC_GY]);
+ regval[2] = CVAL(csc->m[CSC_GCR], csc->m[CSC_GCB]);
+ regval[3] = CVAL(csc->m[CSC_BY], csc->m[CSC_BCR]);
+ regval[4] = CVAL(csc->m[CSC_BCB], 0);
+
+ dispc_csc_offset_regval(csc, regval);
+}
+
+__maybe_unused static
+void dispc_csc_rgb2yuv_regval(const struct dispc_csc_coef *csc, u32 *regval)
+{
+ regval[0] = CVAL(csc->m[CSC_YR], csc->m[CSC_YG]);
+ regval[1] = CVAL(csc->m[CSC_YB], csc->m[CSC_CRR]);
+ regval[2] = CVAL(csc->m[CSC_CRG], csc->m[CSC_CRB]);
+ regval[3] = CVAL(csc->m[CSC_CBR], csc->m[CSC_CBG]);
+ regval[4] = CVAL(csc->m[CSC_CBB], 0);
+
+ dispc_csc_offset_regval(csc, regval);
+}
+
+static void dispc_csc_cpr_regval(const struct dispc_csc_coef *csc,
+ u32 *regval)
+{
+ regval[0] = CVAL(csc->m[CSC_RR], csc->m[CSC_RG]);
+ regval[1] = CVAL(csc->m[CSC_RB], csc->m[CSC_GR]);
+ regval[2] = CVAL(csc->m[CSC_GG], csc->m[CSC_GB]);
+ regval[3] = CVAL(csc->m[CSC_BR], csc->m[CSC_BG]);
+ regval[4] = CVAL(csc->m[CSC_BB], 0);
+
+ dispc_csc_offset_regval(csc, regval);
+}
+
+#undef CVAL
+
+static void dispc_k2g_vid_write_csc(struct dispc_device *dispc, u32 hw_plane,
+ const struct dispc_csc_coef *csc)
+{
+ static const u16 dispc_vid_csc_coef_reg[] = {
+ DISPC_VID_CSC_COEF(0), DISPC_VID_CSC_COEF(1),
+ DISPC_VID_CSC_COEF(2), DISPC_VID_CSC_COEF(3),
+ DISPC_VID_CSC_COEF(4), DISPC_VID_CSC_COEF(5),
+ DISPC_VID_CSC_COEF(6), /* K2G has no post offset support */
+ };
+ u32 regval[DISPC_CSC_REGVAL_LEN];
+ unsigned int i;
+
+ csc->to_regval(csc, regval);
+
+ if (regval[7] != 0)
+ dev_warn(dispc->dev, "%s: No post offset support for %s\n",
+ __func__, csc->name);
+
+ for (i = 0; i < ARRAY_SIZE(dispc_vid_csc_coef_reg); i++)
+ dispc_vid_write(dispc, hw_plane, dispc_vid_csc_coef_reg[i],
+ regval[i]);
+}
+
+static void dispc_k3_vid_write_csc(struct dispc_device *dispc, u32 hw_plane,
+ const struct dispc_csc_coef *csc)
+{
+ static const u16 dispc_vid_csc_coef_reg[DISPC_CSC_REGVAL_LEN] = {
+ DISPC_VID_CSC_COEF(0), DISPC_VID_CSC_COEF(1),
+ DISPC_VID_CSC_COEF(2), DISPC_VID_CSC_COEF(3),
+ DISPC_VID_CSC_COEF(4), DISPC_VID_CSC_COEF(5),
+ DISPC_VID_CSC_COEF(6), DISPC_VID_CSC_COEF7,
+ };
+ u32 regval[DISPC_CSC_REGVAL_LEN];
+ unsigned int i;
+
+ csc->to_regval(csc, regval);
+
+ for (i = 0; i < ARRAY_SIZE(dispc_vid_csc_coef_reg); i++)
+ dispc_vid_write(dispc, hw_plane, dispc_vid_csc_coef_reg[i],
+ regval[i]);
+}
+
+/* YUV -> RGB, ITU-R BT.601, full range */
+static const struct dispc_csc_coef csc_yuv2rgb_bt601_full = {
+ dispc_csc_yuv2rgb_regval,
+ { 256, 0, 358, /* ry, rcb, rcr |1.000 0.000 1.402|*/
+ 256, -88, -182, /* gy, gcb, gcr |1.000 -0.344 -0.714|*/
+ 256, 452, 0, }, /* by, bcb, bcr |1.000 1.772 0.000|*/
+ { 0, -2048, -2048, }, /* full range */
+ { 0, 0, 0, },
+ CLIP_FULL_RANGE,
+ "BT.601 Full",
+};
+
+/* YUV -> RGB, ITU-R BT.601, limited range */
+static const struct dispc_csc_coef csc_yuv2rgb_bt601_lim = {
+ dispc_csc_yuv2rgb_regval,
+ { 298, 0, 409, /* ry, rcb, rcr |1.164 0.000 1.596|*/
+ 298, -100, -208, /* gy, gcb, gcr |1.164 -0.392 -0.813|*/
+ 298, 516, 0, }, /* by, bcb, bcr |1.164 2.017 0.000|*/
+ { -256, -2048, -2048, }, /* limited range */
+ { 0, 0, 0, },
+ CLIP_FULL_RANGE,
+ "BT.601 Limited",
+};
+
+/* YUV -> RGB, ITU-R BT.709, full range */
+static const struct dispc_csc_coef csc_yuv2rgb_bt709_full = {
+ dispc_csc_yuv2rgb_regval,
+ { 256, 0, 402, /* ry, rcb, rcr |1.000 0.000 1.570|*/
+ 256, -48, -120, /* gy, gcb, gcr |1.000 -0.187 -0.467|*/
+ 256, 475, 0, }, /* by, bcb, bcr |1.000 1.856 0.000|*/
+ { 0, -2048, -2048, }, /* full range */
+ { 0, 0, 0, },
+ CLIP_FULL_RANGE,
+ "BT.709 Full",
+};
+
+/* YUV -> RGB, ITU-R BT.709, limited range */
+static const struct dispc_csc_coef csc_yuv2rgb_bt709_lim = {
+ dispc_csc_yuv2rgb_regval,
+ { 298, 0, 459, /* ry, rcb, rcr |1.164 0.000 1.793|*/
+ 298, -55, -136, /* gy, gcb, gcr |1.164 -0.213 -0.533|*/
+ 298, 541, 0, }, /* by, bcb, bcr |1.164 2.112 0.000|*/
+ { -256, -2048, -2048, }, /* limited range */
+ { 0, 0, 0, },
+ CLIP_FULL_RANGE,
+ "BT.709 Limited",
+};
+
+static const struct {
+ enum drm_color_encoding encoding;
+ enum drm_color_range range;
+ const struct dispc_csc_coef *csc;
+} dispc_csc_table[] = {
+ { DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_FULL_RANGE,
+ &csc_yuv2rgb_bt601_full, },
+ { DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_LIMITED_RANGE,
+ &csc_yuv2rgb_bt601_lim, },
+ { DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_FULL_RANGE,
+ &csc_yuv2rgb_bt709_full, },
+ { DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE,
+ &csc_yuv2rgb_bt709_lim, },
+};
+
+static const
+struct dispc_csc_coef *dispc_find_csc(enum drm_color_encoding encoding,
+ enum drm_color_range range)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dispc_csc_table); i++) {
+ if (dispc_csc_table[i].encoding == encoding &&
+ dispc_csc_table[i].range == range) {
+ return dispc_csc_table[i].csc;
+ }
+ }
+ return NULL;
+}
+
+static void dispc_vid_csc_setup(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state)
+{
+ const struct dispc_csc_coef *coef;
+
+ coef = dispc_find_csc(state->color_encoding, state->color_range);
+ if (!coef) {
+ dev_err(dispc->dev, "%s: CSC (%u,%u) not found\n",
+ __func__, state->color_encoding, state->color_range);
+ return;
+ }
+
+ if (dispc->feat->subrev == DISPC_K2G)
+ dispc_k2g_vid_write_csc(dispc, hw_plane, coef);
+ else
+ dispc_k3_vid_write_csc(dispc, hw_plane, coef);
+}
+
+static void dispc_vid_csc_enable(struct dispc_device *dispc, u32 hw_plane,
+ bool enable)
+{
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 9, 9);
+}
+
+/* SCALER */
+
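+/*
+ * The FIR increment is the in/out scaling ratio in fixed point, with
+ * 0x200000 (2^21) representing a 1:1 ratio; e.g. a 2:1 downscale
+ * yields an increment of 0x400000.
+ */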
+static u32 dispc_calc_fir_inc(u32 in, u32 out)
+{
+ return (u32)div_u64(0x200000ull * in, out);
+}
+
+enum dispc_vid_fir_coef_set {
+ DISPC_VID_FIR_COEF_HORIZ,
+ DISPC_VID_FIR_COEF_HORIZ_UV,
+ DISPC_VID_FIR_COEF_VERT,
+ DISPC_VID_FIR_COEF_VERT_UV,
+};
+
+static void dispc_vid_write_fir_coefs(struct dispc_device *dispc,
+ u32 hw_plane,
+ enum dispc_vid_fir_coef_set coef_set,
+ const struct tidss_scale_coefs *coefs)
+{
+ static const u16 c0_regs[] = {
+ [DISPC_VID_FIR_COEF_HORIZ] = DISPC_VID_FIR_COEFS_H0,
+ [DISPC_VID_FIR_COEF_HORIZ_UV] = DISPC_VID_FIR_COEFS_H0_C,
+ [DISPC_VID_FIR_COEF_VERT] = DISPC_VID_FIR_COEFS_V0,
+ [DISPC_VID_FIR_COEF_VERT_UV] = DISPC_VID_FIR_COEFS_V0_C,
+ };
+
+ static const u16 c12_regs[] = {
+ [DISPC_VID_FIR_COEF_HORIZ] = DISPC_VID_FIR_COEFS_H12,
+ [DISPC_VID_FIR_COEF_HORIZ_UV] = DISPC_VID_FIR_COEFS_H12_C,
+ [DISPC_VID_FIR_COEF_VERT] = DISPC_VID_FIR_COEFS_V12,
+ [DISPC_VID_FIR_COEF_VERT_UV] = DISPC_VID_FIR_COEFS_V12_C,
+ };
+
+ const u16 c0_base = c0_regs[coef_set];
+ const u16 c12_base = c12_regs[coef_set];
+ int phase;
+
+ if (!coefs) {
+ dev_err(dispc->dev, "%s: No coefficients given.\n", __func__);
+ return;
+ }
+
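+ /*
+ * Write the nine C0 (center tap) coefficients first, then C1 and C2
+ * for each of the 16 phases, packed pairwise into a single register.
+ */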
+ for (phase = 0; phase <= 8; ++phase) {
+ u16 reg = c0_base + phase * 4;
+ u16 c0 = coefs->c0[phase];
+
+ dispc_vid_write(dispc, hw_plane, reg, c0);
+ }
+
+ for (phase = 0; phase <= 15; ++phase) {
+ u16 reg = c12_base + phase * 4;
+ s16 c1, c2;
+ u32 c12;
+
+ c1 = coefs->c1[phase];
+ c2 = coefs->c2[phase];
+ c12 = FLD_VAL(c1, 19, 10) | FLD_VAL(c2, 29, 20);
+
+ dispc_vid_write(dispc, hw_plane, reg, c12);
+ }
+}
+
+static bool dispc_fourcc_is_yuv(u32 fourcc)
+{
+ switch (fourcc) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_NV12:
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct dispc_scaling_params {
+ int xinc, yinc;
+ u32 in_w, in_h, in_w_uv, in_h_uv;
+ u32 fir_xinc, fir_yinc, fir_xinc_uv, fir_yinc_uv;
+ bool scale_x, scale_y;
+ const struct tidss_scale_coefs *xcoef, *ycoef, *xcoef_uv, *ycoef_uv;
+ bool five_taps;
+};
+
+static int dispc_vid_calc_scaling(struct dispc_device *dispc,
+ const struct drm_plane_state *state,
+ struct dispc_scaling_params *sp,
+ bool lite_plane)
+{
+ const struct dispc_features_scaling *f = &dispc->feat->scaling;
+ u32 fourcc = state->fb->format->format;
+ u32 in_width_max_5tap = f->in_width_max_5tap_rgb;
+ u32 in_width_max_3tap = f->in_width_max_3tap_rgb;
+ u32 downscale_limit;
+ u32 in_width_max;
+
+ memset(sp, 0, sizeof(*sp));
+ sp->xinc = 1;
+ sp->yinc = 1;
+ sp->in_w = state->src_w >> 16;
+ sp->in_w_uv = sp->in_w;
+ sp->in_h = state->src_h >> 16;
+ sp->in_h_uv = sp->in_h;
+
+ sp->scale_x = sp->in_w != state->crtc_w;
+ sp->scale_y = sp->in_h != state->crtc_h;
+
+ if (dispc_fourcc_is_yuv(fourcc)) {
+ in_width_max_5tap = f->in_width_max_5tap_yuv;
+ in_width_max_3tap = f->in_width_max_3tap_yuv;
+
+ sp->in_w_uv >>= 1;
+ sp->scale_x = true;
+
+ if (fourcc == DRM_FORMAT_NV12) {
+ sp->in_h_uv >>= 1;
+ sp->scale_y = true;
+ }
+ }
+
+ /* Skip the rest if no scaling is used */
+ if ((!sp->scale_x && !sp->scale_y) || lite_plane)
+ return 0;
+
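+ /*
+ * Inputs wider than the 5-tap limit must fall back to 3-tap
+ * vertical filtering, which has its own downscale limit.
+ */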
+ if (sp->in_w > in_width_max_5tap) {
+ sp->five_taps = false;
+ in_width_max = in_width_max_3tap;
+ downscale_limit = f->downscale_limit_3tap;
+ } else {
+ sp->five_taps = true;
+ in_width_max = in_width_max_5tap;
+ downscale_limit = f->downscale_limit_5tap;
+ }
+
+ if (sp->scale_x) {
+ sp->fir_xinc = dispc_calc_fir_inc(sp->in_w, state->crtc_w);
+
+ if (sp->fir_xinc < dispc_calc_fir_inc(1, f->upscale_limit)) {
+ dev_dbg(dispc->dev,
+ "%s: X-scaling factor %u/%u > %u\n",
+ __func__, state->crtc_w, state->src_w >> 16,
+ f->upscale_limit);
+ return -EINVAL;
+ }
+
+ if (sp->fir_xinc >= dispc_calc_fir_inc(downscale_limit, 1)) {
+ sp->xinc = DIV_ROUND_UP(DIV_ROUND_UP(sp->in_w,
+ state->crtc_w),
+ downscale_limit);
+
+ if (sp->xinc > f->xinc_max) {
+ dev_dbg(dispc->dev,
+ "%s: X-scaling factor %u/%u < 1/%u\n",
+ __func__, state->crtc_w,
+ state->src_w >> 16,
+ downscale_limit * f->xinc_max);
+ return -EINVAL;
+ }
+
+ sp->in_w = (state->src_w >> 16) / sp->xinc;
+ }
+
+ while (sp->in_w > in_width_max) {
+ sp->xinc++;
+ sp->in_w = (state->src_w >> 16) / sp->xinc;
+ }
+
+ if (sp->xinc > f->xinc_max) {
+ dev_dbg(dispc->dev,
+ "%s: Too wide input buffer %u > %u\n", __func__,
+ state->src_w >> 16, in_width_max * f->xinc_max);
+ return -EINVAL;
+ }
+
+ /*
+ * We need even line length for YUV formats. Decimation
+ * can lead to odd length, so we need to make it even
+ * again.
+ */
+ if (dispc_fourcc_is_yuv(fourcc))
+ sp->in_w &= ~1;
+
+ sp->fir_xinc = dispc_calc_fir_inc(sp->in_w, state->crtc_w);
+ }
+
+ if (sp->scale_y) {
+ sp->fir_yinc = dispc_calc_fir_inc(sp->in_h, state->crtc_h);
+
+ if (sp->fir_yinc < dispc_calc_fir_inc(1, f->upscale_limit)) {
+ dev_dbg(dispc->dev,
+ "%s: Y-scaling factor %u/%u > %u\n",
+ __func__, state->crtc_h, state->src_h >> 16,
+ f->upscale_limit);
+ return -EINVAL;
+ }
+
+ if (sp->fir_yinc >= dispc_calc_fir_inc(downscale_limit, 1)) {
+ sp->yinc = DIV_ROUND_UP(DIV_ROUND_UP(sp->in_h,
+ state->crtc_h),
+ downscale_limit);
+
+ sp->in_h /= sp->yinc;
+ sp->fir_yinc = dispc_calc_fir_inc(sp->in_h,
+ state->crtc_h);
+ }
+ }
+
+ dev_dbg(dispc->dev,
+ "%s: %ux%u decim %ux%u -> %ux%u firinc %u.%03ux%u.%03u taps %u -> %ux%u\n",
+ __func__, state->src_w >> 16, state->src_h >> 16,
+ sp->xinc, sp->yinc, sp->in_w, sp->in_h,
+ sp->fir_xinc / 0x200000u,
+ ((sp->fir_xinc & 0x1FFFFFu) * 999u) / 0x1FFFFFu,
+ sp->fir_yinc / 0x200000u,
+ ((sp->fir_yinc & 0x1FFFFFu) * 999u) / 0x1FFFFFu,
+ sp->five_taps ? 5 : 3,
+ state->crtc_w, state->crtc_h);
+
+ if (dispc_fourcc_is_yuv(fourcc)) {
+ if (sp->scale_x) {
+ sp->in_w_uv /= sp->xinc;
+ sp->fir_xinc_uv = dispc_calc_fir_inc(sp->in_w_uv,
+ state->crtc_w);
+ sp->xcoef_uv = tidss_get_scale_coefs(dispc->dev,
+ sp->fir_xinc_uv,
+ true);
+ }
+ if (sp->scale_y) {
+ sp->in_h_uv /= sp->yinc;
+ sp->fir_yinc_uv = dispc_calc_fir_inc(sp->in_h_uv,
+ state->crtc_h);
+ sp->ycoef_uv = tidss_get_scale_coefs(dispc->dev,
+ sp->fir_yinc_uv,
+ sp->five_taps);
+ }
+ }
+
+ if (sp->scale_x)
+ sp->xcoef = tidss_get_scale_coefs(dispc->dev, sp->fir_xinc,
+ true);
+
+ if (sp->scale_y)
+ sp->ycoef = tidss_get_scale_coefs(dispc->dev, sp->fir_yinc,
+ sp->five_taps);
+
+ return 0;
+}
+
+static void dispc_vid_set_scaling(struct dispc_device *dispc,
+ u32 hw_plane,
+ struct dispc_scaling_params *sp,
+ u32 fourcc)
+{
+ /* HORIZONTAL RESIZE ENABLE */
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
+ sp->scale_x, 7, 7);
+
+ /* VERTICAL RESIZE ENABLE */
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
+ sp->scale_y, 8, 8);
+
+ /* Skip the rest if no scaling is used */
+ if (!sp->scale_x && !sp->scale_y)
+ return;
+
+ /* VERTICAL 5-TAPS */
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
+ sp->five_taps, 21, 21);
+
+ if (dispc_fourcc_is_yuv(fourcc)) {
+ if (sp->scale_x) {
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRH2,
+ sp->fir_xinc_uv);
+ dispc_vid_write_fir_coefs(dispc, hw_plane,
+ DISPC_VID_FIR_COEF_HORIZ_UV,
+ sp->xcoef_uv);
+ }
+ if (sp->scale_y) {
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRV2,
+ sp->fir_yinc_uv);
+ dispc_vid_write_fir_coefs(dispc, hw_plane,
+ DISPC_VID_FIR_COEF_VERT_UV,
+ sp->ycoef_uv);
+ }
+ }
+
+ if (sp->scale_x) {
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRH, sp->fir_xinc);
+ dispc_vid_write_fir_coefs(dispc, hw_plane,
+ DISPC_VID_FIR_COEF_HORIZ,
+ sp->xcoef);
+ }
+
+ if (sp->scale_y) {
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRV, sp->fir_yinc);
+ dispc_vid_write_fir_coefs(dispc, hw_plane,
+ DISPC_VID_FIR_COEF_VERT, sp->ycoef);
+ }
+}
+
+/* OTHER */
+
+static const struct {
+ u32 fourcc;
+ u8 dss_code;
+} dispc_color_formats[] = {
+ { DRM_FORMAT_ARGB4444, 0x0, },
+ { DRM_FORMAT_ABGR4444, 0x1, },
+ { DRM_FORMAT_RGBA4444, 0x2, },
+
+ { DRM_FORMAT_RGB565, 0x3, },
+ { DRM_FORMAT_BGR565, 0x4, },
+
+ { DRM_FORMAT_ARGB1555, 0x5, },
+ { DRM_FORMAT_ABGR1555, 0x6, },
+
+ { DRM_FORMAT_ARGB8888, 0x7, },
+ { DRM_FORMAT_ABGR8888, 0x8, },
+ { DRM_FORMAT_RGBA8888, 0x9, },
+ { DRM_FORMAT_BGRA8888, 0xa, },
+
+ { DRM_FORMAT_RGB888, 0xb, },
+ { DRM_FORMAT_BGR888, 0xc, },
+
+ { DRM_FORMAT_ARGB2101010, 0xe, },
+ { DRM_FORMAT_ABGR2101010, 0xf, },
+
+ { DRM_FORMAT_XRGB4444, 0x20, },
+ { DRM_FORMAT_XBGR4444, 0x21, },
+ { DRM_FORMAT_RGBX4444, 0x22, },
+
+ { DRM_FORMAT_ARGB1555, 0x25, },
+ { DRM_FORMAT_ABGR1555, 0x26, },
+
+ { DRM_FORMAT_XRGB8888, 0x27, },
+ { DRM_FORMAT_XBGR8888, 0x28, },
+ { DRM_FORMAT_RGBX8888, 0x29, },
+ { DRM_FORMAT_BGRX8888, 0x2a, },
+
+ { DRM_FORMAT_XRGB2101010, 0x2e, },
+ { DRM_FORMAT_XBGR2101010, 0x2f, },
+
+ { DRM_FORMAT_YUYV, 0x3e, },
+ { DRM_FORMAT_UYVY, 0x3f, },
+
+ { DRM_FORMAT_NV12, 0x3d, },
+};
+
+static void dispc_plane_set_pixel_format(struct dispc_device *dispc,
+ u32 hw_plane, u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i) {
+ if (dispc_color_formats[i].fourcc == fourcc) {
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
+ dispc_color_formats[i].dss_code,
+ 6, 1);
+ return;
+ }
+ }
+
+ WARN_ON(1);
+}
+
+const u32 *dispc_plane_formats(struct dispc_device *dispc, unsigned int *len)
+{
+ WARN_ON(!dispc->fourccs);
+
+ *len = dispc->num_fourccs;
+
+ return dispc->fourccs;
+}
+
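+/*
+ * Encode a step of 'pixels' pixels of 'ps' bytes each as a DSS
+ * PIXEL_INC/ROW_INC value, where 1 means "advance to the next byte":
+ * a step of n pixels becomes 1 + (n - 1) * ps, and negative steps
+ * move backwards.
+ */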
+static s32 pixinc(int pixels, u8 ps)
+{
+ if (pixels == 1)
+ return 1;
+ else if (pixels > 1)
+ return 1 + (pixels - 1) * ps;
+ else if (pixels < 0)
+ return 1 - (-pixels + 1) * ps;
+
+ WARN_ON(1);
+ return 0;
+}
+
+int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state,
+ u32 hw_videoport)
+{
+ bool lite = dispc->feat->vid_lite[hw_plane];
+ u32 fourcc = state->fb->format->format;
+ bool need_scaling = state->src_w >> 16 != state->crtc_w ||
+ state->src_h >> 16 != state->crtc_h;
+ struct dispc_scaling_params scaling;
+ int ret;
+
+ if (dispc_fourcc_is_yuv(fourcc)) {
+ if (!dispc_find_csc(state->color_encoding,
+ state->color_range)) {
+ dev_dbg(dispc->dev,
+ "%s: Unsupported CSC (%u,%u) for HW plane %u\n",
+ __func__, state->color_encoding,
+ state->color_range, hw_plane);
+ return -EINVAL;
+ }
+ }
+
+ if (need_scaling) {
+ if (lite) {
+ dev_dbg(dispc->dev,
+ "%s: Lite plane %u can't scale %ux%u!=%ux%u\n",
+ __func__, hw_plane,
+ state->src_w >> 16, state->src_h >> 16,
+ state->crtc_w, state->crtc_h);
+ return -EINVAL;
+ }
+ ret = dispc_vid_calc_scaling(dispc, state, &scaling, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
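+/*
+ * Return the DMA address of the first pixel to fetch: the GEM object
+ * base plus the framebuffer offset and the src_x/src_y position
+ * (16.16 fixed point) converted to bytes.
+ */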
+static
+dma_addr_t dispc_plane_state_paddr(const struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *gem;
+ u32 x = state->src_x >> 16;
+ u32 y = state->src_y >> 16;
+
+ gem = drm_fb_cma_get_gem_obj(state->fb, 0);
+
+ return gem->paddr + fb->offsets[0] + x * fb->format->cpp[0] +
+ y * fb->pitches[0];
+}
+
+static
+dma_addr_t dispc_plane_state_p_uv_addr(const struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *gem;
+ u32 x = state->src_x >> 16;
+ u32 y = state->src_y >> 16;
+
+ if (WARN_ON(state->fb->format->num_planes != 2))
+ return 0;
+
+ gem = drm_fb_cma_get_gem_obj(fb, 1);
+
+ return gem->paddr + fb->offsets[1] +
+ (x * fb->format->cpp[1] / fb->format->hsub) +
+ (y * fb->pitches[1] / fb->format->vsub);
+}
+
+int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state,
+ u32 hw_videoport)
+{
+ bool lite = dispc->feat->vid_lite[hw_plane];
+ u32 fourcc = state->fb->format->format;
+ u16 cpp = state->fb->format->cpp[0];
+ u32 fb_width = state->fb->pitches[0] / cpp;
+ dma_addr_t paddr = dispc_plane_state_paddr(state);
+ struct dispc_scaling_params scale;
+
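+ /* The scaling parameters were already validated in dispc_plane_check() */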
+ dispc_vid_calc_scaling(dispc, state, &scale, lite);
+
+ dispc_plane_set_pixel_format(dispc, hw_plane, fourcc);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_0, paddr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_0, (u64)paddr >> 32);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_1, paddr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_1, (u64)paddr >> 32);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_PICTURE_SIZE,
+ (scale.in_w - 1) | ((scale.in_h - 1) << 16));
+
+ /* For YUV422 formats, use the macropixel size for the pixel increment */
+ if (fourcc == DRM_FORMAT_YUYV || fourcc == DRM_FORMAT_UYVY)
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_PIXEL_INC,
+ pixinc(scale.xinc, cpp * 2));
+ else
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_PIXEL_INC,
+ pixinc(scale.xinc, cpp));
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_ROW_INC,
+ pixinc(1 + (scale.yinc * fb_width -
+ scale.xinc * scale.in_w),
+ cpp));
+
+ if (state->fb->format->num_planes == 2) {
+ u16 cpp_uv = state->fb->format->cpp[1];
+ u32 fb_width_uv = state->fb->pitches[1] / cpp_uv;
+ dma_addr_t p_uv_addr = dispc_plane_state_p_uv_addr(state);
+
+ dispc_vid_write(dispc, hw_plane,
+ DISPC_VID_BA_UV_0, p_uv_addr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane,
+ DISPC_VID_BA_UV_EXT_0, (u64)p_uv_addr >> 32);
+ dispc_vid_write(dispc, hw_plane,
+ DISPC_VID_BA_UV_1, p_uv_addr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane,
+ DISPC_VID_BA_UV_EXT_1, (u64)p_uv_addr >> 32);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_ROW_INC_UV,
+ pixinc(1 + (scale.yinc * fb_width_uv -
+ scale.xinc * scale.in_w_uv),
+ cpp_uv));
+ }
+
+ if (!lite) {
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_SIZE,
+ (state->crtc_w - 1) |
+ ((state->crtc_h - 1) << 16));
+
+ dispc_vid_set_scaling(dispc, hw_plane, &scale, fourcc);
+ }
+
+ /* enable YUV->RGB color conversion */
+ if (dispc_fourcc_is_yuv(fourcc)) {
+ dispc_vid_csc_setup(dispc, hw_plane, state);
+ dispc_vid_csc_enable(dispc, hw_plane, true);
+ } else {
+ dispc_vid_csc_enable(dispc, hw_plane, false);
+ }
+
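+ /* Scale the 16-bit DRM alpha property to the 8-bit HW global alpha */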
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_GLOBAL_ALPHA,
+ 0xFF & (state->alpha >> 8));
+
+ if (state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1,
+ 28, 28);
+ else
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0,
+ 28, 28);
+
+ return 0;
+}
+
+int dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable)
+{
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 0, 0);
+
+ return 0;
+}
+
+static u32 dispc_vid_get_fifo_size(struct dispc_device *dispc, u32 hw_plane)
+{
+ return VID_REG_GET(dispc, hw_plane, DISPC_VID_BUF_SIZE_STATUS, 15, 0);
+}
+
+static void dispc_vid_set_mflag_threshold(struct dispc_device *dispc,
+ u32 hw_plane, u32 low, u32 high)
+{
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_MFLAG_THRESHOLD,
+ FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
+}
+
+static void dispc_vid_set_buf_threshold(struct dispc_device *dispc,
+ u32 hw_plane, u32 low, u32 high)
+{
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BUF_THRESHOLD,
+ FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
+}
+
+static void dispc_k2g_plane_init(struct dispc_device *dispc)
+{
+ unsigned int hw_plane;
+
+ dev_dbg(dispc->dev, "%s()\n", __func__);
+
+ /* MFLAG_CTRL = ENABLED */
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
+ /* MFLAG_START = MFLAGNORMALSTARTMODE */
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+
+ for (hw_plane = 0; hw_plane < dispc->feat->num_planes; hw_plane++) {
+ u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
+ u32 thr_low, thr_high;
+ u32 mflag_low, mflag_high;
+ u32 preload;
+
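+ /*
+ * FIFO refill runs from the half-full low threshold up to the
+ * almost-full high threshold; the MFLAG (memory urgency)
+ * hysteresis is set at 1/3 and 2/3 of the FIFO size.
+ */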
+ thr_high = size - 1;
+ thr_low = size / 2;
+
+ mflag_high = size * 2 / 3;
+ mflag_low = size / 3;
+
+ preload = thr_low;
+
+ dev_dbg(dispc->dev,
+ "%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n",
+ dispc->feat->vid_name[hw_plane],
+ size,
+ thr_high, thr_low,
+ mflag_high, mflag_low,
+ preload);
+
+ dispc_vid_set_buf_threshold(dispc, hw_plane,
+ thr_low, thr_high);
+ dispc_vid_set_mflag_threshold(dispc, hw_plane,
+ mflag_low, mflag_high);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_PRELOAD, preload);
+
+ /*
+ * Prefetch up to fifo high-threshold value to minimize the
+ * possibility of underflows. Note that this means the PRELOAD
+ * register is ignored.
+ */
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1,
+ 19, 19);
+ }
+}
+
+static void dispc_k3_plane_init(struct dispc_device *dispc)
+{
+ unsigned int hw_plane;
+ u32 cba_lo_pri = 1;
+ u32 cba_hi_pri = 0;
+
+ dev_dbg(dispc->dev, "%s()\n", __func__);
+
+ REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_lo_pri, 2, 0);
+ REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_hi_pri, 5, 3);
+
+ /* MFLAG_CTRL = ENABLED */
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
+ /* MFLAG_START = MFLAGNORMALSTARTMODE */
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+
+ for (hw_plane = 0; hw_plane < dispc->feat->num_planes; hw_plane++) {
+ u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
+ u32 thr_low, thr_high;
+ u32 mflag_low, mflag_high;
+ u32 preload;
+
+ thr_high = size - 1;
+ thr_low = size / 2;
+
+ mflag_high = size * 2 / 3;
+ mflag_low = size / 3;
+
+ preload = thr_low;
+
+ dev_dbg(dispc->dev,
+ "%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n",
+ dispc->feat->vid_name[hw_plane],
+ size,
+ thr_high, thr_low,
+ mflag_high, mflag_low,
+ preload);
+
+ dispc_vid_set_buf_threshold(dispc, hw_plane,
+ thr_low, thr_high);
+ dispc_vid_set_mflag_threshold(dispc, hw_plane,
+ mflag_low, mflag_high);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_PRELOAD, preload);
+
+ /* Prefetch up to the PRELOAD value */
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0,
+ 19, 19);
+ }
+}
+
+static void dispc_plane_init(struct dispc_device *dispc)
+{
+ switch (dispc->feat->subrev) {
+ case DISPC_K2G:
+ dispc_k2g_plane_init(dispc);
+ break;
+ case DISPC_AM65X:
+ case DISPC_J721E:
+ dispc_k3_plane_init(dispc);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+static void dispc_vp_init(struct dispc_device *dispc)
+{
+ unsigned int i;
+
+ dev_dbg(dispc->dev, "%s()\n", __func__);
+
+ /* Enable the gamma shadow bit-field for all VPs */
+ for (i = 0; i < dispc->feat->num_vps; i++)
+ VP_REG_FLD_MOD(dispc, i, DISPC_VP_CONFIG, 1, 2, 2);
+}
+
+static void dispc_initial_config(struct dispc_device *dispc)
+{
+ dispc_plane_init(dispc);
+ dispc_vp_init(dispc);
+
+ /* Note: Hardcoded DPI routing on J721E for now */
+ if (dispc->feat->subrev == DISPC_J721E) {
+ dispc_write(dispc, DISPC_CONNECTIONS,
+ FLD_VAL(2, 3, 0) | /* VP1 to DPI0 */
+ FLD_VAL(8, 7, 4) /* VP3 to DPI1 */
+ );
+ }
+}
+
+static void dispc_k2g_vp_write_gamma_table(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 *table = dispc->vp_data[hw_videoport].gamma_table;
+ u32 hwlen = dispc->feat->vp_feat.color.gamma_size;
+ unsigned int i;
+
+ dev_dbg(dispc->dev, "%s: hw_videoport %d\n", __func__, hw_videoport);
+
+ if (WARN_ON(dispc->feat->vp_feat.color.gamma_type != TIDSS_GAMMA_8BIT))
+ return;
+
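+ /* Each entry carries the color value with the table index in bits 31:24 */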
+ for (i = 0; i < hwlen; ++i) {
+ u32 v = table[i];
+
+ v |= i << 24;
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_K2G_GAMMA_TABLE,
+ v);
+ }
+}
+
+static void dispc_am65x_vp_write_gamma_table(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 *table = dispc->vp_data[hw_videoport].gamma_table;
+ u32 hwlen = dispc->feat->vp_feat.color.gamma_size;
+ unsigned int i;
+
+ dev_dbg(dispc->dev, "%s: hw_videoport %d\n", __func__, hw_videoport);
+
+ if (WARN_ON(dispc->feat->vp_feat.color.gamma_type != TIDSS_GAMMA_8BIT))
+ return;
+
+ for (i = 0; i < hwlen; ++i) {
+ u32 v = table[i];
+
+ v |= i << 24;
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_GAMMA_TABLE, v);
+ }
+}
+
+static void dispc_j721e_vp_write_gamma_table(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 *table = dispc->vp_data[hw_videoport].gamma_table;
+ u32 hwlen = dispc->feat->vp_feat.color.gamma_size;
+ unsigned int i;
+
+ dev_dbg(dispc->dev, "%s: hw_videoport %d\n", __func__, hw_videoport);
+
+ if (WARN_ON(dispc->feat->vp_feat.color.gamma_type != TIDSS_GAMMA_10BIT))
+ return;
+
+ for (i = 0; i < hwlen; ++i) {
+ u32 v = table[i];
+
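+ /* Bit 31 on the first entry restarts the auto-incremented table index */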
+ if (i == 0)
+ v |= 1U << 31;
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_GAMMA_TABLE, v);
+ }
+}
+
+static void dispc_vp_write_gamma_table(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ switch (dispc->feat->subrev) {
+ case DISPC_K2G:
+ dispc_k2g_vp_write_gamma_table(dispc, hw_videoport);
+ break;
+ case DISPC_AM65X:
+ dispc_am65x_vp_write_gamma_table(dispc, hw_videoport);
+ break;
+ case DISPC_J721E:
+ dispc_j721e_vp_write_gamma_table(dispc, hw_videoport);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static const struct drm_color_lut dispc_vp_gamma_default_lut[] = {
+ { .red = 0, .green = 0, .blue = 0, },
+ { .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, },
+};
+
+static void dispc_vp_set_gamma(struct dispc_device *dispc,
+ u32 hw_videoport,
+ const struct drm_color_lut *lut,
+ unsigned int length)
+{
+ u32 *table = dispc->vp_data[hw_videoport].gamma_table;
+ u32 hwlen = dispc->feat->vp_feat.color.gamma_size;
+ u32 hwbits;
+ unsigned int i;
+
+ dev_dbg(dispc->dev, "%s: hw_videoport %d, lut len %u, hw len %u\n",
+ __func__, hw_videoport, length, hwlen);
+
+ if (dispc->feat->vp_feat.color.gamma_type == TIDSS_GAMMA_10BIT)
+ hwbits = 10;
+ else
+ hwbits = 8;
+
+ if (!lut || length < 2) {
+ lut = dispc_vp_gamma_default_lut;
+ length = ARRAY_SIZE(dispc_vp_gamma_default_lut);
+ }
+
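+ /*
+ * Linearly interpolate each LUT segment onto the hwlen-entry
+ * hardware table, packing the truncated R/G/B components into a
+ * single register word per entry.
+ */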
+ for (i = 0; i < length - 1; ++i) {
+ unsigned int first = i * (hwlen - 1) / (length - 1);
+ unsigned int last = (i + 1) * (hwlen - 1) / (length - 1);
+ unsigned int w = last - first;
+ u16 r, g, b;
+ unsigned int j;
+
+ if (w == 0)
+ continue;
+
+ for (j = 0; j <= w; j++) {
+ r = (lut[i].red * (w - j) + lut[i + 1].red * j) / w;
+ g = (lut[i].green * (w - j) + lut[i + 1].green * j) / w;
+ b = (lut[i].blue * (w - j) + lut[i + 1].blue * j) / w;
+
+ r >>= 16 - hwbits;
+ g >>= 16 - hwbits;
+ b >>= 16 - hwbits;
+
+ table[first + j] = (r << (hwbits * 2)) |
+ (g << hwbits) | b;
+ }
+ }
+
+ dispc_vp_write_gamma_table(dispc, hw_videoport);
+}
+
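+/*
+ * Convert a DRM S31.32 fixed-point CTM coefficient to the s2.8
+ * format used by the K2G CPR registers: drop 24 of the 32 fraction
+ * bits and clamp the magnitude to the register field.
+ */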
+static s16 dispc_S31_32_to_s2_8(s64 coef)
+{
+ u64 sign_bit = 1ULL << 63;
+ u64 cbits = (u64)coef;
+ s16 ret;
+
+ if (cbits & sign_bit)
+ ret = -clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x200);
+ else
+ ret = clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x1FF);
+
+ return ret;
+}
+
+static void dispc_k2g_cpr_from_ctm(const struct drm_color_ctm *ctm,
+ struct dispc_csc_coef *cpr)
+{
+ memset(cpr, 0, sizeof(*cpr));
+
+ cpr->to_regval = dispc_csc_cpr_regval;
+ cpr->m[CSC_RR] = dispc_S31_32_to_s2_8(ctm->matrix[0]);
+ cpr->m[CSC_RG] = dispc_S31_32_to_s2_8(ctm->matrix[1]);
+ cpr->m[CSC_RB] = dispc_S31_32_to_s2_8(ctm->matrix[2]);
+ cpr->m[CSC_GR] = dispc_S31_32_to_s2_8(ctm->matrix[3]);
+ cpr->m[CSC_GG] = dispc_S31_32_to_s2_8(ctm->matrix[4]);
+ cpr->m[CSC_GB] = dispc_S31_32_to_s2_8(ctm->matrix[5]);
+ cpr->m[CSC_BR] = dispc_S31_32_to_s2_8(ctm->matrix[6]);
+ cpr->m[CSC_BG] = dispc_S31_32_to_s2_8(ctm->matrix[7]);
+ cpr->m[CSC_BB] = dispc_S31_32_to_s2_8(ctm->matrix[8]);
+}
+
+#define CVAL(xR, xG, xB) (FLD_VAL(xR, 9, 0) | FLD_VAL(xG, 20, 11) | \
+ FLD_VAL(xB, 31, 22))
+
+static void dispc_k2g_vp_csc_cpr_regval(const struct dispc_csc_coef *csc,
+ u32 *regval)
+{
+ regval[0] = CVAL(csc->m[CSC_BB], csc->m[CSC_BG], csc->m[CSC_BR]);
+ regval[1] = CVAL(csc->m[CSC_GB], csc->m[CSC_GG], csc->m[CSC_GR]);
+ regval[2] = CVAL(csc->m[CSC_RB], csc->m[CSC_RG], csc->m[CSC_RR]);
+}
+
+#undef CVAL
+
+static void dispc_k2g_vp_write_csc(struct dispc_device *dispc, u32 hw_videoport,
+ const struct dispc_csc_coef *csc)
+{
+ static const u16 dispc_vp_cpr_coef_reg[] = {
+ DISPC_VP_CSC_COEF0, DISPC_VP_CSC_COEF1, DISPC_VP_CSC_COEF2,
+ /* K2G CPR coefficients are packed into three registers. */
+ };
+ u32 regval[DISPC_CSC_REGVAL_LEN];
+ unsigned int i;
+
+ dispc_k2g_vp_csc_cpr_regval(csc, regval);
+
+ for (i = 0; i < ARRAY_SIZE(dispc_vp_cpr_coef_reg); i++)
+ dispc_vp_write(dispc, hw_videoport, dispc_vp_cpr_coef_reg[i],
+ regval[i]);
+}
+
+static void dispc_k2g_vp_set_ctm(struct dispc_device *dispc, u32 hw_videoport,
+ struct drm_color_ctm *ctm)
+{
+ u32 cprenable = 0;
+
+ if (ctm) {
+ struct dispc_csc_coef cpr;
+
+ dispc_k2g_cpr_from_ctm(ctm, &cpr);
+ dispc_k2g_vp_write_csc(dispc, hw_videoport, &cpr);
+ cprenable = 1;
+ }
+
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG,
+ cprenable, 15, 15);
+}
+
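+/* As above, but for the wider s3.8 coefficient format of the K3 DISPC */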
+static s16 dispc_S31_32_to_s3_8(s64 coef)
+{
+ u64 sign_bit = 1ULL << 63;
+ u64 cbits = (u64)coef;
+ s16 ret;
+
+ if (cbits & sign_bit)
+ ret = -clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x400);
+ else
+ ret = clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x3FF);
+
+ return ret;
+}
+
+static void dispc_csc_from_ctm(const struct drm_color_ctm *ctm,
+ struct dispc_csc_coef *cpr)
+{
+ memset(cpr, 0, sizeof(*cpr));
+
+ cpr->to_regval = dispc_csc_cpr_regval;
+ cpr->m[CSC_RR] = dispc_S31_32_to_s3_8(ctm->matrix[0]);
+ cpr->m[CSC_RG] = dispc_S31_32_to_s3_8(ctm->matrix[1]);
+ cpr->m[CSC_RB] = dispc_S31_32_to_s3_8(ctm->matrix[2]);
+ cpr->m[CSC_GR] = dispc_S31_32_to_s3_8(ctm->matrix[3]);
+ cpr->m[CSC_GG] = dispc_S31_32_to_s3_8(ctm->matrix[4]);
+ cpr->m[CSC_GB] = dispc_S31_32_to_s3_8(ctm->matrix[5]);
+ cpr->m[CSC_BR] = dispc_S31_32_to_s3_8(ctm->matrix[6]);
+ cpr->m[CSC_BG] = dispc_S31_32_to_s3_8(ctm->matrix[7]);
+ cpr->m[CSC_BB] = dispc_S31_32_to_s3_8(ctm->matrix[8]);
+}
+
+static void dispc_k3_vp_write_csc(struct dispc_device *dispc, u32 hw_videoport,
+ const struct dispc_csc_coef *csc)
+{
+ static const u16 dispc_vp_csc_coef_reg[DISPC_CSC_REGVAL_LEN] = {
+ DISPC_VP_CSC_COEF0, DISPC_VP_CSC_COEF1, DISPC_VP_CSC_COEF2,
+ DISPC_VP_CSC_COEF3, DISPC_VP_CSC_COEF4, DISPC_VP_CSC_COEF5,
+ DISPC_VP_CSC_COEF6, DISPC_VP_CSC_COEF7,
+ };
+ u32 regval[DISPC_CSC_REGVAL_LEN];
+ unsigned int i;
+
+ csc->to_regval(csc, regval);
+
+ for (i = 0; i < ARRAY_SIZE(regval); i++)
+ dispc_vp_write(dispc, hw_videoport, dispc_vp_csc_coef_reg[i],
+ regval[i]);
+}
+
+static void dispc_k3_vp_set_ctm(struct dispc_device *dispc, u32 hw_videoport,
+ struct drm_color_ctm *ctm)
+{
+ u32 colorconvenable = 0;
+
+ if (ctm) {
+ struct dispc_csc_coef csc;
+
+ dispc_csc_from_ctm(ctm, &csc);
+ dispc_k3_vp_write_csc(dispc, hw_videoport, &csc);
+ colorconvenable = 1;
+ }
+
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG,
+ colorconvenable, 24, 24);
+}
+
+static void dispc_vp_set_color_mgmt(struct dispc_device *dispc,
+ u32 hw_videoport,
+ const struct drm_crtc_state *state,
+ bool newmodeset)
+{
+ struct drm_color_lut *lut = NULL;
+ struct drm_color_ctm *ctm = NULL;
+ unsigned int length = 0;
+
+ if (!(state->color_mgmt_changed || newmodeset))
+ return;
+
+ if (state->gamma_lut) {
+ lut = (struct drm_color_lut *)state->gamma_lut->data;
+ length = state->gamma_lut->length / sizeof(*lut);
+ }
+
+ dispc_vp_set_gamma(dispc, hw_videoport, lut, length);
+
+ if (state->ctm)
+ ctm = (struct drm_color_ctm *)state->ctm->data;
+
+ if (dispc->feat->subrev == DISPC_K2G)
+ dispc_k2g_vp_set_ctm(dispc, hw_videoport, ctm);
+ else
+ dispc_k3_vp_set_ctm(dispc, hw_videoport, ctm);
+}
+
+void dispc_vp_setup(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state, bool newmodeset)
+{
+ dispc_vp_set_default_color(dispc, hw_videoport, 0);
+ dispc_vp_set_color_mgmt(dispc, hw_videoport, state, newmodeset);
+}
+
+int dispc_runtime_suspend(struct dispc_device *dispc)
+{
+ dev_dbg(dispc->dev, "suspend\n");
+
+ dispc->is_enabled = false;
+
+ clk_disable_unprepare(dispc->fclk);
+
+ return 0;
+}
+
+int dispc_runtime_resume(struct dispc_device *dispc)
+{
+ dev_dbg(dispc->dev, "resume\n");
+
+ clk_prepare_enable(dispc->fclk);
+
+ if (REG_GET(dispc, DSS_SYSSTATUS, 0, 0) == 0)
+ dev_warn(dispc->dev, "DSS FUNC RESET not done!\n");
+
+ dev_dbg(dispc->dev, "OMAP DSS7 rev 0x%x\n",
+ dispc_read(dispc, DSS_REVISION));
+
+ dev_dbg(dispc->dev, "VP RESETDONE %d,%d,%d\n",
+ REG_GET(dispc, DSS_SYSSTATUS, 1, 1),
+ REG_GET(dispc, DSS_SYSSTATUS, 2, 2),
+ REG_GET(dispc, DSS_SYSSTATUS, 3, 3));
+
+ if (dispc->feat->subrev == DISPC_AM65X)
+ dev_dbg(dispc->dev, "OLDI RESETDONE %d,%d,%d\n",
+ REG_GET(dispc, DSS_SYSSTATUS, 5, 5),
+ REG_GET(dispc, DSS_SYSSTATUS, 6, 6),
+ REG_GET(dispc, DSS_SYSSTATUS, 7, 7));
+
+ dev_dbg(dispc->dev, "DISPC IDLE %d\n",
+ REG_GET(dispc, DSS_SYSSTATUS, 9, 9));
+
+ dispc_initial_config(dispc);
+
+ dispc->is_enabled = true;
+
+ tidss_irq_resume(dispc->tidss);
+
+ return 0;
+}
+
+void dispc_remove(struct tidss_device *tidss)
+{
+ dev_dbg(tidss->dev, "%s\n", __func__);
+
+ tidss->dispc = NULL;
+}
+
+static int dispc_iomap_resource(struct platform_device *pdev, const char *name,
+ void __iomem **base)
+{
+ struct resource *res;
+ void __iomem *b;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (!res) {
+ dev_err(&pdev->dev, "cannot get mem resource '%s'\n", name);
+ return -EINVAL;
+ }
+
+ b = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(b)) {
+ dev_err(&pdev->dev, "cannot ioremap resource '%s'\n", name);
+ return PTR_ERR(b);
+ }
+
+ *base = b;
+
+ return 0;
+}
+
+static int dispc_init_am65x_oldi_io_ctrl(struct device *dev,
+ struct dispc_device *dispc)
+{
+ dispc->oldi_io_ctrl =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "ti,am65x-oldi-io-ctrl");
+ if (PTR_ERR(dispc->oldi_io_ctrl) == -ENODEV) {
+ dispc->oldi_io_ctrl = NULL;
+ } else if (IS_ERR(dispc->oldi_io_ctrl)) {
+ dev_err(dev, "%s: syscon_regmap_lookup_by_phandle failed %ld\n",
+ __func__, PTR_ERR(dispc->oldi_io_ctrl));
+ return PTR_ERR(dispc->oldi_io_ctrl);
+ }
+ return 0;
+}
+
+int dispc_init(struct tidss_device *tidss)
+{
+ struct device *dev = tidss->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dispc_device *dispc;
+ const struct dispc_features *feat;
+ unsigned int i, num_fourccs;
+ int r = 0;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ feat = tidss->feat;
+
+ if (feat->subrev != DISPC_K2G) {
+ r = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (r)
+ dev_warn(dev, "cannot set DMA masks to 48-bit\n");
+ }
+
+ dispc = devm_kzalloc(dev, sizeof(*dispc), GFP_KERNEL);
+ if (!dispc)
+ return -ENOMEM;
+
+ dispc->fourccs = devm_kcalloc(dev, ARRAY_SIZE(dispc_color_formats),
+ sizeof(*dispc->fourccs), GFP_KERNEL);
+ if (!dispc->fourccs)
+ return -ENOMEM;
+
+ num_fourccs = 0;
+ for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i) {
+ if (feat->errata.i2000 &&
+ dispc_fourcc_is_yuv(dispc_color_formats[i].fourcc))
+ continue;
+ dispc->fourccs[num_fourccs++] = dispc_color_formats[i].fourcc;
+ }
+ dispc->num_fourccs = num_fourccs;
+ dispc->tidss = tidss;
+ dispc->dev = dev;
+ dispc->feat = feat;
+
+ dispc_common_regmap = dispc->feat->common_regs;
+
+ r = dispc_iomap_resource(pdev, dispc->feat->common,
+ &dispc->base_common);
+ if (r)
+ return r;
+
+ for (i = 0; i < dispc->feat->num_planes; i++) {
+ r = dispc_iomap_resource(pdev, dispc->feat->vid_name[i],
+ &dispc->base_vid[i]);
+ if (r)
+ return r;
+ }
+
+ for (i = 0; i < dispc->feat->num_vps; i++) {
+ u32 gamma_size = dispc->feat->vp_feat.color.gamma_size;
+ u32 *gamma_table;
+ struct clk *clk;
+
+ r = dispc_iomap_resource(pdev, dispc->feat->ovr_name[i],
+ &dispc->base_ovr[i]);
+ if (r)
+ return r;
+
+ r = dispc_iomap_resource(pdev, dispc->feat->vp_name[i],
+ &dispc->base_vp[i]);
+ if (r)
+ return r;
+
+ clk = devm_clk_get(dev, dispc->feat->vpclk_name[i]);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "%s: Failed to get clk %s:%ld\n", __func__,
+ dispc->feat->vpclk_name[i], PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+ dispc->vp_clk[i] = clk;
+
+ gamma_table = devm_kmalloc_array(dev, gamma_size,
+ sizeof(*gamma_table),
+ GFP_KERNEL);
+ if (!gamma_table)
+ return -ENOMEM;
+ dispc->vp_data[i].gamma_table = gamma_table;
+ }
+
+ if (feat->subrev == DISPC_AM65X) {
+ r = dispc_init_am65x_oldi_io_ctrl(dev, dispc);
+ if (r)
+ return r;
+ }
+
+ dispc->fclk = devm_clk_get(dev, "fck");
+ if (IS_ERR(dispc->fclk)) {
+ dev_err(dev, "%s: Failed to get fclk: %ld\n",
+ __func__, PTR_ERR(dispc->fclk));
+ return PTR_ERR(dispc->fclk);
+ }
+ dev_dbg(dev, "DSS fclk %lu Hz\n", clk_get_rate(dispc->fclk));
+
+ of_property_read_u32(dispc->dev->of_node, "max-memory-bandwidth",
+ &dispc->memory_bandwidth_limit);
+
+ tidss->dispc = dispc;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
new file mode 100644
index 000000000000..a4a68249e44b
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#ifndef __TIDSS_DISPC_H__
+#define __TIDSS_DISPC_H__
+
+#include "tidss_drv.h"
+
+struct dispc_device;
+
+struct drm_crtc_state;
+
+enum tidss_gamma_type { TIDSS_GAMMA_8BIT, TIDSS_GAMMA_10BIT };
+
+struct tidss_vp_feat {
+ struct tidss_vp_color_feat {
+ u32 gamma_size;
+ enum tidss_gamma_type gamma_type;
+ bool has_ctm;
+ } color;
+};
+
+struct tidss_plane_feat {
+ struct tidss_plane_color_feat {
+ u32 encodings;
+ u32 ranges;
+ enum drm_color_encoding default_encoding;
+ enum drm_color_range default_range;
+ } color;
+ struct tidss_plane_blend_feat {
+ bool global_alpha;
+ } blend;
+};
+
+struct dispc_features_scaling {
+ u32 in_width_max_5tap_rgb;
+ u32 in_width_max_3tap_rgb;
+ u32 in_width_max_5tap_yuv;
+ u32 in_width_max_3tap_yuv;
+ u32 upscale_limit;
+ u32 downscale_limit_5tap;
+ u32 downscale_limit_3tap;
+ u32 xinc_max;
+};
+
+struct dispc_errata {
+ bool i2000; /* DSS Does Not Support YUV Pixel Data Formats */
+};
+
+enum dispc_vp_bus_type {
+ DISPC_VP_DPI, /* DPI output */
+ DISPC_VP_OLDI, /* OLDI (LVDS) output */
+ DISPC_VP_INTERNAL, /* SoC internal routing */
+ DISPC_VP_MAX_BUS_TYPE,
+};
+
+enum dispc_dss_subrevision {
+ DISPC_K2G,
+ DISPC_AM65X,
+ DISPC_J721E,
+};
+
+struct dispc_features {
+ int min_pclk_khz;
+ int max_pclk_khz[DISPC_VP_MAX_BUS_TYPE];
+
+ struct dispc_features_scaling scaling;
+
+ enum dispc_dss_subrevision subrev;
+
+ const char *common;
+ const u16 *common_regs;
+ u32 num_vps;
+ const char *vp_name[TIDSS_MAX_PORTS]; /* Should match dt reg names */
+ const char *ovr_name[TIDSS_MAX_PORTS]; /* Should match dt reg names */
+ const char *vpclk_name[TIDSS_MAX_PORTS]; /* Should match dt clk names */
+ const enum dispc_vp_bus_type vp_bus_type[TIDSS_MAX_PORTS];
+ struct tidss_vp_feat vp_feat;
+ u32 num_planes;
+ const char *vid_name[TIDSS_MAX_PLANES]; /* Should match dt reg names */
+ bool vid_lite[TIDSS_MAX_PLANES];
+ u32 vid_order[TIDSS_MAX_PLANES];
+
+ struct dispc_errata errata;
+};
+
+extern const struct dispc_features dispc_k2g_feats;
+extern const struct dispc_features dispc_am65x_feats;
+extern const struct dispc_features dispc_j721e_feats;
+
+void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask);
+dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc);
+
+void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
+ u32 hw_videoport, u32 x, u32 y, u32 layer);
+void dispc_ovr_enable_layer(struct dispc_device *dispc,
+ u32 hw_videoport, u32 layer, bool enable);
+
+void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state);
+void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state);
+void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport);
+void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport);
+bool dispc_vp_go_busy(struct dispc_device *dispc, u32 hw_videoport);
+void dispc_vp_go(struct dispc_device *dispc, u32 hw_videoport);
+int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state);
+enum drm_mode_status dispc_vp_mode_valid(struct dispc_device *dispc,
+ u32 hw_videoport,
+ const struct drm_display_mode *mode);
+int dispc_vp_enable_clk(struct dispc_device *dispc, u32 hw_videoport);
+void dispc_vp_disable_clk(struct dispc_device *dispc, u32 hw_videoport);
+int dispc_vp_set_clk_rate(struct dispc_device *dispc, u32 hw_videoport,
+ unsigned long rate);
+void dispc_vp_setup(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state, bool newmodeset);
+
+int dispc_runtime_suspend(struct dispc_device *dispc);
+int dispc_runtime_resume(struct dispc_device *dispc);
+
+int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state,
+ u32 hw_videoport);
+int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state,
+ u32 hw_videoport);
+int dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable);
+const u32 *dispc_plane_formats(struct dispc_device *dispc, unsigned int *len);
+
+int dispc_init(struct tidss_device *tidss);
+void dispc_remove(struct tidss_device *tidss);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_dispc_regs.h b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
new file mode 100644
index 000000000000..88a83a41b6e3
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <[email protected]>
+ */
+
+#ifndef __TIDSS_DISPC_REGS_H
+#define __TIDSS_DISPC_REGS_H
+
+enum dispc_common_regs {
+ NOT_APPLICABLE_OFF = 0,
+ DSS_REVISION_OFF,
+ DSS_SYSCONFIG_OFF,
+ DSS_SYSSTATUS_OFF,
+ DISPC_IRQ_EOI_OFF,
+ DISPC_IRQSTATUS_RAW_OFF,
+ DISPC_IRQSTATUS_OFF,
+ DISPC_IRQENABLE_SET_OFF,
+ DISPC_IRQENABLE_CLR_OFF,
+ DISPC_VID_IRQENABLE_OFF,
+ DISPC_VID_IRQSTATUS_OFF,
+ DISPC_VP_IRQENABLE_OFF,
+ DISPC_VP_IRQSTATUS_OFF,
+ WB_IRQENABLE_OFF,
+ WB_IRQSTATUS_OFF,
+ DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF,
+ DISPC_GLOBAL_OUTPUT_ENABLE_OFF,
+ DISPC_GLOBAL_BUFFER_OFF,
+ DSS_CBA_CFG_OFF,
+ DISPC_DBG_CONTROL_OFF,
+ DISPC_DBG_STATUS_OFF,
+ DISPC_CLKGATING_DISABLE_OFF,
+ DISPC_SECURE_DISABLE_OFF,
+ FBDC_REVISION_1_OFF,
+ FBDC_REVISION_2_OFF,
+ FBDC_REVISION_3_OFF,
+ FBDC_REVISION_4_OFF,
+ FBDC_REVISION_5_OFF,
+ FBDC_REVISION_6_OFF,
+ FBDC_COMMON_CONTROL_OFF,
+ FBDC_CONSTANT_COLOR_0_OFF,
+ FBDC_CONSTANT_COLOR_1_OFF,
+ DISPC_CONNECTIONS_OFF,
+ DISPC_MSS_VP1_OFF,
+ DISPC_MSS_VP3_OFF,
+ DISPC_COMMON_REG_TABLE_LEN,
+};
+
+/*
+ * dispc_common_regmap should be defined as a const u16 * pointing
+ * to a valid DSS common register map for the platform before the
+ * macros below can be used.
+ */
+
+#define REG(r) (dispc_common_regmap[r ## _OFF])
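+/* e.g. DSS_REVISION below expands to dispc_common_regmap[DSS_REVISION_OFF] */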
+
+#define DSS_REVISION REG(DSS_REVISION)
+#define DSS_SYSCONFIG REG(DSS_SYSCONFIG)
+#define DSS_SYSSTATUS REG(DSS_SYSSTATUS)
+#define DISPC_IRQ_EOI REG(DISPC_IRQ_EOI)
+#define DISPC_IRQSTATUS_RAW REG(DISPC_IRQSTATUS_RAW)
+#define DISPC_IRQSTATUS REG(DISPC_IRQSTATUS)
+#define DISPC_IRQENABLE_SET REG(DISPC_IRQENABLE_SET)
+#define DISPC_IRQENABLE_CLR REG(DISPC_IRQENABLE_CLR)
+#define DISPC_VID_IRQENABLE(n) (REG(DISPC_VID_IRQENABLE) + (n) * 4)
+#define DISPC_VID_IRQSTATUS(n) (REG(DISPC_VID_IRQSTATUS) + (n) * 4)
+#define DISPC_VP_IRQENABLE(n) (REG(DISPC_VP_IRQENABLE) + (n) * 4)
+#define DISPC_VP_IRQSTATUS(n) (REG(DISPC_VP_IRQSTATUS) + (n) * 4)
+#define WB_IRQENABLE REG(WB_IRQENABLE)
+#define WB_IRQSTATUS REG(WB_IRQSTATUS)
+
+#define DISPC_GLOBAL_MFLAG_ATTRIBUTE REG(DISPC_GLOBAL_MFLAG_ATTRIBUTE)
+#define DISPC_GLOBAL_OUTPUT_ENABLE REG(DISPC_GLOBAL_OUTPUT_ENABLE)
+#define DISPC_GLOBAL_BUFFER REG(DISPC_GLOBAL_BUFFER)
+#define DSS_CBA_CFG REG(DSS_CBA_CFG)
+#define DISPC_DBG_CONTROL REG(DISPC_DBG_CONTROL)
+#define DISPC_DBG_STATUS REG(DISPC_DBG_STATUS)
+#define DISPC_CLKGATING_DISABLE REG(DISPC_CLKGATING_DISABLE)
+#define DISPC_SECURE_DISABLE REG(DISPC_SECURE_DISABLE)
+
+#define FBDC_REVISION_1 REG(FBDC_REVISION_1)
+#define FBDC_REVISION_2 REG(FBDC_REVISION_2)
+#define FBDC_REVISION_3 REG(FBDC_REVISION_3)
+#define FBDC_REVISION_4 REG(FBDC_REVISION_4)
+#define FBDC_REVISION_5 REG(FBDC_REVISION_5)
+#define FBDC_REVISION_6 REG(FBDC_REVISION_6)
+#define FBDC_COMMON_CONTROL REG(FBDC_COMMON_CONTROL)
+#define FBDC_CONSTANT_COLOR_0 REG(FBDC_CONSTANT_COLOR_0)
+#define FBDC_CONSTANT_COLOR_1 REG(FBDC_CONSTANT_COLOR_1)
+#define DISPC_CONNECTIONS REG(DISPC_CONNECTIONS)
+#define DISPC_MSS_VP1 REG(DISPC_MSS_VP1)
+#define DISPC_MSS_VP3 REG(DISPC_MSS_VP3)
+
+/* VID */
+
+#define DISPC_VID_ACCUH_0 0x0
+#define DISPC_VID_ACCUH_1 0x4
+#define DISPC_VID_ACCUH2_0 0x8
+#define DISPC_VID_ACCUH2_1 0xc
+#define DISPC_VID_ACCUV_0 0x10
+#define DISPC_VID_ACCUV_1 0x14
+#define DISPC_VID_ACCUV2_0 0x18
+#define DISPC_VID_ACCUV2_1 0x1c
+#define DISPC_VID_ATTRIBUTES 0x20
+#define DISPC_VID_ATTRIBUTES2 0x24
+#define DISPC_VID_BA_0 0x28
+#define DISPC_VID_BA_1 0x2c
+#define DISPC_VID_BA_UV_0 0x30
+#define DISPC_VID_BA_UV_1 0x34
+#define DISPC_VID_BUF_SIZE_STATUS 0x38
+#define DISPC_VID_BUF_THRESHOLD 0x3c
+#define DISPC_VID_CSC_COEF(n) (0x40 + (n) * 4)
+
+#define DISPC_VID_FIRH 0x5c
+#define DISPC_VID_FIRH2 0x60
+#define DISPC_VID_FIRV 0x64
+#define DISPC_VID_FIRV2 0x68
+
+#define DISPC_VID_FIR_COEFS_H0 0x6c
+#define DISPC_VID_FIR_COEF_H0(phase) (0x6c + (phase) * 4)
+#define DISPC_VID_FIR_COEFS_H0_C 0x90
+#define DISPC_VID_FIR_COEF_H0_C(phase) (0x90 + (phase) * 4)
+
+#define DISPC_VID_FIR_COEFS_H12 0xb4
+#define DISPC_VID_FIR_COEF_H12(phase) (0xb4 + (phase) * 4)
+#define DISPC_VID_FIR_COEFS_H12_C 0xf4
+#define DISPC_VID_FIR_COEF_H12_C(phase) (0xf4 + (phase) * 4)
+
+#define DISPC_VID_FIR_COEFS_V0 0x134
+#define DISPC_VID_FIR_COEF_V0(phase) (0x134 + (phase) * 4)
+#define DISPC_VID_FIR_COEFS_V0_C 0x158
+#define DISPC_VID_FIR_COEF_V0_C(phase) (0x158 + (phase) * 4)
+
+#define DISPC_VID_FIR_COEFS_V12 0x17c
+#define DISPC_VID_FIR_COEF_V12(phase) (0x17c + (phase) * 4)
+#define DISPC_VID_FIR_COEFS_V12_C 0x1bc
+#define DISPC_VID_FIR_COEF_V12_C(phase) (0x1bc + (phase) * 4)
+
+#define DISPC_VID_GLOBAL_ALPHA 0x1fc
+#define DISPC_VID_K2G_IRQENABLE 0x200 /* K2G */
+#define DISPC_VID_K2G_IRQSTATUS 0x204 /* K2G */
+#define DISPC_VID_MFLAG_THRESHOLD 0x208
+#define DISPC_VID_PICTURE_SIZE 0x20c
+#define DISPC_VID_PIXEL_INC 0x210
+#define DISPC_VID_K2G_POSITION 0x214 /* K2G */
+#define DISPC_VID_PRELOAD 0x218
+#define DISPC_VID_ROW_INC 0x21c
+#define DISPC_VID_SIZE 0x220
+#define DISPC_VID_BA_EXT_0 0x22c
+#define DISPC_VID_BA_EXT_1 0x230
+#define DISPC_VID_BA_UV_EXT_0 0x234
+#define DISPC_VID_BA_UV_EXT_1 0x238
+#define DISPC_VID_CSC_COEF7 0x23c
+#define DISPC_VID_ROW_INC_UV 0x248
+#define DISPC_VID_CLUT 0x260
+#define DISPC_VID_SAFETY_ATTRIBUTES 0x2a0
+#define DISPC_VID_SAFETY_CAPT_SIGNATURE 0x2a4
+#define DISPC_VID_SAFETY_POSITION 0x2a8
+#define DISPC_VID_SAFETY_REF_SIGNATURE 0x2ac
+#define DISPC_VID_SAFETY_SIZE 0x2b0
+#define DISPC_VID_SAFETY_LFSR_SEED 0x2b4
+#define DISPC_VID_LUMAKEY 0x2b8
+#define DISPC_VID_DMA_BUFSIZE 0x2bc /* J721E */
+
+/* OVR */
+
+#define DISPC_OVR_CONFIG 0x0
+#define DISPC_OVR_VIRTVP 0x4 /* J721E */
+#define DISPC_OVR_DEFAULT_COLOR 0x8
+#define DISPC_OVR_DEFAULT_COLOR2 0xc
+#define DISPC_OVR_TRANS_COLOR_MAX 0x10
+#define DISPC_OVR_TRANS_COLOR_MAX2 0x14
+#define DISPC_OVR_TRANS_COLOR_MIN 0x18
+#define DISPC_OVR_TRANS_COLOR_MIN2 0x1c
+#define DISPC_OVR_ATTRIBUTES(n) (0x20 + (n) * 4)
+#define DISPC_OVR_ATTRIBUTES2(n) (0x34 + (n) * 4) /* J721E */
+
+/* VP */
+
+#define DISPC_VP_CONFIG 0x0
+#define DISPC_VP_CONTROL 0x4
+#define DISPC_VP_CSC_COEF0 0x8
+#define DISPC_VP_CSC_COEF1 0xc
+#define DISPC_VP_CSC_COEF2 0x10
+#define DISPC_VP_DATA_CYCLE_0 0x14
+#define DISPC_VP_DATA_CYCLE_1 0x18
+#define DISPC_VP_K2G_GAMMA_TABLE 0x20 /* K2G */
+#define DISPC_VP_K2G_IRQENABLE 0x3c /* K2G */
+#define DISPC_VP_K2G_IRQSTATUS 0x40 /* K2G */
+#define DISPC_VP_DATA_CYCLE_2 0x1c
+#define DISPC_VP_LINE_NUMBER 0x44
+#define DISPC_VP_POL_FREQ 0x4c
+#define DISPC_VP_SIZE_SCREEN 0x50
+#define DISPC_VP_TIMING_H 0x54
+#define DISPC_VP_TIMING_V 0x58
+#define DISPC_VP_CSC_COEF3 0x5c
+#define DISPC_VP_CSC_COEF4 0x60
+#define DISPC_VP_CSC_COEF5 0x64
+#define DISPC_VP_CSC_COEF6 0x68
+#define DISPC_VP_CSC_COEF7 0x6c
+#define DISPC_VP_SAFETY_ATTRIBUTES_0 0x70
+#define DISPC_VP_SAFETY_ATTRIBUTES_1 0x74
+#define DISPC_VP_SAFETY_ATTRIBUTES_2 0x78
+#define DISPC_VP_SAFETY_ATTRIBUTES_3 0x7c
+#define DISPC_VP_SAFETY_CAPT_SIGNATURE_0 0x90
+#define DISPC_VP_SAFETY_CAPT_SIGNATURE_1 0x94
+#define DISPC_VP_SAFETY_CAPT_SIGNATURE_2 0x98
+#define DISPC_VP_SAFETY_CAPT_SIGNATURE_3 0x9c
+#define DISPC_VP_SAFETY_POSITION_0 0xb0
+#define DISPC_VP_SAFETY_POSITION_1 0xb4
+#define DISPC_VP_SAFETY_POSITION_2 0xb8
+#define DISPC_VP_SAFETY_POSITION_3 0xbc
+#define DISPC_VP_SAFETY_REF_SIGNATURE_0 0xd0
+#define DISPC_VP_SAFETY_REF_SIGNATURE_1 0xd4
+#define DISPC_VP_SAFETY_REF_SIGNATURE_2 0xd8
+#define DISPC_VP_SAFETY_REF_SIGNATURE_3 0xdc
+#define DISPC_VP_SAFETY_SIZE_0 0xf0
+#define DISPC_VP_SAFETY_SIZE_1 0xf4
+#define DISPC_VP_SAFETY_SIZE_2 0xf8
+#define DISPC_VP_SAFETY_SIZE_3 0xfc
+#define DISPC_VP_SAFETY_LFSR_SEED 0x110
+#define DISPC_VP_GAMMA_TABLE 0x120
+#define DISPC_VP_DSS_OLDI_CFG 0x160
+#define DISPC_VP_DSS_OLDI_STATUS 0x164
+#define DISPC_VP_DSS_OLDI_LB 0x168
+#define DISPC_VP_DSS_MERGE_SPLIT 0x16c /* J721E */
+#define DISPC_VP_DSS_DMA_THREADSIZE 0x170 /* J721E */
+#define DISPC_VP_DSS_DMA_THREADSIZE_STATUS 0x174 /* J721E */
+
+/*
+ * OLDI IO_CTRL register offsets. On AM654 the registers are found
+ * in CTRL_MMR0; the syscon regmap should map the 0x14-byte range
+ * from CTRLMMR0P1_OLDI_DAT0_IO_CTRL to CTRLMMR0P1_OLDI_CLK_IO_CTRL.
+ */
+#define OLDI_DAT0_IO_CTRL 0x00
+#define OLDI_DAT1_IO_CTRL 0x04
+#define OLDI_DAT2_IO_CTRL 0x08
+#define OLDI_DAT3_IO_CTRL 0x0C
+#define OLDI_CLK_IO_CTRL 0x10
+
+#define OLDI_PWRDN_TX BIT(8)
+
+#endif /* __TIDSS_DISPC_REGS_H */
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
new file mode 100644
index 000000000000..d95e4be2c7b9
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#include <linux/console.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_probe_helper.h>
+
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_kms.h"
+#include "tidss_irq.h"
+
+/* Power management */
+
+int tidss_runtime_get(struct tidss_device *tidss)
+{
+ int r;
+
+ dev_dbg(tidss->dev, "%s\n", __func__);
+
+ r = pm_runtime_get_sync(tidss->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
+}
+
+void tidss_runtime_put(struct tidss_device *tidss)
+{
+ int r;
+
+ dev_dbg(tidss->dev, "%s\n", __func__);
+
+ r = pm_runtime_put_sync(tidss->dev);
+ WARN_ON(r < 0);
+}
+
+static int __maybe_unused tidss_pm_runtime_suspend(struct device *dev)
+{
+ struct tidss_device *tidss = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ return dispc_runtime_suspend(tidss->dispc);
+}
+
+static int __maybe_unused tidss_pm_runtime_resume(struct device *dev)
+{
+ struct tidss_device *tidss = dev_get_drvdata(dev);
+ int r;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ r = dispc_runtime_resume(tidss->dispc);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int __maybe_unused tidss_suspend(struct device *dev)
+{
+ struct tidss_device *tidss = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ return drm_mode_config_helper_suspend(&tidss->ddev);
+}
+
+static int __maybe_unused tidss_resume(struct device *dev)
+{
+ struct tidss_device *tidss = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ return drm_mode_config_helper_resume(&tidss->ddev);
+}
+
+#ifdef CONFIG_PM
+
+static const struct dev_pm_ops tidss_pm_ops = {
+ .runtime_suspend = tidss_pm_runtime_suspend,
+ .runtime_resume = tidss_pm_runtime_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(tidss_suspend, tidss_resume)
+};
+
+#endif /* CONFIG_PM */
+
+/* DRM device Information */
+
+static void tidss_release(struct drm_device *ddev)
+{
+ struct tidss_device *tidss = ddev->dev_private;
+
+ drm_kms_helper_poll_fini(ddev);
+
+ tidss_modeset_cleanup(tidss);
+
+ drm_dev_fini(ddev);
+
+ kfree(tidss);
+}
+
+DEFINE_DRM_GEM_CMA_FOPS(tidss_fops);
+
+static struct drm_driver tidss_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &tidss_fops,
+ .release = tidss_release,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
+ .name = "tidss",
+ .desc = "TI Keystone DSS",
+ .date = "20180215",
+ .major = 1,
+ .minor = 0,
+
+ .irq_preinstall = tidss_irq_preinstall,
+ .irq_postinstall = tidss_irq_postinstall,
+ .irq_handler = tidss_irq_handler,
+ .irq_uninstall = tidss_irq_uninstall,
+};
+
+static int tidss_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tidss_device *tidss;
+ struct drm_device *ddev;
+ int ret;
+ int irq;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /* Can't use devm_* since drm_device's lifetime may exceed dev's */
+ tidss = kzalloc(sizeof(*tidss), GFP_KERNEL);
+ if (!tidss)
+ return -ENOMEM;
+
+ ddev = &tidss->ddev;
+
+ ret = devm_drm_dev_init(&pdev->dev, ddev, &tidss_driver);
+ if (ret) {
+ kfree(ddev);
+ return ret;
+ }
+
+ tidss->dev = dev;
+ tidss->feat = of_device_get_match_data(dev);
+
+ platform_set_drvdata(pdev, tidss);
+
+ ddev->dev_private = tidss;
+
+ ret = dispc_init(tidss);
+ if (ret) {
+ dev_err(dev, "failed to initialize dispc: %d\n", ret);
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+#ifndef CONFIG_PM
+ /* If we don't have PM, we need to call resume manually */
+ dispc_runtime_resume(tidss->dispc);
+#endif
+
+ ret = tidss_modeset_init(tidss);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to init DRM/KMS (%d)\n", ret);
+ goto err_runtime_suspend;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_runtime_suspend;
+ }
+
+ ret = drm_irq_install(ddev, irq);
+ if (ret) {
+ dev_err(dev, "drm_irq_install failed: %d\n", ret);
+ goto err_runtime_suspend;
+ }
+
+ drm_kms_helper_poll_init(ddev);
+
+ drm_mode_config_reset(ddev);
+
+ ret = drm_dev_register(ddev, 0);
+ if (ret) {
+ dev_err(dev, "failed to register DRM device\n");
+ goto err_irq_uninstall;
+ }
+
+ drm_fbdev_generic_setup(ddev, 32);
+
+ dev_dbg(dev, "%s done\n", __func__);
+
+ return 0;
+
+err_irq_uninstall:
+ drm_irq_uninstall(ddev);
+
+err_runtime_suspend:
+#ifndef CONFIG_PM
+ dispc_runtime_suspend(tidss->dispc);
+#endif
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int tidss_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tidss_device *tidss = platform_get_drvdata(pdev);
+ struct drm_device *ddev = &tidss->ddev;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ drm_dev_unregister(ddev);
+
+ drm_atomic_helper_shutdown(ddev);
+
+ drm_irq_uninstall(ddev);
+
+#ifndef CONFIG_PM
+ /* If we don't have PM, we need to call suspend manually */
+ dispc_runtime_suspend(tidss->dispc);
+#endif
+ pm_runtime_disable(dev);
+
+ /* The devm-allocated dispc goes away with the dev, so mark it NULL */
+ dispc_remove(tidss);
+
+ dev_dbg(dev, "%s done\n", __func__);
+
+ return 0;
+}
+
+static void tidss_shutdown(struct platform_device *pdev)
+{
+ struct tidss_device *tidss = platform_get_drvdata(pdev);
+
+ /* drvdata is the tidss device; pass the embedded drm_device explicitly */
+ drm_atomic_helper_shutdown(&tidss->ddev);
+}
+
+static const struct of_device_id tidss_of_table[] = {
+ { .compatible = "ti,k2g-dss", .data = &dispc_k2g_feats, },
+ { .compatible = "ti,am65x-dss", .data = &dispc_am65x_feats, },
+ { .compatible = "ti,j721e-dss", .data = &dispc_j721e_feats, },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, tidss_of_table);
+
+static struct platform_driver tidss_platform_driver = {
+ .probe = tidss_probe,
+ .remove = tidss_remove,
+ .shutdown = tidss_shutdown,
+ .driver = {
+ .name = "tidss",
+#ifdef CONFIG_PM
+ .pm = &tidss_pm_ops,
+#endif
+ .of_match_table = tidss_of_table,
+ .suppress_bind_attrs = true,
+ },
+};
+
+module_platform_driver(tidss_platform_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
+MODULE_DESCRIPTION("TI Keystone DSS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h
new file mode 100644
index 000000000000..e2aa6436ad18
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_drv.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#ifndef __TIDSS_DRV_H__
+#define __TIDSS_DRV_H__
+
+#include <linux/spinlock.h>
+
+#define TIDSS_MAX_PORTS 4
+#define TIDSS_MAX_PLANES 4
+
+typedef u32 dispc_irq_t;
+
+struct tidss_device {
+ struct drm_device ddev; /* DRM device for DSS */
+ struct device *dev; /* Underlying DSS device */
+
+ const struct dispc_features *feat;
+ struct dispc_device *dispc;
+
+ unsigned int num_crtcs;
+ struct drm_crtc *crtcs[TIDSS_MAX_PORTS];
+
+ unsigned int num_planes;
+ struct drm_plane *planes[TIDSS_MAX_PLANES];
+
+ spinlock_t wait_lock; /* protects the irq masks */
+ dispc_irq_t irq_mask; /* enabled irqs in addition to wait_list */
+
+ struct drm_atomic_state *saved_state;
+};
+
+int tidss_runtime_get(struct tidss_device *tidss);
+void tidss_runtime_put(struct tidss_device *tidss);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.c b/drivers/gpu/drm/tidss/tidss_encoder.c
new file mode 100644
index 000000000000..83785b0a66a9
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_encoder.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#include <linux/export.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_of.h>
+
+#include "tidss_crtc.h"
+#include "tidss_drv.h"
+#include "tidss_encoder.h"
+
+static int tidss_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_device *ddev = encoder->dev;
+ struct tidss_crtc_state *tcrtc_state = to_tidss_crtc_state(crtc_state);
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ struct drm_bridge *bridge;
+ bool bus_flags_set = false;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ /*
+ * Take the bus_flags from the first bridge that defines
+ * bridge timings, or from the connector's display_info if no
+ * bridge defines the timings.
+ */
+ drm_for_each_bridge_in_chain(encoder, bridge) {
+ if (!bridge->timings)
+ continue;
+
+ tcrtc_state->bus_flags = bridge->timings->input_bus_flags;
+ bus_flags_set = true;
+ break;
+ }
+
+ if (!di->bus_formats || di->num_bus_formats == 0) {
+ dev_err(ddev->dev, "%s: No bus_formats in connected display\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ // XXX any cleaner way to set bus format and flags?
+ tcrtc_state->bus_format = di->bus_formats[0];
+ if (!bus_flags_set)
+ tcrtc_state->bus_flags = di->bus_flags;
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+ .atomic_check = tidss_encoder_atomic_check,
+};
+
+static const struct drm_encoder_funcs encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss,
+ u32 encoder_type, u32 possible_crtcs)
+{
+ struct drm_encoder *enc;
+ int ret;
+
+ enc = devm_kzalloc(tidss->dev, sizeof(*enc), GFP_KERNEL);
+ if (!enc)
+ return ERR_PTR(-ENOMEM);
+
+ enc->possible_crtcs = possible_crtcs;
+
+ ret = drm_encoder_init(&tidss->ddev, enc, &encoder_funcs,
+ encoder_type, NULL);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ drm_encoder_helper_add(enc, &encoder_helper_funcs);
+
+ dev_dbg(tidss->dev, "Encoder create done\n");
+
+ return enc;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.h b/drivers/gpu/drm/tidss/tidss_encoder.h
new file mode 100644
index 000000000000..06854d66e7e6
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_encoder.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#ifndef __TIDSS_ENCODER_H__
+#define __TIDSS_ENCODER_H__
+
+#include <drm/drm_encoder.h>
+
+struct tidss_device;
+
+struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss,
+ u32 encoder_type, u32 possible_crtcs);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c
new file mode 100644
index 000000000000..612c046738e5
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_irq.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#include <drm/drm_print.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_irq.h"
+#include "tidss_plane.h"
+
+/* call with wait_lock and dispc runtime held */
+static void tidss_irq_update(struct tidss_device *tidss)
+{
+ assert_spin_locked(&tidss->wait_lock);
+
+ dispc_set_irqenable(tidss->dispc, tidss->irq_mask);
+}
+
+void tidss_irq_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ u32 hw_videoport = tcrtc->hw_videoport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tidss->wait_lock, flags);
+ tidss->irq_mask |= DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
+ DSS_IRQ_VP_VSYNC_ODD(hw_videoport);
+ tidss_irq_update(tidss);
+ spin_unlock_irqrestore(&tidss->wait_lock, flags);
+}
+
+void tidss_irq_disable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ u32 hw_videoport = tcrtc->hw_videoport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tidss->wait_lock, flags);
+ tidss->irq_mask &= ~(DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
+ DSS_IRQ_VP_VSYNC_ODD(hw_videoport));
+ tidss_irq_update(tidss);
+ spin_unlock_irqrestore(&tidss->wait_lock, flags);
+}
+
+irqreturn_t tidss_irq_handler(int irq, void *arg)
+{
+ struct drm_device *ddev = (struct drm_device *)arg;
+ struct tidss_device *tidss = ddev->dev_private;
+ unsigned int id;
+ dispc_irq_t irqstatus;
+
+ if (WARN_ON(!ddev->irq_enabled))
+ return IRQ_NONE;
+
+ irqstatus = dispc_read_and_clear_irqstatus(tidss->dispc);
+
+ for (id = 0; id < tidss->num_crtcs; id++) {
+ struct drm_crtc *crtc = tidss->crtcs[id];
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ u32 hw_videoport = tcrtc->hw_videoport;
+
+ if (irqstatus & (DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
+ DSS_IRQ_VP_VSYNC_ODD(hw_videoport)))
+ tidss_crtc_vblank_irq(crtc);
+
+ if (irqstatus & (DSS_IRQ_VP_FRAME_DONE(hw_videoport)))
+ tidss_crtc_framedone_irq(crtc);
+
+ if (irqstatus & DSS_IRQ_VP_SYNC_LOST(hw_videoport))
+ tidss_crtc_error_irq(crtc, irqstatus);
+ }
+
+ if (irqstatus & DSS_IRQ_DEVICE_OCP_ERR)
+ dev_err_ratelimited(tidss->dev, "OCP error\n");
+
+ return IRQ_HANDLED;
+}
+
+void tidss_irq_resume(struct tidss_device *tidss)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tidss->wait_lock, flags);
+ tidss_irq_update(tidss);
+ spin_unlock_irqrestore(&tidss->wait_lock, flags);
+}
+
+void tidss_irq_preinstall(struct drm_device *ddev)
+{
+ struct tidss_device *tidss = ddev->dev_private;
+
+ spin_lock_init(&tidss->wait_lock);
+
+ tidss_runtime_get(tidss);
+
+ dispc_set_irqenable(tidss->dispc, 0);
+ dispc_read_and_clear_irqstatus(tidss->dispc);
+
+ tidss_runtime_put(tidss);
+}
+
+int tidss_irq_postinstall(struct drm_device *ddev)
+{
+ struct tidss_device *tidss = ddev->dev_private;
+ unsigned long flags;
+ unsigned int i;
+
+ tidss_runtime_get(tidss);
+
+ spin_lock_irqsave(&tidss->wait_lock, flags);
+
+ tidss->irq_mask = DSS_IRQ_DEVICE_OCP_ERR;
+
+ for (i = 0; i < tidss->num_crtcs; ++i) {
+ struct tidss_crtc *tcrtc = to_tidss_crtc(tidss->crtcs[i]);
+
+ tidss->irq_mask |= DSS_IRQ_VP_SYNC_LOST(tcrtc->hw_videoport);
+
+ tidss->irq_mask |= DSS_IRQ_VP_FRAME_DONE(tcrtc->hw_videoport);
+ }
+
+ tidss_irq_update(tidss);
+
+ spin_unlock_irqrestore(&tidss->wait_lock, flags);
+
+ tidss_runtime_put(tidss);
+
+ return 0;
+}
+
+void tidss_irq_uninstall(struct drm_device *ddev)
+{
+ struct tidss_device *tidss = ddev->dev_private;
+
+ tidss_runtime_get(tidss);
+ dispc_set_irqenable(tidss->dispc, 0);
+ tidss_runtime_put(tidss);
+}
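
The entry points above all follow one locking convention: mutate tidss->irq_mask under wait_lock, then call tidss_irq_update() to push the mask to the hardware. A hypothetical helper for another IRQ source would look the same. This is a sketch in the style of the file, not part of the patch; tidss_irq_enable_framedone() does not exist in the driver:

static void tidss_irq_enable_framedone(struct tidss_device *tidss,
				       u32 hw_videoport)
{
	unsigned long flags;

	/* same pattern as the vblank helpers: mask change under wait_lock */
	spin_lock_irqsave(&tidss->wait_lock, flags);
	tidss->irq_mask |= DSS_IRQ_VP_FRAME_DONE(hw_videoport);
	tidss_irq_update(tidss);	/* writes the mask to the DISPC */
	spin_unlock_irqrestore(&tidss->wait_lock, flags);
}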
diff --git a/drivers/gpu/drm/tidss/tidss_irq.h b/drivers/gpu/drm/tidss/tidss_irq.h
new file mode 100644
index 000000000000..aa92db403cca
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_irq.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#ifndef __TIDSS_IRQ_H__
+#define __TIDSS_IRQ_H__
+
+#include <linux/types.h>
+
+#include "tidss_drv.h"
+
+/*
+ * The IRQ status from various DISPC IRQ registers are packed into a single
+ * value, where the bits are defined as follows:
+ *
+ * bit group |dev|wb |mrg0|mrg1|mrg2 |mrg3 |plane0-3| <unused> |
+ * bit use   |D  |fou|FEOL|FEOL|FEOL |FEOL |  UUUU  |          |
+ * bit number|0  |1-3|4-7 |8-11|12-15|16-19| 20-23  |  24-31   |
+ *
+ * device bits: D = OCP error
+ * WB bits: f = frame done wb, o = wb buffer overflow,
+ * u = wb buffer uncomplete
+ * vp bits: F = frame done, E = vsync even, O = vsync odd, L = sync lost
+ * plane bits: U = fifo underflow
+ */
+
+#define DSS_IRQ_DEVICE_OCP_ERR BIT(0)
+
+#define DSS_IRQ_DEVICE_FRAMEDONEWB BIT(1)
+#define DSS_IRQ_DEVICE_WBBUFFEROVERFLOW BIT(2)
+#define DSS_IRQ_DEVICE_WBUNCOMPLETEERROR BIT(3)
+#define DSS_IRQ_DEVICE_WB_MASK GENMASK(3, 1)
+
+#define DSS_IRQ_VP_BIT_N(ch, bit) (4 + 4 * (ch) + (bit))
+#define DSS_IRQ_PLANE_BIT_N(plane, bit) \
+ (DSS_IRQ_VP_BIT_N(TIDSS_MAX_PORTS, 0) + 1 * (plane) + (bit))
+
+#define DSS_IRQ_VP_BIT(ch, bit) BIT(DSS_IRQ_VP_BIT_N((ch), (bit)))
+#define DSS_IRQ_PLANE_BIT(plane, bit) \
+ BIT(DSS_IRQ_PLANE_BIT_N((plane), (bit)))
+
+static inline dispc_irq_t DSS_IRQ_VP_MASK(u32 ch)
+{
+ return GENMASK(DSS_IRQ_VP_BIT_N((ch), 3), DSS_IRQ_VP_BIT_N((ch), 0));
+}
+
+static inline dispc_irq_t DSS_IRQ_PLANE_MASK(u32 plane)
+{
+ return GENMASK(DSS_IRQ_PLANE_BIT_N((plane), 0),
+ DSS_IRQ_PLANE_BIT_N((plane), 0));
+}
+
+#define DSS_IRQ_VP_FRAME_DONE(ch) DSS_IRQ_VP_BIT((ch), 0)
+#define DSS_IRQ_VP_VSYNC_EVEN(ch) DSS_IRQ_VP_BIT((ch), 1)
+#define DSS_IRQ_VP_VSYNC_ODD(ch) DSS_IRQ_VP_BIT((ch), 2)
+#define DSS_IRQ_VP_SYNC_LOST(ch) DSS_IRQ_VP_BIT((ch), 3)
+
+#define DSS_IRQ_PLANE_FIFO_UNDERFLOW(plane) DSS_IRQ_PLANE_BIT((plane), 0)
+
+struct drm_crtc;
+struct drm_device;
+
+struct tidss_device;
+
+void tidss_irq_enable_vblank(struct drm_crtc *crtc);
+void tidss_irq_disable_vblank(struct drm_crtc *crtc);
+
+void tidss_irq_preinstall(struct drm_device *ddev);
+int tidss_irq_postinstall(struct drm_device *ddev);
+void tidss_irq_uninstall(struct drm_device *ddev);
+irqreturn_t tidss_irq_handler(int irq, void *arg);
+
+void tidss_irq_resume(struct tidss_device *tidss);
+
+#endif
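
To make the packing concrete, here is a standalone user-space sketch (not part of the patch) that mirrors the bit-number macros above and prints where a few events land. It assumes TIDSS_MAX_PORTS is 4, which is what the 20-23 plane range in the layout comment implies.

#include <stdio.h>

#define TIDSS_MAX_PORTS	4	/* assumption, matches the layout comment */

#define DSS_IRQ_VP_BIT_N(ch, bit)	(4 + 4 * (ch) + (bit))
#define DSS_IRQ_PLANE_BIT_N(plane, bit) \
	(DSS_IRQ_VP_BIT_N(TIDSS_MAX_PORTS, 0) + 1 * (plane) + (bit))

int main(void)
{
	/* VP0 FRAME_DONE:   4 + 4 * 0 + 0 = bit 4 */
	printf("VP0 FRAME_DONE   -> bit %d\n", DSS_IRQ_VP_BIT_N(0, 0));
	/* VP1 VSYNC_EVEN:   4 + 4 * 1 + 1 = bit 9 */
	printf("VP1 VSYNC_EVEN   -> bit %d\n", DSS_IRQ_VP_BIT_N(1, 1));
	/* plane 2 underflow: 20 + 2 = bit 22, inside the 20-23 range */
	printf("plane2 UNDERFLOW -> bit %d\n", DSS_IRQ_PLANE_BIT_N(2, 0));
	return 0;
}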
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
new file mode 100644
index 000000000000..7d419960b030
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_vblank.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_encoder.h"
+#include "tidss_kms.h"
+#include "tidss_plane.h"
+
+static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
+{
+ struct drm_device *ddev = old_state->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ tidss_runtime_get(tidss);
+
+ drm_atomic_helper_commit_modeset_disables(ddev, old_state);
+ drm_atomic_helper_commit_planes(ddev, old_state, 0);
+ drm_atomic_helper_commit_modeset_enables(ddev, old_state);
+
+ drm_atomic_helper_commit_hw_done(old_state);
+ drm_atomic_helper_wait_for_flip_done(ddev, old_state);
+
+ drm_atomic_helper_cleanup_planes(ddev, old_state);
+
+ tidss_runtime_put(tidss);
+}
+
+static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
+ .atomic_commit_tail = tidss_atomic_commit_tail,
+};
+
+static int tidss_atomic_check(struct drm_device *ddev,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *opstate;
+ struct drm_plane_state *npstate;
+ struct drm_plane *plane;
+ struct drm_crtc_state *cstate;
+ struct drm_crtc *crtc;
+ int ret, i;
+
+ ret = drm_atomic_helper_check(ddev, state);
+ if (ret)
+ return ret;
+
+ /*
+ * Add all active planes on a CRTC to the atomic state, if
+ * x/y/z position or activity of any plane on that CRTC
+ * changes. This is needed for updating the plane positions in
+ * tidss_crtc_position_planes() which is called from
+ * crtc_atomic_enable() and crtc_atomic_flush(). We have an
+ * extra flag to mark x,y-position changes and together
+ * with zpos_changed the condition recognizes all the above
+ * cases.
+ */
+ for_each_oldnew_plane_in_state(state, plane, opstate, npstate, i) {
+ if (!npstate->crtc || !npstate->visible)
+ continue;
+
+ if (!opstate->crtc || opstate->crtc_x != npstate->crtc_x ||
+ opstate->crtc_y != npstate->crtc_y) {
+ cstate = drm_atomic_get_crtc_state(state,
+ npstate->crtc);
+ if (IS_ERR(cstate))
+ return PTR_ERR(cstate);
+ to_tidss_crtc_state(cstate)->plane_pos_changed = true;
+ }
+ }
+
+ for_each_new_crtc_in_state(state, crtc, cstate, i) {
+ if (to_tidss_crtc_state(cstate)->plane_pos_changed ||
+ cstate->zpos_changed) {
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct drm_mode_config_funcs mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = tidss_atomic_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int tidss_dispc_modeset_init(struct tidss_device *tidss)
+{
+ struct device *dev = tidss->dev;
+ unsigned int fourccs_len;
+ const u32 *fourccs = dispc_plane_formats(tidss->dispc, &fourccs_len);
+ unsigned int i;
+
+ struct pipe {
+ u32 hw_videoport;
+ struct drm_bridge *bridge;
+ u32 enc_type;
+ };
+
+ const struct dispc_features *feat = tidss->feat;
+ u32 max_vps = feat->num_vps;
+ u32 max_planes = feat->num_planes;
+
+ struct pipe pipes[TIDSS_MAX_PORTS];
+ u32 num_pipes = 0;
+ u32 crtc_mask;
+
+ /* first find all the connected panels & bridges */
+
+ for (i = 0; i < max_vps; i++) {
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ u32 enc_type = DRM_MODE_ENCODER_NONE;
+ int ret;
+
+ ret = drm_of_find_panel_or_bridge(dev->of_node, i, 0,
+ &panel, &bridge);
+ if (ret == -ENODEV) {
+ dev_dbg(dev, "no panel/bridge for port %d\n", i);
+ continue;
+ } else if (ret) {
+ dev_dbg(dev, "port %d probe returned %d\n", i, ret);
+ return ret;
+ }
+
+ if (panel) {
+ u32 conn_type;
+
+ dev_dbg(dev, "Setting up panel for port %d\n", i);
+
+ switch (feat->vp_bus_type[i]) {
+ case DISPC_VP_OLDI:
+ enc_type = DRM_MODE_ENCODER_LVDS;
+ conn_type = DRM_MODE_CONNECTOR_LVDS;
+ break;
+ case DISPC_VP_DPI:
+ enc_type = DRM_MODE_ENCODER_DPI;
+ conn_type = DRM_MODE_CONNECTOR_LVDS;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (panel->connector_type != conn_type) {
+ dev_err(dev,
+ "%s: Panel %s has incompatible connector type for vp%d (%d != %d)\n",
+ __func__, dev_name(panel->dev), i,
+ panel->connector_type, conn_type);
+ return -EINVAL;
+ }
+
+ bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(bridge)) {
+ dev_err(dev,
+ "failed to set up panel bridge for port %d\n",
+ i);
+ return PTR_ERR(bridge);
+ }
+ }
+
+ pipes[num_pipes].hw_videoport = i;
+ pipes[num_pipes].bridge = bridge;
+ pipes[num_pipes].enc_type = enc_type;
+ num_pipes++;
+ }
+
+ /* all planes can be on any crtc */
+ crtc_mask = (1 << num_pipes) - 1;
+
+ /* then create a plane, a crtc and an encoder for each panel/bridge */
+
+ for (i = 0; i < num_pipes; ++i) {
+ struct tidss_plane *tplane;
+ struct tidss_crtc *tcrtc;
+ struct drm_encoder *enc;
+ u32 hw_plane_id = feat->vid_order[tidss->num_planes];
+ int ret;
+
+ tplane = tidss_plane_create(tidss, hw_plane_id,
+ DRM_PLANE_TYPE_PRIMARY, crtc_mask,
+ fourccs, fourccs_len);
+ if (IS_ERR(tplane)) {
+ dev_err(tidss->dev, "plane create failed\n");
+ return PTR_ERR(tplane);
+ }
+
+ tidss->planes[tidss->num_planes++] = &tplane->plane;
+
+ tcrtc = tidss_crtc_create(tidss, pipes[i].hw_videoport,
+ &tplane->plane);
+ if (IS_ERR(tcrtc)) {
+ dev_err(tidss->dev, "crtc create failed\n");
+ return PTR_ERR(tcrtc);
+ }
+
+ tidss->crtcs[tidss->num_crtcs++] = &tcrtc->crtc;
+
+ enc = tidss_encoder_create(tidss, pipes[i].enc_type,
+ 1 << tcrtc->crtc.index);
+ if (IS_ERR(enc)) {
+ dev_err(tidss->dev, "encoder create failed\n");
+ return PTR_ERR(enc);
+ }
+
+ ret = drm_bridge_attach(enc, pipes[i].bridge, NULL, 0);
+ if (ret) {
+ dev_err(tidss->dev, "bridge attach failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* create overlay planes of the leftover planes */
+
+ while (tidss->num_planes < max_planes) {
+ struct tidss_plane *tplane;
+ u32 hw_plane_id = feat->vid_order[tidss->num_planes];
+
+ tplane = tidss_plane_create(tidss, hw_plane_id,
+ DRM_PLANE_TYPE_OVERLAY, crtc_mask,
+ fourccs, fourccs_len);
+
+ if (IS_ERR(tplane)) {
+ dev_err(tidss->dev, "plane create failed\n");
+ return PTR_ERR(tplane);
+ }
+
+ tidss->planes[tidss->num_planes++] = &tplane->plane;
+ }
+
+ return 0;
+}
+
+int tidss_modeset_init(struct tidss_device *tidss)
+{
+ struct drm_device *ddev = &tidss->ddev;
+ unsigned int i;
+ int ret;
+
+ dev_dbg(tidss->dev, "%s\n", __func__);
+
+ drm_mode_config_init(ddev);
+
+ ddev->mode_config.min_width = 8;
+ ddev->mode_config.min_height = 8;
+ ddev->mode_config.max_width = 8096;
+ ddev->mode_config.max_height = 8096;
+ ddev->mode_config.normalize_zpos = true;
+ ddev->mode_config.funcs = &mode_config_funcs;
+ ddev->mode_config.helper_private = &mode_config_helper_funcs;
+
+ ret = tidss_dispc_modeset_init(tidss);
+ if (ret)
+ goto err_mode_config_cleanup;
+
+ ret = drm_vblank_init(ddev, tidss->num_crtcs);
+ if (ret)
+ goto err_mode_config_cleanup;
+
+ /* Start with vertical blanking interrupt reporting disabled. */
+ for (i = 0; i < tidss->num_crtcs; ++i)
+ drm_crtc_vblank_reset(tidss->crtcs[i]);
+
+ drm_mode_config_reset(ddev);
+
+ dev_dbg(tidss->dev, "%s done\n", __func__);
+
+ return 0;
+
+err_mode_config_cleanup:
+ drm_mode_config_cleanup(ddev);
+ return ret;
+}
+
+void tidss_modeset_cleanup(struct tidss_device *tidss)
+{
+ struct drm_device *ddev = &tidss->ddev;
+
+ drm_mode_config_cleanup(ddev);
+}
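
The mask arithmetic in tidss_dispc_modeset_init() is easy to sanity-check in isolation: planes get a mask covering every CRTC, while each encoder is tied to exactly one CRTC by index. A standalone sketch (not part of the patch), assuming two pipes:

#include <stdio.h>

int main(void)
{
	unsigned int num_pipes = 2;
	/* all planes may attach to any CRTC: 0b11 for two pipes */
	unsigned int crtc_mask = (1u << num_pipes) - 1;
	/* each encoder is tied to exactly one CRTC by its index */
	unsigned int enc_mask0 = 1u << 0, enc_mask1 = 1u << 1;

	printf("crtc_mask=0x%x enc0=0x%x enc1=0x%x\n",
	       crtc_mask, enc_mask0, enc_mask1);
	return 0;
}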
diff --git a/drivers/gpu/drm/tidss/tidss_kms.h b/drivers/gpu/drm/tidss/tidss_kms.h
new file mode 100644
index 000000000000..dda5625d0128
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_kms.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#ifndef __TIDSS_KMS_H__
+#define __TIDSS_KMS_H__
+
+struct tidss_device;
+
+int tidss_modeset_init(struct tidss_device *tidss);
+void tidss_modeset_cleanup(struct tidss_device *tidss);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
new file mode 100644
index 000000000000..ff99b2dd4a17
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_plane.h"
+
+/* drm_plane_helper_funcs */
+
+static int tidss_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_device *ddev = plane->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+ const struct drm_format_info *finfo;
+ struct drm_crtc_state *crtc_state;
+ u32 hw_plane = tplane->hw_plane_id;
+ u32 hw_videoport;
+ int ret;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ if (!state->crtc) {
+ /*
+ * The visible field is not reset by the DRM core but only
+ * updated by drm_plane_helper_check_state(); set it manually.
+ */
+ state->visible = false;
+ return 0;
+ }
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state, 0,
+ INT_MAX, true, true);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The HW can only start drawing at a subpixel boundary
+ * (the first two checks below). At the end of a row the HW
+ * can only jump an integer number of subpixels forward to
+ * the beginning of the next row, so the picture width must
+ * also be an integer number of subpixels (the third check).
+ * However, after reaching the end of the drawn picture the
+ * drawing restarts at the absolute memory address of the
+ * picture's top left corner, so there is no need to check
+ * the height.
+ */
+
+ finfo = drm_format_info(state->fb->format->format);
+
+ if ((state->src_x >> 16) % finfo->hsub != 0) {
+ dev_dbg(ddev->dev,
+ "%s: x-position %u not divisible subpixel size %u\n",
+ __func__, (state->src_x >> 16), finfo->hsub);
+ return -EINVAL;
+ }
+
+ if ((state->src_y >> 16) % finfo->vsub != 0) {
+ dev_dbg(ddev->dev,
+ "%s: y-position %u not divisible subpixel size %u\n",
+ __func__, (state->src_y >> 16), finfo->vsub);
+ return -EINVAL;
+ }
+
+ if ((state->src_w >> 16) % finfo->hsub != 0) {
+ dev_dbg(ddev->dev,
+ "%s: src width %u not divisible by subpixel size %u\n",
+ __func__, (state->src_w >> 16), finfo->hsub);
+ return -EINVAL;
+ }
+
+ if (!state->visible)
+ return 0;
+
+ hw_videoport = to_tidss_crtc(state->crtc)->hw_videoport;
+
+ ret = dispc_plane_check(tidss->dispc, hw_plane, state, hw_videoport);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void tidss_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_device *ddev = plane->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ u32 hw_videoport;
+ int ret;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ if (!state->visible) {
+ dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
+ return;
+ }
+
+ hw_videoport = to_tidss_crtc(state->crtc)->hw_videoport;
+
+ ret = dispc_plane_setup(tidss->dispc, tplane->hw_plane_id,
+ state, hw_videoport);
+
+ if (ret) {
+ dev_err(plane->dev->dev, "%s: Failed to setup plane %d\n",
+ __func__, tplane->hw_plane_id);
+ dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
+ return;
+ }
+
+ dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, true);
+}
+
+static void tidss_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_device *ddev = plane->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
+}
+
+static const struct drm_plane_helper_funcs tidss_plane_helper_funcs = {
+ .atomic_check = tidss_plane_atomic_check,
+ .atomic_update = tidss_plane_atomic_update,
+ .atomic_disable = tidss_plane_atomic_disable,
+};
+
+static const struct drm_plane_funcs tidss_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .reset = drm_atomic_helper_plane_reset,
+ .destroy = drm_plane_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
+ u32 hw_plane_id, u32 plane_type,
+ u32 crtc_mask, const u32 *formats,
+ u32 num_formats)
+{
+ struct tidss_plane *tplane;
+ enum drm_plane_type type;
+ u32 possible_crtcs;
+ u32 num_planes = tidss->feat->num_planes;
+ u32 color_encodings = (BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709));
+ u32 color_ranges = (BIT(DRM_COLOR_YCBCR_FULL_RANGE) |
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE));
+ u32 default_encoding = DRM_COLOR_YCBCR_BT601;
+ u32 default_range = DRM_COLOR_YCBCR_FULL_RANGE;
+ u32 blend_modes = (BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+ int ret;
+
+ tplane = devm_kzalloc(tidss->dev, sizeof(*tplane), GFP_KERNEL);
+ if (!tplane)
+ return ERR_PTR(-ENOMEM);
+
+ tplane->hw_plane_id = hw_plane_id;
+
+ possible_crtcs = crtc_mask;
+ type = plane_type;
+
+ ret = drm_universal_plane_init(&tidss->ddev, &tplane->plane,
+ possible_crtcs,
+ &tidss_plane_funcs,
+ formats, num_formats,
+ NULL, type, NULL);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ drm_plane_helper_add(&tplane->plane, &tidss_plane_helper_funcs);
+
+ drm_plane_create_zpos_property(&tplane->plane, hw_plane_id, 0,
+ num_planes - 1);
+
+ ret = drm_plane_create_color_properties(&tplane->plane,
+ color_encodings,
+ color_ranges,
+ default_encoding,
+ default_range);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drm_plane_create_alpha_property(&tplane->plane);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drm_plane_create_blend_mode_property(&tplane->plane, blend_modes);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return tplane;
+}
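
The three divisibility checks in tidss_plane_atomic_check() are easiest to see with a subsampled format. A standalone sketch (not part of the patch) applying them to NV12 (hsub = 2, vsub = 2); note the driver's values are 16.16 fixed point and are shifted down by 16 before the same tests:

#include <stdbool.h>
#include <stdio.h>

struct fmt { unsigned int hsub, vsub; };

/* mirrors the x, y and width checks from tidss_plane_atomic_check() */
static bool src_rect_ok(const struct fmt *f, unsigned int x, unsigned int y,
			unsigned int w)
{
	return x % f->hsub == 0 && y % f->vsub == 0 && w % f->hsub == 0;
}

int main(void)
{
	const struct fmt nv12 = { .hsub = 2, .vsub = 2 };

	printf("x=3: %s\n", src_rect_ok(&nv12, 3, 0, 64) ? "ok" : "rejected");
	printf("x=4: %s\n", src_rect_ok(&nv12, 4, 0, 64) ? "ok" : "rejected");
	return 0;
}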
diff --git a/drivers/gpu/drm/tidss/tidss_plane.h b/drivers/gpu/drm/tidss/tidss_plane.h
new file mode 100644
index 000000000000..80ff1c5a2535
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_plane.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#ifndef __TIDSS_PLANE_H__
+#define __TIDSS_PLANE_H__
+
+#define to_tidss_plane(p) container_of((p), struct tidss_plane, plane)
+
+struct tidss_device;
+
+struct tidss_plane {
+ struct drm_plane plane;
+
+ u32 hw_plane_id;
+};
+
+struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
+ u32 hw_plane_id, u32 plane_type,
+ u32 crtc_mask, const u32 *formats,
+ u32 num_formats);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_scale_coefs.c b/drivers/gpu/drm/tidss/tidss_scale_coefs.c
new file mode 100644
index 000000000000..5ec68389cc68
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_scale_coefs.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <[email protected]>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include "tidss_scale_coefs.h"
+
+/*
+ * These are interpolated from the DSS5 coefficients
+ * (drivers/gpu/drm/omapdrm/dss/dispc_coef.c) with a custom Python script.
+ */
+static const struct tidss_scale_coefs coef5_m32 = {
+ .c2 = { 28, 34, 40, 46, 52, 58, 64, 70, 0, 2, 4, 8, 12, 16, 20, 24, },
+ .c1 = { 132, 138, 144, 150, 156, 162, 168, 174, 76, 84, 92, 98, 104, 110, 116, 124, },
+ .c0 = { 192, 192, 192, 190, 188, 186, 184, 182, 180, },
+};
+
+static const struct tidss_scale_coefs coef5_m26 = {
+ .c2 = { 24, 28, 32, 38, 44, 50, 56, 64, 0, 2, 4, 6, 8, 12, 16, 20, },
+ .c1 = { 132, 138, 144, 152, 160, 166, 172, 178, 72, 80, 88, 94, 100, 108, 116, 124, },
+ .c0 = { 200, 202, 204, 202, 200, 196, 192, 188, 184, },
+};
+
+static const struct tidss_scale_coefs coef5_m22 = {
+ .c2 = { 16, 20, 24, 30, 36, 42, 48, 56, 0, 0, 0, 2, 4, 8, 12, 14, },
+ .c1 = { 132, 140, 148, 156, 164, 172, 180, 186, 64, 72, 80, 88, 96, 104, 112, 122, },
+ .c0 = { 216, 216, 216, 214, 212, 208, 204, 198, 192, },
+};
+
+static const struct tidss_scale_coefs coef5_m19 = {
+ .c2 = { 12, 14, 16, 22, 28, 34, 40, 48, 0, 0, 0, 2, 4, 4, 4, 8, },
+ .c1 = { 128, 140, 152, 160, 168, 176, 184, 192, 56, 64, 72, 82, 92, 100, 108, 118, },
+ .c0 = { 232, 232, 232, 226, 220, 218, 216, 208, 200, },
+};
+
+static const struct tidss_scale_coefs coef5_m16 = {
+ .c2 = { 0, 2, 4, 8, 12, 18, 24, 32, 0, 0, 0, -2, -4, -4, -4, -2, },
+ .c1 = { 124, 138, 152, 164, 176, 186, 196, 206, 40, 48, 56, 68, 80, 90, 100, 112, },
+ .c0 = { 264, 262, 260, 254, 248, 242, 236, 226, 216, },
+};
+
+static const struct tidss_scale_coefs coef5_m14 = {
+ .c2 = { -8, -6, -4, -2, 0, 6, 12, 18, 0, -2, -4, -6, -8, -8, -8, -8, },
+ .c1 = { 120, 134, 148, 164, 180, 194, 208, 220, 24, 32, 40, 52, 64, 78, 92, 106, },
+ .c0 = { 288, 286, 284, 280, 276, 266, 256, 244, 232, },
+};
+
+static const struct tidss_scale_coefs coef5_m13 = {
+ .c2 = { -12, -12, -12, -10, -8, -4, 0, 6, 0, -2, -4, -6, -8, -10, -12, -12, },
+ .c1 = { 112, 130, 148, 164, 180, 196, 212, 228, 12, 22, 32, 44, 56, 70, 84, 98, },
+ .c0 = { 312, 308, 304, 298, 292, 282, 272, 258, 244, },
+};
+
+static const struct tidss_scale_coefs coef5_m12 = {
+ .c2 = { -16, -18, -20, -18, -16, -14, -12, -6, 0, -2, -4, -6, -8, -10, -12, -14, },
+ .c1 = { 104, 124, 144, 164, 184, 202, 220, 238, 0, 10, 20, 30, 40, 56, 72, 88, },
+ .c0 = { 336, 332, 328, 320, 312, 300, 288, 272, 256, },
+};
+
+static const struct tidss_scale_coefs coef5_m11 = {
+ .c2 = { -20, -22, -24, -24, -24, -24, -24, -20, 0, -2, -4, -6, -8, -10, -12, -16, },
+ .c1 = { 92, 114, 136, 158, 180, 204, 228, 250, -16, -8, 0, 12, 24, 38, 52, 72, },
+ .c0 = { 368, 364, 360, 350, 340, 326, 312, 292, 272, },
+};
+
+static const struct tidss_scale_coefs coef5_m10 = {
+ .c2 = { -16, -20, -24, -28, -32, -34, -36, -34, 0, 0, 0, -2, -4, -8, -12, -14, },
+ .c1 = { 72, 96, 120, 148, 176, 204, 232, 260, -32, -26, -20, -10, 0, 16, 32, 52, },
+ .c0 = { 400, 398, 396, 384, 372, 354, 336, 312, 288, },
+};
+
+static const struct tidss_scale_coefs coef5_m9 = {
+ .c2 = { -12, -18, -24, -28, -32, -38, -44, -46, 0, 2, 4, 2, 0, -2, -4, -8, },
+ .c1 = { 40, 68, 96, 128, 160, 196, 232, 268, -48, -46, -44, -36, -28, -14, 0, 20, },
+ .c0 = { 456, 450, 444, 428, 412, 388, 364, 334, 304, },
+};
+
+static const struct tidss_scale_coefs coef5_m8 = {
+ .c2 = { 0, -4, -8, -16, -24, -32, -40, -48, 0, 2, 4, 6, 8, 6, 4, 2, },
+ .c1 = { 0, 28, 56, 94, 132, 176, 220, 266, -56, -60, -64, -62, -60, -50, -40, -20, },
+ .c0 = { 512, 506, 500, 478, 456, 424, 392, 352, 312, },
+};
+
+static const struct tidss_scale_coefs coef3_m32 = {
+ .c1 = { 108, 92, 76, 62, 48, 36, 24, 140, 256, 236, 216, 198, 180, 162, 144, 126, },
+ .c0 = { 296, 294, 292, 288, 284, 278, 272, 136, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m26 = {
+ .c1 = { 104, 90, 76, 60, 44, 32, 20, 138, 256, 236, 216, 198, 180, 160, 140, 122, },
+ .c0 = { 304, 300, 296, 292, 288, 282, 276, 138, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m22 = {
+ .c1 = { 100, 84, 68, 54, 40, 30, 20, 138, 256, 236, 216, 196, 176, 156, 136, 118, },
+ .c0 = { 312, 310, 308, 302, 296, 286, 276, 138, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m19 = {
+ .c1 = { 96, 80, 64, 50, 36, 26, 16, 136, 256, 236, 216, 194, 172, 152, 132, 114, },
+ .c0 = { 320, 318, 316, 310, 304, 292, 280, 140, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m16 = {
+ .c1 = { 88, 72, 56, 44, 32, 22, 12, 134, 256, 234, 212, 190, 168, 148, 128, 108, },
+ .c0 = { 336, 332, 328, 320, 312, 300, 288, 144, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m14 = {
+ .c1 = { 80, 64, 48, 36, 24, 16, 8, 132, 256, 232, 208, 186, 164, 142, 120, 100, },
+ .c0 = { 352, 348, 344, 334, 324, 310, 296, 148, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m13 = {
+ .c1 = { 72, 56, 40, 30, 20, 12, 4, 130, 256, 232, 208, 184, 160, 136, 112, 92, },
+ .c0 = { 368, 364, 360, 346, 332, 316, 300, 150, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m12 = {
+ .c1 = { 64, 50, 36, 26, 16, 10, 4, 130, 256, 230, 204, 178, 152, 128, 104, 84, },
+ .c0 = { 384, 378, 372, 358, 344, 324, 304, 152, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m11 = {
+ .c1 = { 56, 40, 24, 16, 8, 4, 0, 128, 256, 228, 200, 172, 144, 120, 96, 76, },
+ .c0 = { 400, 396, 392, 376, 360, 336, 312, 156, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m10 = {
+ .c1 = { 40, 26, 12, 6, 0, -2, -4, 126, 256, 226, 196, 166, 136, 110, 84, 62, },
+ .c0 = { 432, 424, 416, 396, 376, 348, 320, 160, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m9 = {
+ .c1 = { 24, 12, 0, -4, -8, -8, -8, 124, 256, 222, 188, 154, 120, 92, 64, 44, },
+ .c0 = { 464, 456, 448, 424, 400, 366, 332, 166, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m8 = {
+ .c1 = { 0, -8, -16, -16, -16, -12, -8, 124, 256, 214, 172, 134, 96, 66, 36, 18, },
+ .c0 = { 512, 502, 492, 462, 432, 390, 348, 174, 256, },
+};
+
+const struct tidss_scale_coefs *tidss_get_scale_coefs(struct device *dev,
+ u32 firinc,
+ bool five_taps)
+{
+ int i;
+ int inc;
+ static const struct {
+ int mmin;
+ int mmax;
+ const struct tidss_scale_coefs *coef3;
+ const struct tidss_scale_coefs *coef5;
+ const char *name;
+ } coefs[] = {
+ { 27, 32, &coef3_m32, &coef5_m32, "M32" },
+ { 23, 26, &coef3_m26, &coef5_m26, "M26" },
+ { 20, 22, &coef3_m22, &coef5_m22, "M22" },
+ { 17, 19, &coef3_m19, &coef5_m19, "M19" },
+ { 15, 16, &coef3_m16, &coef5_m16, "M16" },
+ { 14, 14, &coef3_m14, &coef5_m14, "M14" },
+ { 13, 13, &coef3_m13, &coef5_m13, "M13" },
+ { 12, 12, &coef3_m12, &coef5_m12, "M12" },
+ { 11, 11, &coef3_m11, &coef5_m11, "M11" },
+ { 10, 10, &coef3_m10, &coef5_m10, "M10" },
+ { 9, 9, &coef3_m9, &coef5_m9, "M9" },
+ { 4, 8, &coef3_m8, &coef5_m8, "M8" },
+ /*
+ * When upscaling more than two times, blockiness and outlines
+ * around the image are observed when M8 tables are used. M11,
+ * M16 and M19 tables are used to prevent this.
+ */
+ { 3, 3, &coef3_m11, &coef5_m11, "M11" },
+ { 2, 2, &coef3_m16, &coef5_m16, "M16" },
+ { 0, 1, &coef3_m19, &coef5_m19, "M19" },
+ };
+
+ /*
+ * firinc is the result of 0x200000 * in_size / out_size. Dividing
+ * it by 0x40000 scales that down to 8 * in_size / out_size, so
+ * after the division the actual scaling factor is 8 / inc.
+ */
+ inc = firinc / 0x40000;
+ for (i = 0; i < ARRAY_SIZE(coefs); ++i) {
+ if (inc >= coefs[i].mmin && inc <= coefs[i].mmax) {
+ if (five_taps)
+ return coefs[i].coef5;
+ else
+ return coefs[i].coef3;
+ }
+ }
+
+ dev_err(dev, "%s: Coefficients not found for firinc 0x%08x, inc %d\n",
+ __func__, firinc, inc);
+
+ return NULL;
+}
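
A worked example (not part of the patch) of the firinc-to-table mapping in tidss_get_scale_coefs(): a 2x downscale gives inc = 16, which falls in the 15..16 row above and selects the M16 coefficients.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t in_size = 640, out_size = 320;	/* 2x downscale */
	uint32_t firinc = (uint32_t)((uint64_t)0x200000 * in_size / out_size);
	int inc = firinc / 0x40000;	/* 8 * in_size / out_size */

	/* firinc = 0x400000, inc = 16 -> M16 row (mmin 15, mmax 16) */
	printf("firinc=0x%08x inc=%d -> M16\n", firinc, inc);
	return 0;
}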
diff --git a/drivers/gpu/drm/tidss/tidss_scale_coefs.h b/drivers/gpu/drm/tidss/tidss_scale_coefs.h
new file mode 100644
index 000000000000..64b5af5b5361
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_scale_coefs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <[email protected]>
+ */
+
+#ifndef __TIDSS_DISPC_COEF_H__
+#define __TIDSS_DISPC_COEF_H__
+
+#include <linux/types.h>
+
+struct tidss_scale_coefs {
+ s16 c2[16];
+ s16 c1[16];
+ u16 c0[9];
+};
+
+const struct tidss_scale_coefs *tidss_get_scale_coefs(struct device *dev,
+ u32 firinc,
+ bool five_taps);
+
+#endif
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index c18a28df6e2c..0791a0200cc3 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -249,7 +249,7 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
goto init_failed;
}
- priv->mmio = ioremap_nocache(res->start, resource_size(res));
+ priv->mmio = ioremap(res->start, resource_size(res));
if (!priv->mmio) {
dev_err(dev, "failed to ioremap\n");
ret = -ENOMEM;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 51d034e095f4..28b7f703236e 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -95,7 +95,7 @@ int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
priv->external_encoder->possible_crtcs = BIT(0);
- ret = drm_bridge_attach(priv->external_encoder, bridge, NULL);
+ ret = drm_bridge_attach(priv->external_encoder, bridge, NULL, 0);
if (ret) {
dev_err(ddev->dev, "drm_bridge_attach() failed %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index a46ac284dd5e..4160e74e4751 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -47,6 +47,20 @@ config TINYDRM_ILI9341
If M is selected the module will be called ili9341.
+config TINYDRM_ILI9486
+ tristate "DRM support for ILI9486 display panels"
+ depends on DRM && SPI
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_MIPI_DBI
+ select BACKLIGHT_CLASS_DEVICE
+ help
+ DRM driver for the following Ilitek ILI9486 panels:
+ * PISCREEN 3.5" 320x480 TFT (Ozzmaker 3.5")
+ * RPILCD 3.5" 320x480 TFT (Waveshare 3.5")
+
+ If M is selected the module will be called ili9486.
+
config TINYDRM_MI0283QT
tristate "DRM support for MI0283QT"
depends on DRM && SPI
@@ -85,14 +99,16 @@ config TINYDRM_ST7586
If M is selected the module will be called st7586.
config TINYDRM_ST7735R
- tristate "DRM support for Sitronix ST7735R display panels"
+ tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
- DRM driver Sitronix ST7735R with one of the following LCDs:
- * JD-T18003-T01 1.8" 128x160 TFT
+ DRM driver for Sitronix ST7715R/ST7735R with one of the following
+ LCDs:
+ * Jianda JD-T18003-T01 1.8" 128x160 TFT
+ * Okaya RH128128T 1.44" 128x128 TFT
If M is selected the module will be called st7735r.
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index 896cf31132d3..c96ceee71453 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
obj-$(CONFIG_TINYDRM_ILI9341) += ili9341.o
+obj-$(CONFIG_TINYDRM_ILI9486) += ili9486.o
obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o
obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o
obj-$(CONFIG_TINYDRM_ST7586) += st7586.o
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 94fb1f593564..a48173441ae0 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -22,7 +22,6 @@
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <drm/drm_vblank.h>
static bool eco_mode;
module_param(eco_mode, bool, 0644);
@@ -610,18 +609,10 @@ static void gm12u320_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
gm12u320_fb_mark_dirty(pipe->plane.state->fb, &rect);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- crtc->state->event = NULL;
- spin_unlock_irq(&crtc->dev->event_lock);
- }
}
static const struct drm_simple_display_pipe_funcs gm12u320_pipe_funcs = {
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index c66acc566c2b..802fb8dde1b6 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -26,7 +26,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
#define ILI9225_DRIVER_READ_CODE 0x00
#define ILI9225_DRIVER_OUTPUT_CONTROL 0x01
@@ -165,18 +164,10 @@ static void ili9225_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
ili9225_fb_dirty(state->fb, &rect);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- spin_unlock_irq(&crtc->dev->event_lock);
- crtc->state->event = NULL;
- }
}
static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
new file mode 100644
index 000000000000..532560aebb1e
--- /dev/null
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DRM driver for Ilitek ILI9486 panels
+ *
+ * Copyright 2020 Kamlesh Gurudasani <[email protected]>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mipi_dbi.h>
+#include <drm/drm_modeset_helper.h>
+
+#define ILI9486_ITFCTR1 0xb0
+#define ILI9486_PWCTRL1 0xc2
+#define ILI9486_VMCTRL1 0xc5
+#define ILI9486_PGAMCTRL 0xe0
+#define ILI9486_NGAMCTRL 0xe1
+#define ILI9486_DGAMCTRL 0xe2
+#define ILI9486_MADCTL_BGR BIT(3)
+#define ILI9486_MADCTL_MV BIT(5)
+#define ILI9486_MADCTL_MX BIT(6)
+#define ILI9486_MADCTL_MY BIT(7)
+
+/*
+ * The PiScreen/waveshare rpi-lcd-35 has a SPI to 16-bit parallel bus converter
+ * in front of the display controller. This means that 8-bit values have to be
+ * transferred as 16-bit.
+ */
+static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
+ size_t num)
+{
+ struct spi_device *spi = mipi->spi;
+ void *data = par;
+ u32 speed_hz;
+ int i, ret;
+ __be16 *buf;
+
+ buf = kmalloc(32 * sizeof(u16), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /*
+ * The displays are Raspberry Pi HATs connected to the 8-bit-only
+ * SPI controller, so 16-bit commands and parameters need byte swapping
+ * before being transferred as 8-bit on the big-endian SPI bus.
+ * Pixel data bytes have already been swapped before this function is
+ * called.
+ */
+ buf[0] = cpu_to_be16(*cmd);
+ gpiod_set_value_cansleep(mipi->dc, 0);
+ speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 2);
+ ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, buf, 2);
+ if (ret || !num)
+ goto free;
+
+ /* 8-bit configuration data, not 16-bit pixel data */
+ if (num <= 32) {
+ for (i = 0; i < num; i++)
+ buf[i] = cpu_to_be16(par[i]);
+ num *= 2;
+ speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
+ data = buf;
+ }
+
+ gpiod_set_value_cansleep(mipi->dc, 1);
+ ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, data, num);
+ free:
+ kfree(buf);
+
+ return ret;
+}
+
+static void waveshare_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct mipi_dbi *dbi = &dbidev->dbi;
+ u8 addr_mode;
+ int ret, idx;
+
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
+
+ DRM_DEBUG_KMS("\n");
+
+ ret = mipi_dbi_poweron_conditional_reset(dbidev);
+ if (ret < 0)
+ goto out_exit;
+ if (ret == 1)
+ goto out_enable;
+
+ mipi_dbi_command(dbi, ILI9486_ITFCTR1);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(250);
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
+
+ mipi_dbi_command(dbi, ILI9486_PWCTRL1, 0x44);
+
+ mipi_dbi_command(dbi, ILI9486_VMCTRL1, 0x00, 0x00, 0x00, 0x00);
+
+ mipi_dbi_command(dbi, ILI9486_PGAMCTRL,
+ 0x0F, 0x1F, 0x1C, 0x0C, 0x0F, 0x08, 0x48, 0x98,
+ 0x37, 0x0A, 0x13, 0x04, 0x11, 0x0D, 0x0);
+ mipi_dbi_command(dbi, ILI9486_NGAMCTRL,
+ 0x0F, 0x32, 0x2E, 0x0B, 0x0D, 0x05, 0x47, 0x75,
+ 0x37, 0x06, 0x10, 0x03, 0x24, 0x20, 0x00);
+ mipi_dbi_command(dbi, ILI9486_DGAMCTRL,
+ 0x0F, 0x32, 0x2E, 0x0B, 0x0D, 0x05, 0x47, 0x75,
+ 0x37, 0x06, 0x10, 0x03, 0x24, 0x20, 0x00);
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
+ msleep(100);
+
+ out_enable:
+ switch (dbidev->rotation) {
+ case 90:
+ addr_mode = ILI9486_MADCTL_MY;
+ break;
+ case 180:
+ addr_mode = ILI9486_MADCTL_MV;
+ break;
+ case 270:
+ addr_mode = ILI9486_MADCTL_MX;
+ break;
+ default:
+ addr_mode = ILI9486_MADCTL_MV | ILI9486_MADCTL_MY |
+ ILI9486_MADCTL_MX;
+ break;
+ }
+ addr_mode |= ILI9486_MADCTL_BGR;
+ mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
+ out_exit:
+ drm_dev_exit(idx);
+}
+
+static const struct drm_simple_display_pipe_funcs waveshare_pipe_funcs = {
+ .enable = waveshare_enable,
+ .disable = mipi_dbi_pipe_disable,
+ .update = mipi_dbi_pipe_update,
+ .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+};
+
+static const struct drm_display_mode waveshare_mode = {
+ DRM_SIMPLE_MODE(480, 320, 73, 49),
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(ili9486_fops);
+
+static struct drm_driver ili9486_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &ili9486_fops,
+ .release = mipi_dbi_release,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
+ .debugfs_init = mipi_dbi_debugfs_init,
+ .name = "ili9486",
+ .desc = "Ilitek ILI9486",
+ .date = "20200118",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id ili9486_of_match[] = {
+ { .compatible = "waveshare,rpi-lcd-35" },
+ { .compatible = "ozzmaker,piscreen" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ili9486_of_match);
+
+static const struct spi_device_id ili9486_id[] = {
+ { "ili9486", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ili9486_id);
+
+static int ili9486_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct mipi_dbi_dev *dbidev;
+ struct drm_device *drm;
+ struct mipi_dbi *dbi;
+ struct gpio_desc *dc;
+ u32 rotation = 0;
+ int ret;
+
+ dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
+ if (!dbidev)
+ return -ENOMEM;
+
+ dbi = &dbidev->dbi;
+ drm = &dbidev->drm;
+ ret = devm_drm_dev_init(dev, drm, &ili9486_driver);
+ if (ret) {
+ kfree(dbidev);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+
+ dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(dbi->reset)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
+ return PTR_ERR(dbi->reset);
+ }
+
+ dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
+ if (IS_ERR(dc)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
+ return PTR_ERR(dc);
+ }
+
+ dbidev->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(dbidev->backlight))
+ return PTR_ERR(dbidev->backlight);
+
+ device_property_read_u32(dev, "rotation", &rotation);
+
+ ret = mipi_dbi_spi_init(spi, dbi, dc);
+ if (ret)
+ return ret;
+
+ dbi->command = waveshare_command;
+ dbi->read_commands = NULL;
+
+ ret = mipi_dbi_dev_init(dbidev, &waveshare_pipe_funcs,
+ &waveshare_mode, rotation);
+ if (ret)
+ return ret;
+
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
+
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
+}
+
+static int ili9486_remove(struct spi_device *spi)
+{
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
+
+ return 0;
+}
+
+static void ili9486_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
+}
+
+static struct spi_driver ili9486_spi_driver = {
+ .driver = {
+ .name = "ili9486",
+ .of_match_table = ili9486_of_match,
+ },
+ .id_table = ili9486_id,
+ .probe = ili9486_probe,
+ .remove = ili9486_remove,
+ .shutdown = ili9486_shutdown,
+};
+module_spi_driver(ili9486_spi_driver);
+
+MODULE_DESCRIPTION("Ilitek ILI9486 DRM driver");
+MODULE_AUTHOR("Kamlesh Gurudasani <[email protected]>");
+MODULE_LICENSE("GPL");
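
What waveshare_command() does to an 8-bit value can be checked in isolation. A standalone sketch (not part of the patch) showing how the command byte 0x2a is widened to the big-endian 16-bit word that the SPI-to-16-bit-parallel converter expects:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t cmd = 0x2a;
	uint16_t word = cmd;				/* widen to 16 bits */
	uint8_t wire[2] = { word >> 8, word & 0xff };	/* big-endian order */

	/* cmd 0x2a goes out as the bytes 0x00 0x2a on the 8-bit SPI wire */
	printf("cmd 0x%02x -> wire bytes 0x%02x 0x%02x\n",
	       cmd, wire[0], wire[1]);
	return 0;
}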
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 76d179200775..f5ebcaf7ee3a 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -17,7 +17,7 @@
#include <linux/dma-buf.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/sched/clock.h>
#include <linux/spi/spi.h>
#include <linux/thermal.h>
@@ -33,13 +33,13 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modes.h>
#include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#define REPAPER_RID_G2_COG_ID 0x12
enum repaper_model {
+ /* 0 is reserved to avoid clashing with NULL */
E1144CS021 = 1,
E1190CS021,
E2200CS021,
@@ -856,18 +856,10 @@ static void repaper_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
repaper_fb_dirty(state->fb);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- spin_unlock_irq(&crtc->dev->event_lock);
- crtc->state->event = NULL;
- }
}
static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
@@ -995,21 +987,21 @@ static int repaper_probe(struct spi_device *spi)
{
const struct drm_display_mode *mode;
const struct spi_device_id *spi_id;
- const struct of_device_id *match;
struct device *dev = &spi->dev;
enum repaper_model model;
const char *thermal_zone;
struct repaper_epd *epd;
size_t line_buffer_size;
struct drm_device *drm;
+ const void *match;
int ret;
- match = of_match_device(repaper_of_match, dev);
+ match = device_get_match_data(dev);
if (match) {
- model = (enum repaper_model)match->data;
+ model = (enum repaper_model)match;
} else {
spi_id = spi_get_device_id(spi);
- model = spi_id->driver_data;
+ model = (enum repaper_model)spi_id->driver_data;
}
/* The SPI device is used to allocate dma memory */
@@ -1197,7 +1189,6 @@ static void repaper_shutdown(struct spi_device *spi)
static struct spi_driver repaper_spi_driver = {
.driver = {
.name = "repaper",
- .owner = THIS_MODULE,
.of_match_table = repaper_of_match,
},
.id_table = repaper_id,
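
The "0 is reserved to avoid clashing with NULL" comment in the repaper model enum is about exactly this probe path: OF match data travels as a pointer, so a model value of 0 would be indistinguishable from "no OF match at all". A standalone sketch (not part of the patch; get_match_data() is a stand-in for device_get_match_data()):

#include <stdio.h>

enum model {	/* 0 reserved: match data is a pointer and 0 == NULL */
	MODEL_A = 1,
	MODEL_B,
};

/* stand-in for device_get_match_data(): NULL when there is no OF match */
static const void *get_match_data(int have_of_node)
{
	return have_of_node ? (const void *)MODEL_A : NULL;
}

int main(void)
{
	const void *match = get_match_data(0);

	if (match)	/* non-NULL only because no model uses the value 0 */
		printf("OF match: model %d\n", (int)(long)match);
	else		/* legacy SPI probe without an OF node */
		printf("no OF match, fall back to the SPI id table\n");
	return 0;
}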
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 060cc756194f..9ef559dd3191 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -23,7 +23,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
/* controller-specific commands */
#define ST7586_DISP_MODE_GRAY 0x38
@@ -159,18 +158,10 @@ static void st7586_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
st7586_fb_dirty(state->fb, &rect);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- spin_unlock_irq(&crtc->dev->event_lock);
- crtc->state->event = NULL;
- }
}
static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index 3f4487c71684..3cd9b8d9888d 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * DRM driver for Sitronix ST7735R panels
+ * DRM driver for display panels connected to a Sitronix ST7715R or ST7735R
+ * display controller in SPI mode.
*
* Copyright 2017 David Lechner <[email protected]>
+ * Copyright (C) 2019 Glider bvba
*/
#include <linux/backlight.h>
@@ -37,12 +39,28 @@
#define ST7735R_MY BIT(7)
#define ST7735R_MX BIT(6)
#define ST7735R_MV BIT(5)
+#define ST7735R_RGB BIT(3)
+
+struct st7735r_cfg {
+ const struct drm_display_mode mode;
+ unsigned int left_offset;
+ unsigned int top_offset;
+ unsigned int write_only:1;
+ unsigned int rgb:1; /* RGB (vs. BGR) */
+};
+
+struct st7735r_priv {
+ struct mipi_dbi_dev dbidev; /* Must be first for .release() */
+ const struct st7735r_cfg *cfg;
+};
-static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
+static void st7735r_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct st7735r_priv *priv = container_of(dbidev, struct st7735r_priv,
+ dbidev);
struct mipi_dbi *dbi = &dbidev->dbi;
int ret, idx;
u8 addr_mode;
@@ -87,6 +105,10 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
addr_mode = ST7735R_MY | ST7735R_MV;
break;
}
+
+ if (priv->cfg->rgb)
+ addr_mode |= ST7735R_RGB;
+
mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT,
MIPI_DCS_PIXEL_FMT_16BIT);
@@ -109,15 +131,24 @@ out_exit:
drm_dev_exit(idx);
}
-static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = {
- .enable = jd_t18003_t01_pipe_enable,
+static const struct drm_simple_display_pipe_funcs st7735r_pipe_funcs = {
+ .enable = st7735r_pipe_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};
-static const struct drm_display_mode jd_t18003_t01_mode = {
- DRM_SIMPLE_MODE(128, 160, 28, 35),
+static const struct st7735r_cfg jd_t18003_t01_cfg = {
+ .mode = { DRM_SIMPLE_MODE(128, 160, 28, 35) },
+ /* Cannot read from Adafruit 1.8" display via SPI */
+ .write_only = true,
+};
+
+static const struct st7735r_cfg rh128128t_cfg = {
+ .mode = { DRM_SIMPLE_MODE(128, 128, 25, 26) },
+ .left_offset = 2,
+ .top_offset = 3,
+ .rgb = true,
};
DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
@@ -136,13 +167,14 @@ static struct drm_driver st7735r_driver = {
};
static const struct of_device_id st7735r_of_match[] = {
- { .compatible = "jianda,jd-t18003-t01" },
+ { .compatible = "jianda,jd-t18003-t01", .data = &jd_t18003_t01_cfg },
+ { .compatible = "okaya,rh128128t", .data = &rh128128t_cfg },
{ },
};
MODULE_DEVICE_TABLE(of, st7735r_of_match);
static const struct spi_device_id st7735r_id[] = {
- { "jd-t18003-t01", 0 },
+ { "jd-t18003-t01", (uintptr_t)&jd_t18003_t01_cfg },
{ },
};
MODULE_DEVICE_TABLE(spi, st7735r_id);
@@ -150,17 +182,26 @@ MODULE_DEVICE_TABLE(spi, st7735r_id);
static int st7735r_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ const struct st7735r_cfg *cfg;
struct mipi_dbi_dev *dbidev;
+ struct st7735r_priv *priv;
struct drm_device *drm;
struct mipi_dbi *dbi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
+ cfg = device_get_match_data(&spi->dev);
+ if (!cfg)
+ cfg = (void *)spi_get_device_id(spi)->driver_data;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
+ dbidev = &priv->dbidev;
+ priv->cfg = cfg;
+
dbi = &dbidev->dbi;
drm = &dbidev->drm;
ret = devm_drm_dev_init(dev, drm, &st7735r_driver);
@@ -193,10 +234,14 @@ static int st7735r_probe(struct spi_device *spi)
if (ret)
return ret;
- /* Cannot read from Adafruit 1.8" display via SPI */
- dbi->read_commands = NULL;
+ if (cfg->write_only)
+ dbi->read_commands = NULL;
+
+ dbidev->left_offset = cfg->left_offset;
+ dbidev->top_offset = cfg->top_offset;
- ret = mipi_dbi_dev_init(dbidev, &jd_t18003_t01_pipe_funcs, &jd_t18003_t01_mode, rotation);
+ ret = mipi_dbi_dev_init(dbidev, &st7735r_pipe_funcs, &cfg->mode,
+ rotation);
if (ret)
return ret;
@@ -231,7 +276,6 @@ static void st7735r_shutdown(struct spi_device *spi)
static struct spi_driver st7735r_spi_driver = {
.driver = {
.name = "st7735r",
- .owner = THIS_MODULE,
.of_match_table = st7735r_of_match,
},
.id_table = st7735r_id,
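
With the per-model config struct introduced above, supporting another ST7715R/ST7735R panel is purely additive: a new cfg plus a match entry, while the shared st7735r_pipe_enable() picks up the offsets and RGB ordering from it. A hypothetical fragment (not part of the patch; the compatible string and geometry are invented for illustration):

static const struct st7735r_cfg example_cfg = {
	.mode		= { DRM_SIMPLE_MODE(160, 80, 35, 17) },
	.left_offset	= 1,	/* panel-specific RAM window offsets */
	.top_offset	= 26,
	.rgb		= true,	/* RGB instead of BGR subpixel order */
};

/* and in st7735r_of_match[]:
 * { .compatible = "acme,example-st7735r", .data = &example_cfg },
 */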
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5df596fb0280..9e07c3f75156 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -145,34 +145,12 @@ static inline uint32_t ttm_bo_type_flags(unsigned type)
return 1 << (type);
}
-static void ttm_bo_release_list(struct kref *list_kref)
-{
- struct ttm_buffer_object *bo =
- container_of(list_kref, struct ttm_buffer_object, list_kref);
- size_t acc_size = bo->acc_size;
-
- BUG_ON(kref_read(&bo->list_kref));
- BUG_ON(kref_read(&bo->kref));
- BUG_ON(bo->mem.mm_node != NULL);
- BUG_ON(!list_empty(&bo->lru));
- BUG_ON(!list_empty(&bo->ddestroy));
- ttm_tt_destroy(bo->ttm);
- atomic_dec(&ttm_bo_glob.bo_count);
- dma_fence_put(bo->moving);
- if (!ttm_bo_uses_embedded_gem_object(bo))
- dma_resv_fini(&bo->base._resv);
- bo->destroy(bo);
- ttm_mem_global_free(&ttm_mem_glob, acc_size);
-}
-
static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
- dma_resv_assert_held(bo->base.resv);
-
if (!list_empty(&bo->lru))
return;
@@ -181,21 +159,14 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
man = &bdev->man[mem->mem_type];
list_add_tail(&bo->lru, &man->lru[bo->priority]);
- kref_get(&bo->list_kref);
if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
!(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
TTM_PAGE_FLAG_SWAPPED))) {
list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
- kref_get(&bo->list_kref);
}
}
-static void ttm_bo_ref_bug(struct kref *list_kref)
-{
- BUG();
-}
-
static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -203,12 +174,10 @@ static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
if (!list_empty(&bo->swap)) {
list_del_init(&bo->swap);
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
notify = true;
}
if (!list_empty(&bo->lru)) {
list_del_init(&bo->lru);
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
notify = true;
}
@@ -372,14 +341,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
moved:
- if (bo->evicted) {
- if (bdev->driver->invalidate_caches) {
- ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
- if (ret)
- pr_err("Can not flush read caches\n");
- }
- bo->evicted = false;
- }
+ bo->evicted = false;
if (bo->mem.mm_node)
bo->offset = (bo->mem.start << PAGE_SHIFT) +
@@ -428,92 +390,49 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
BUG_ON(!dma_resv_trylock(&bo->base._resv));
r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
+ dma_resv_unlock(&bo->base._resv);
if (r)
- dma_resv_unlock(&bo->base._resv);
+ return r;
+
+ if (bo->type != ttm_bo_type_sg) {
+ /* This works because the BO is about to be destroyed and nobody
+ * reference it any more. The only tricky case is the trylock on
+ * the resv object while holding the lru_lock.
+ */
+ spin_lock(&ttm_bo_glob.lru_lock);
+ bo->base.resv = &bo->base._resv;
+ spin_unlock(&ttm_bo_glob.lru_lock);
+ }
return r;
}
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
+ struct dma_resv *resv = &bo->base._resv;
struct dma_resv_list *fobj;
struct dma_fence *fence;
int i;
- fobj = dma_resv_get_list(&bo->base._resv);
- fence = dma_resv_get_excl(&bo->base._resv);
+ rcu_read_lock();
+ fobj = rcu_dereference(resv->fence);
+ fence = rcu_dereference(resv->fence_excl);
if (fence && !fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
for (i = 0; fobj && i < fobj->shared_count; ++i) {
- fence = rcu_dereference_protected(fobj->shared[i],
- dma_resv_held(bo->base.resv));
+ fence = rcu_dereference(fobj->shared[i]);
if (!fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
}
-}
-
-static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_device *bdev = bo->bdev;
- int ret;
-
- ret = ttm_bo_individualize_resv(bo);
- if (ret) {
- /* Last resort, if we fail to allocate memory for the
- * fences block for the BO to become idle
- */
- dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
- 30 * HZ);
- spin_lock(&ttm_bo_glob.lru_lock);
- goto error;
- }
-
- spin_lock(&ttm_bo_glob.lru_lock);
- ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
- if (!ret) {
- if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
- ttm_bo_del_from_lru(bo);
- spin_unlock(&ttm_bo_glob.lru_lock);
- if (bo->base.resv != &bo->base._resv)
- dma_resv_unlock(&bo->base._resv);
-
- ttm_bo_cleanup_memtype_use(bo);
- dma_resv_unlock(bo->base.resv);
- return;
- }
-
- ttm_bo_flush_all_fences(bo);
-
- /*
- * Make NO_EVICT bos immediately available to
- * shrinkers, now that they are queued for
- * destruction.
- */
- if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
- bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
- ttm_bo_move_to_lru_tail(bo, NULL);
- }
-
- dma_resv_unlock(bo->base.resv);
- }
- if (bo->base.resv != &bo->base._resv)
- dma_resv_unlock(&bo->base._resv);
-
-error:
- kref_get(&bo->list_kref);
- list_add_tail(&bo->ddestroy, &bdev->ddestroy);
- spin_unlock(&ttm_bo_glob.lru_lock);
-
- schedule_delayed_work(&bdev->wq,
- ((HZ / 100) < 1) ? 1 : HZ / 100);
+ rcu_read_unlock();
}
/**
* function ttm_bo_cleanup_refs
- * If bo idle, remove from delayed- and lru lists, and unref.
- * If not idle, do nothing.
+ * If bo idle, remove from lru lists, and unref.
+ * If not idle, block if possible.
*
 * Must be called with lru_lock and reservation held; this function
* will drop the lru lock and optionally the reservation lock before returning.
@@ -527,14 +446,9 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait_gpu,
bool unlock_resv)
{
- struct dma_resv *resv;
+ struct dma_resv *resv = &bo->base._resv;
int ret;
- if (unlikely(list_empty(&bo->ddestroy)))
- resv = bo->base.resv;
- else
- resv = &bo->base._resv;
-
if (dma_resv_test_signaled_rcu(resv, true))
ret = 0;
else
@@ -547,9 +461,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
dma_resv_unlock(bo->base.resv);
spin_unlock(&ttm_bo_glob.lru_lock);
- lret = dma_resv_wait_timeout_rcu(resv, true,
- interruptible,
- 30 * HZ);
+ lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
+ 30 * HZ);
if (lret < 0)
return lret;
@@ -581,14 +494,14 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
ttm_bo_del_from_lru(bo);
list_del_init(&bo->ddestroy);
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
-
spin_unlock(&ttm_bo_glob.lru_lock);
ttm_bo_cleanup_memtype_use(bo);
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
+ ttm_bo_put(bo);
+
return 0;
}
@@ -610,8 +523,9 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
ddestroy);
- kref_get(&bo->list_kref);
list_move_tail(&bo->ddestroy, &removed);
+ if (!ttm_bo_get_unless_zero(bo))
+ continue;
if (remove_all || bo->base.resv != &bo->base._resv) {
spin_unlock(&glob->lru_lock);
@@ -626,7 +540,7 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
spin_unlock(&glob->lru_lock);
}
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
spin_lock(&glob->lru_lock);
}
list_splice_tail(&removed, &bdev->ddestroy);
@@ -652,16 +566,69 @@ static void ttm_bo_release(struct kref *kref)
container_of(kref, struct ttm_buffer_object, kref);
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+ size_t acc_size = bo->acc_size;
+ int ret;
- if (bo->bdev->driver->release_notify)
- bo->bdev->driver->release_notify(bo);
+ if (!bo->deleted) {
+ ret = ttm_bo_individualize_resv(bo);
+ if (ret) {
+ /* Last resort: if we fail to allocate memory for the
+ * fences, block until the BO becomes idle
+ */
+ dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
+ 30 * HZ);
+ }
- drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
- ttm_mem_io_lock(man, false);
- ttm_mem_io_free_vm(bo);
- ttm_mem_io_unlock(man);
- ttm_bo_cleanup_refs_or_queue(bo);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ if (bo->bdev->driver->release_notify)
+ bo->bdev->driver->release_notify(bo);
+
+ drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
+ ttm_mem_io_lock(man, false);
+ ttm_mem_io_free_vm(bo);
+ ttm_mem_io_unlock(man);
+ }
+
+ if (!dma_resv_test_signaled_rcu(bo->base.resv, true)) {
+ /* The BO is not idle, resurrect it for delayed destroy */
+ ttm_bo_flush_all_fences(bo);
+ bo->deleted = true;
+
+ spin_lock(&ttm_bo_glob.lru_lock);
+
+ /*
+ * Make NO_EVICT bos immediately available to
+ * shrinkers, now that they are queued for
+ * destruction.
+ */
+ if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
+ bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
+ ttm_bo_del_from_lru(bo);
+ ttm_bo_add_mem_to_lru(bo, &bo->mem);
+ }
+
+ kref_init(&bo->kref);
+ list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+ spin_unlock(&ttm_bo_glob.lru_lock);
+
+ schedule_delayed_work(&bdev->wq,
+ ((HZ / 100) < 1) ? 1 : HZ / 100);
+ return;
+ }
+
+ spin_lock(&ttm_bo_glob.lru_lock);
+ ttm_bo_del_from_lru(bo);
+ list_del(&bo->ddestroy);
+ spin_unlock(&ttm_bo_glob.lru_lock);
+
+ ttm_bo_cleanup_memtype_use(bo);
+
+ BUG_ON(bo->mem.mm_node != NULL);
+ atomic_dec(&ttm_bo_glob.bo_count);
+ dma_fence_put(bo->moving);
+ if (!ttm_bo_uses_embedded_gem_object(bo))
+ dma_resv_fini(&bo->base._resv);
+ bo->destroy(bo);
+ ttm_mem_global_free(&ttm_mem_glob, acc_size);
}
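
Condensing the rewritten release path: when the BO is still busy, kref_init() resurrects it so the delayed-destroy worker holds the one remaining reference; dropping that reference re-enters ttm_bo_release() with bo->deleted set and takes the idle branch. A schematic sketch (idle() and final_free() are hypothetical shorthands for the checks and teardown above):

/* Schematic of ttm_bo_release(), not literal code. */
static void release_sketch(struct ttm_buffer_object *bo)
{
	if (!idle(bo)) {			/* fences still pending */
		bo->deleted = true;
		kref_init(&bo->kref);		/* resurrect: worker owns it */
		list_add_tail(&bo->ddestroy, &bo->bdev->ddestroy);
		schedule_delayed_work(&bo->bdev->wq, 1);
		return;				/* worker's ttm_bo_put() re-enters */
	}
	final_free(bo);				/* idle: tear down immediately */
}
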
void ttm_bo_put(struct ttm_buffer_object *bo)
@@ -764,8 +731,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
if (bo->base.resv == ctx->resv) {
dma_resv_assert_held(bo->base.resv);
- if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
- || !list_empty(&bo->ddestroy))
+ if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT)
ret = true;
*locked = false;
if (busy)
@@ -846,6 +812,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
dma_resv_unlock(bo->base.resv);
continue;
}
+ if (!ttm_bo_get_unless_zero(bo)) {
+ if (locked)
+ dma_resv_unlock(bo->base.resv);
+ continue;
+ }
break;
}
@@ -857,21 +828,19 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
}
if (!bo) {
- if (busy_bo)
- kref_get(&busy_bo->list_kref);
+ if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
+ busy_bo = NULL;
spin_unlock(&ttm_bo_glob.lru_lock);
ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
if (busy_bo)
- kref_put(&busy_bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(busy_bo);
return ret;
}
- kref_get(&bo->list_kref);
-
- if (!list_empty(&bo->ddestroy)) {
+ if (bo->deleted) {
ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
ctx->no_wait_gpu, locked);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
return ret;
}
@@ -881,7 +850,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
if (locked)
ttm_bo_unreserve(bo);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
return ret;
}
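
With list_kref gone, every walker that finds a BO on an LRU list under the lru_lock must now validate the main kref before using it, since the object may already be inside ttm_bo_release(). The lookup idiom, sketched with a hypothetical consume() step:

spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(bo, lru, lru) {
	if (!ttm_bo_get_unless_zero(bo))
		continue;	/* refcount hit zero: release in progress */
	spin_unlock(&ttm_bo_glob.lru_lock);
	consume(bo);		/* hypothetical: evict, swap out, ... */
	ttm_bo_put(bo);		/* drop the reference taken above */
	return 0;
}
spin_unlock(&ttm_bo_glob.lru_lock);
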
@@ -1226,6 +1195,18 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
uint32_t new_flags;
dma_resv_assert_held(bo->base.resv);
+
+ /*
+ * Remove the backing store if no placement is given.
+ */
+ if (!placement->num_placement && !placement->num_busy_placement) {
+ ret = ttm_bo_pipeline_gutting(bo);
+ if (ret)
+ return ret;
+
+ return ttm_tt_create(bo, false);
+ }
+
/*
* Check whether we need to move buffer.
*/
@@ -1293,7 +1274,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
kref_init(&bo->kref);
- kref_init(&bo->list_kref);
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
@@ -1813,11 +1793,18 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &glob->swap_lru[i], swap) {
- if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
- NULL)) {
- ret = 0;
- break;
+ if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
+ NULL))
+ continue;
+
+ if (!ttm_bo_get_unless_zero(bo)) {
+ if (locked)
+ dma_resv_unlock(bo->base.resv);
+ continue;
}
+
+ ret = 0;
+ break;
}
if (!ret)
break;
@@ -1828,11 +1815,9 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
return ret;
}
- kref_get(&bo->list_kref);
-
- if (!list_empty(&bo->ddestroy)) {
+ if (bo->deleted) {
ret = ttm_bo_cleanup_refs(bo, false, false, locked);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
return ret;
}
@@ -1886,7 +1871,7 @@ out:
*/
if (locked)
dma_resv_unlock(bo->base.resv);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
return ret;
}
EXPORT_SYMBOL(ttm_bo_swapout);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2b0e5a088da0..52d2b71f1588 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -218,7 +218,7 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *m
if (mem->placement & TTM_PL_FLAG_WC)
addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
else
- addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+ addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size);
if (!addr) {
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
@@ -507,14 +507,14 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node);
- kref_init(&fbo->base.list_kref);
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
- if (bo->base.resv == &bo->base._resv)
+ if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
dma_resv_init(&fbo->base.base._resv);
+ fbo->base.base.dev = NULL;
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
@@ -564,7 +564,7 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
else
- map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+ map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
}
return (!map->virtual) ? -ENOMEM : 0;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index bf876faea592..faefaaef7909 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -604,7 +604,7 @@ static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
p = pool->name;
for (i = 0; i < ARRAY_SIZE(t); i++) {
if (type & t[i]) {
- p += snprintf(p, sizeof(pool->name) - (p - pool->name),
+ p += scnprintf(p, sizeof(pool->name) - (p - pool->name),
"%s", n[i]);
}
}
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index e9671d38b4a0..0afdfb0d1fe1 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -109,7 +109,6 @@ static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
};
static const struct drm_connector_funcs udl_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.detect = udl_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 22af17959053..d59ebac70b15 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -375,8 +375,6 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
char *wrptr;
int color_depth = UDL_COLOR_DEPTH_16BPP;
- crtc_state->no_vblank = true;
-
buf = (char *)udl->mode_buf;
/* This first section has to do with setting the base address on the
@@ -428,14 +426,6 @@ udl_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe)
udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
}
-static int
-udl_simple_display_pipe_check(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state,
- struct drm_crtc_state *crtc_state)
-{
- return 0;
-}
-
static void
udl_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_plane_state)
@@ -457,7 +447,6 @@ struct drm_simple_display_pipe_funcs udl_simple_display_pipe_funcs = {
.mode_valid = udl_simple_display_pipe_mode_valid,
.enable = udl_simple_display_pipe_enable,
.disable = udl_simple_display_pipe_disable,
- .check = udl_simple_display_pipe_check,
.update = udl_simple_display_pipe_update,
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 9a35c555ec52..ac2603334587 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -254,27 +254,42 @@ struct v3d_csd_job {
};
/**
- * _wait_for - magic (register) wait macro
+ * __wait_for - magic wait macro
*
- * Does the right thing for modeset paths when run under kdgb or similar atomic
- * contexts. Note that it's important that we check the condition again after
- * having timed out, since the timeout could be due to preemption or similar and
- * we've never had a chance to check the condition before the timeout.
+ * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
+ * important that we check the condition again after having timed out, since the
+ * timeout could be due to preemption or similar and we've never had a chance to
+ * check the condition before the timeout.
*/
-#define wait_for(COND, MS) ({ \
- unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
- int ret__ = 0; \
- while (!(COND)) { \
- if (time_after(jiffies, timeout__)) { \
- if (!(COND)) \
- ret__ = -ETIMEDOUT; \
+#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
+ const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
+ long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
+ int ret__; \
+ might_sleep(); \
+ for (;;) { \
+ const bool expired__ = ktime_after(ktime_get_raw(), end__); \
+ OP; \
+ /* Guarantee COND check prior to timeout */ \
+ barrier(); \
+ if (COND) { \
+ ret__ = 0; \
break; \
} \
- msleep(1); \
+ if (expired__) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ usleep_range(wait__, wait__ * 2); \
+ if (wait__ < (Wmax)) \
+ wait__ <<= 1; \
} \
ret__; \
})
+#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
+ (Wmax))
+#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
+
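
Usage stays the same as before the conversion; for illustration only (the register and bit names below are made up, not part of this patch), a caller polling for an idle flag might write:

/* Hypothetical example: wait up to 5 ms for an idle bit. */
ret = wait_for(V3D_READ(V3D_CTL_STATUS) & V3D_STATUS_IDLE, 5);
if (ret == -ETIMEDOUT)
	DRM_ERROR("timed out waiting for idle\n");
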
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
/* nsecs_to_jiffies64() does not guard against overflow */
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 19612132c8a3..0883a435e62b 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -18,7 +18,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "hgsmi_channels.h"
#include "vbox_drv.h"
@@ -226,17 +225,6 @@ static void vbox_crtc_atomic_disable(struct drm_crtc *crtc,
static void vbox_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
- struct drm_pending_vblank_event *event;
- unsigned long flags;
-
- if (crtc->state && crtc->state->event) {
- event = crtc->state->event;
- crtc->state->event = NULL;
-
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- }
}
static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = {
@@ -838,6 +826,7 @@ static int vbox_connector_init(struct drm_device *dev,
static const struct drm_mode_config_funcs vbox_mode_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
+ .mode_valid = drm_vram_helper_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo.h b/drivers/gpu/drm/vboxvideo/vboxvideo.h
index 0592004f71aa..a5de40fe1a76 100644
--- a/drivers/gpu/drm/vboxvideo/vboxvideo.h
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo.h
@@ -138,7 +138,7 @@ struct vbva_buffer {
u32 data_len;
/* variable size for the rest of the vbva_buffer area in VRAM. */
- u8 data[0];
+ u8 data[];
} __packed;
#define VBVA_MAX_RECORD_SIZE (128 * 1024 * 1024)
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index b00e20f5ce05..1208258ad3b2 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -84,13 +84,14 @@ static const struct debugfs_reg32 crtc_regs[] = {
VC4_REG32(PV_HACT_ACT),
};
-bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
+static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
{
+ struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct drm_crtc *crtc = drm_crtc_from_index(dev, crtc_id);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
u32 val;
int fifo_lines;
@@ -1030,6 +1031,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
.gamma_set = drm_atomic_helper_legacy_gamma_set,
.enable_vblank = vc4_enable_vblank,
.disable_vblank = vc4_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
@@ -1039,6 +1041,7 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
.atomic_flush = vc4_crtc_atomic_flush,
.atomic_enable = vc4_crtc_atomic_enable,
.atomic_disable = vc4_crtc_atomic_disable,
+ .get_scanout_position = vc4_crtc_get_scanout_position,
};
static const struct vc4_crtc_data pv0_data = {
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index c586325de2a5..6dfede03396e 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -252,7 +252,7 @@ static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
bridge = drm_panel_bridge_add_typed(panel,
DRM_MODE_CONNECTOR_DPI);
- return drm_bridge_attach(dpi->encoder, bridge, NULL);
+ return drm_bridge_attach(dpi->encoder, bridge, NULL, 0);
}
static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 5e6fb6c2307f..76f93b662766 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -190,9 +190,6 @@ static struct drm_driver vc4_drm_driver = {
.irq_postinstall = vc4_irq_postinstall,
.irq_uninstall = vc4_irq_uninstall,
- .get_scanout_position = vc4_crtc_get_scanoutpos,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
-
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = vc4_debugfs_init,
#endif
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 6627b20c99e9..139d25a8328e 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -65,7 +65,7 @@ struct vc4_perfmon {
* Note that counter values can't be reset, but you can fake a reset by
* destroying the perfmon and creating a new one.
*/
- u64 counters[0];
+ u64 counters[];
};
struct vc4_dev {
@@ -677,32 +677,41 @@ struct vc4_validated_shader_info {
};
/**
- * _wait_for - magic (register) wait macro
+ * __wait_for - magic wait macro
*
- * Does the right thing for modeset paths when run under kdgb or similar atomic
- * contexts. Note that it's important that we check the condition again after
- * having timed out, since the timeout could be due to preemption or similar and
- * we've never had a chance to check the condition before the timeout.
+ * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
+ * important that we check the condition again after having timed out, since the
+ * timeout could be due to preemption or similar and we've never had a chance to
+ * check the condition before the timeout.
*/
-#define _wait_for(COND, MS, W) ({ \
- unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
- int ret__ = 0; \
- while (!(COND)) { \
- if (time_after(jiffies, timeout__)) { \
- if (!(COND)) \
- ret__ = -ETIMEDOUT; \
+#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
+ const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
+ long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
+ int ret__; \
+ might_sleep(); \
+ for (;;) { \
+ const bool expired__ = ktime_after(ktime_get_raw(), end__); \
+ OP; \
+ /* Guarantee COND check prior to timeout */ \
+ barrier(); \
+ if (COND) { \
+ ret__ = 0; \
break; \
} \
- if (W && drm_can_sleep()) { \
- msleep(W); \
- } else { \
- cpu_relax(); \
+ if (expired__) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
} \
+ usleep_range(wait__, wait__ * 2); \
+ if (wait__ < (Wmax)) \
+ wait__ <<= 1; \
} \
ret__; \
})
-#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
+ (Wmax))
+#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
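
The same conversion lands here for vc4; the lower-level _wait_for() form additionally exposes the backoff bounds. A hypothetical use, polling for up to 1 ms with sleeps growing from 10 us to at most 100 us (register and bit names are illustrative, not from this patch):

ret = _wait_for(HVS_READ(SCALER_DISPSTAT) & SCALER_DISPSTAT_EMPTY,
		1000, 10, 100);
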
/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
@@ -743,10 +752,6 @@ void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
-bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
void vc4_crtc_txp_armed(struct drm_crtc_state *state);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index fd8a2eb60505..d99b1d526651 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -1619,7 +1619,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
DRM_MODE_ENCODER_DSI, NULL);
drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
- ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL);
+ ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL, 0);
if (ret) {
dev_err(dev, "bridge attach failed: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 4934127f0d76..91e408f7a56e 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -139,7 +139,7 @@ static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
static bool plane_enabled(struct drm_plane_state *state)
{
- return state->fb && state->crtc;
+ return state->fb && !WARN_ON(!state->crtc);
}
static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 5bd60ded3d81..909eba43664a 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -196,9 +196,10 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
return ERR_CAST(obj);
ret = drm_gem_handle_create(file, &obj->base, handle);
- drm_gem_object_put_unlocked(&obj->base);
- if (ret)
+ if (ret) {
+ drm_gem_object_put_unlocked(&obj->base);
return ERR_PTR(ret);
+ }
return &obj->base;
}
@@ -221,7 +222,9 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
args->size = gem_object->size;
args->pitch = pitch;
- DRM_DEBUG("Created object of size %lld\n", size);
+ drm_gem_object_put_unlocked(gem_object);
+
+ DRM_DEBUG("Created object of size %llu\n", args->size);
return 0;
}
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index d13a3897506e..551fa31629af 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -188,8 +188,8 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
kfree(vsg->desc_pages);
/* fall through */
case dr_via_pages_locked:
- put_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
- (vsg->direction == DMA_FROM_DEVICE));
+ unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
+ (vsg->direction == DMA_FROM_DEVICE));
/* fall through */
case dr_via_pages_alloc:
vfree(vsg->pages);
@@ -239,7 +239,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages));
if (NULL == vsg->pages)
return -ENOMEM;
- ret = get_user_pages_fast((unsigned long)xfer->mem_addr,
+ ret = pin_user_pages_fast((unsigned long)xfer->mem_addr,
vsg->num_pages,
vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
vsg->pages);
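
The conversion above follows the FOLL_PIN pairing rule: pages taken with pin_user_pages_fast() must be released with unpin_user_pages_dirty_lock(), marking them dirty only when the device may have written to them. The idiom in isolation (do_dma() is a hypothetical placeholder):

ret = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
if (ret <= 0)
	return ret ? ret : -EFAULT;
do_dma(pages, ret);				/* hypothetical DMA step */
unpin_user_pages_dirty_lock(pages, ret, write);	/* dirty iff device wrote */
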
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index 5156e6b279db..e27120d512b0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -47,6 +47,7 @@ static int virtio_gpu_features(struct seq_file *m, void *data)
virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
virtio_add_bool(m, "edid", vgdev->has_edid);
+ virtio_add_bool(m, "indirect", vgdev->has_indirect);
virtio_add_int(m, "cap sets", vgdev->num_capsets);
virtio_add_int(m, "scanouts", vgdev->num_scanouts);
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 0966208ec30d..2b7e6ae65546 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -30,7 +30,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "virtgpu_drv.h"
@@ -91,6 +90,7 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
crtc->mode.hdisplay,
crtc->mode.vdisplay, 0, 0);
+ virtio_gpu_notify(vgdev);
}
static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -109,6 +109,7 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
+ virtio_gpu_notify(vgdev);
output->enabled = false;
}
@@ -121,13 +122,6 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- unsigned long flags;
-
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- if (crtc->state->event)
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- crtc->state->event = NULL;
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
@@ -332,6 +326,7 @@ static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_enables(dev, state);
drm_atomic_helper_commit_planes(dev, state, 0);
+ drm_atomic_helper_fake_vblank(state);
drm_atomic_helper_commit_hw_done(state);
drm_atomic_helper_wait_for_vblanks(dev, state);
@@ -375,6 +370,5 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
for (i = 0 ; i < vgdev->num_scanouts; ++i)
kfree(vgdev->outputs[i].edid);
- drm_atomic_helper_shutdown(vgdev->ddev);
drm_mode_config_cleanup(vgdev->ddev);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 8cf27af3ad53..ab4bed78e656 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -31,6 +31,7 @@
#include <linux/pci.h>
#include <drm/drm.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
@@ -135,7 +136,8 @@ static void virtio_gpu_remove(struct virtio_device *vdev)
{
struct drm_device *dev = vdev->priv;
- drm_dev_unregister(dev);
+ drm_dev_unplug(dev);
+ drm_atomic_helper_shutdown(dev);
virtio_gpu_deinit(dev);
drm_dev_put(dev);
}
@@ -214,4 +216,6 @@ static struct drm_driver driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
+
+ .release = virtio_gpu_release,
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 7e69c06e168e..c1824bdf2418 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -32,6 +32,7 @@
#include <linux/virtio_gpu.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
@@ -68,15 +69,21 @@ struct virtio_gpu_object_params {
struct virtio_gpu_object {
struct drm_gem_shmem_object base;
uint32_t hw_res_handle;
-
- struct sg_table *pages;
- uint32_t mapped;
bool dumb;
bool created;
};
#define gem_to_virtio_gpu_obj(gobj) \
container_of((gobj), struct virtio_gpu_object, base.base)
+struct virtio_gpu_object_shmem {
+ struct virtio_gpu_object base;
+ struct sg_table *pages;
+ uint32_t mapped;
+};
+
+#define to_virtio_gpu_shmem(virtio_gpu_object) \
+ container_of((virtio_gpu_object), struct virtio_gpu_object_shmem, base)
+
struct virtio_gpu_object_array {
struct ww_acquire_ctx ticket;
struct list_head next;
@@ -114,6 +121,7 @@ struct virtio_gpu_vbuffer {
char *resp_buf;
int resp_size;
virtio_gpu_resp_cb resp_cb;
+ void *resp_cb_data;
struct virtio_gpu_object_array *objs;
struct list_head list;
@@ -175,10 +183,8 @@ struct virtio_gpu_device {
struct virtio_gpu_queue ctrlq;
struct virtio_gpu_queue cursorq;
struct kmem_cache *vbufs;
- bool vqs_ready;
- bool disable_notify;
- bool pending_notify;
+ atomic_t pending_commands;
struct ida resource_ida;
@@ -193,6 +199,7 @@ struct virtio_gpu_device {
bool has_virgl_3d;
bool has_edid;
+ bool has_indirect;
struct work_struct config_changed_work;
@@ -207,6 +214,8 @@ struct virtio_gpu_device {
struct virtio_gpu_fpriv {
uint32_t ctx_id;
+ bool context_created;
+ struct mutex context_lock;
};
/* virtio_ioctl.c */
@@ -216,6 +225,7 @@ extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
/* virtio_kms.c */
int virtio_gpu_init(struct drm_device *dev);
void virtio_gpu_deinit(struct drm_device *dev);
+void virtio_gpu_release(struct drm_device *dev);
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);
@@ -262,7 +272,7 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
- uint32_t resource_id);
+ struct virtio_gpu_object *bo);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
uint32_t width, uint32_t height,
@@ -279,9 +289,8 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
uint32_t x, uint32_t y);
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
- struct virtio_gpu_fence *fence);
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *obj);
+ struct virtio_gpu_mem_entry *ents,
+ unsigned int nents);
int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
@@ -332,8 +341,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
void virtio_gpu_dequeue_fence_func(struct work_struct *work);
-void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev);
-void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev);
+void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
/* virtio_gpu_display.c */
void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
@@ -355,12 +363,16 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
u64 last_seq);
/* virtio_gpu_object */
+void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo);
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
size_t size);
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_params *params,
struct virtio_gpu_object **bo_ptr,
struct virtio_gpu_fence *fence);
+
+bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);
+
/* virtgpu_prime.c */
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 0a2b62279647..0d6152c99a27 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -123,6 +123,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
objs);
+ virtio_gpu_notify(vgdev);
return 0;
}
@@ -143,6 +144,7 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
objs);
+ virtio_gpu_notify(vgdev);
}
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 205ec4abae2b..336cc9143205 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -33,13 +33,34 @@
#include "virtgpu_drv.h"
+static void virtio_gpu_create_context(struct drm_device *dev,
+ struct drm_file *file)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ char dbgname[TASK_COMM_LEN];
+
+ mutex_lock(&vfpriv->context_lock);
+ if (vfpriv->context_created)
+ goto out_unlock;
+
+ get_task_comm(dbgname, current);
+ virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+ strlen(dbgname), dbgname);
+ virtio_gpu_notify(vgdev);
+ vfpriv->context_created = true;
+
+out_unlock:
+ mutex_unlock(&vfpriv->context_lock);
+}
+
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_map *virtio_gpu_map = data;
- return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
+ return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
virtio_gpu_map->handle,
&virtio_gpu_map->offset);
}
@@ -51,11 +72,11 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
* VIRTIO_GPUReleaseInfo struct (first XXX bytes)
*/
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
- struct drm_file *drm_file)
+ struct drm_file *file)
{
struct drm_virtgpu_execbuffer *exbuf = data;
struct virtio_gpu_device *vgdev = dev->dev_private;
- struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct virtio_gpu_fence *out_fence;
int ret;
uint32_t *bo_handles = NULL;
@@ -74,6 +95,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
exbuf->fence_fd = -1;
+ virtio_gpu_create_context(dev, file);
if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
struct dma_fence *in_fence;
@@ -116,7 +138,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
goto out_unused_fd;
}
- buflist = virtio_gpu_array_from_handles(drm_file, bo_handles,
+ buflist = virtio_gpu_array_from_handles(file, bo_handles,
exbuf->num_bo_handles);
if (!buflist) {
ret = -ENOENT;
@@ -126,22 +148,22 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
bo_handles = NULL;
}
- if (buflist) {
- ret = virtio_gpu_array_lock_resv(buflist);
- if (ret)
- goto out_unused_fd;
- }
-
buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
- goto out_unresv;
+ goto out_unused_fd;
+ }
+
+ if (buflist) {
+ ret = virtio_gpu_array_lock_resv(buflist);
+ if (ret)
+ goto out_memdup;
}
out_fence = virtio_gpu_fence_alloc(vgdev);
if (!out_fence) {
ret = -ENOMEM;
- goto out_memdup;
+ goto out_unresv;
}
if (out_fence_fd >= 0) {
@@ -158,13 +180,14 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
vfpriv->ctx_id, buflist, out_fence);
+ virtio_gpu_notify(vgdev);
return 0;
-out_memdup:
- kvfree(buf);
out_unresv:
if (buflist)
virtio_gpu_array_unlock_resv(buflist);
+out_memdup:
+ kvfree(buf);
out_unused_fd:
kvfree(bo_handles);
if (buflist)
@@ -177,7 +200,7 @@ out_unused_fd:
}
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_getparam *param = data;
@@ -200,7 +223,7 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
}
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_resource_create *rc = data;
@@ -211,7 +234,17 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
uint32_t handle = 0;
struct virtio_gpu_object_params params = { 0 };
- if (vgdev->has_virgl_3d == false) {
+ if (vgdev->has_virgl_3d) {
+ virtio_gpu_create_context(dev, file);
+ params.virgl = true;
+ params.target = rc->target;
+ params.bind = rc->bind;
+ params.depth = rc->depth;
+ params.array_size = rc->array_size;
+ params.last_level = rc->last_level;
+ params.nr_samples = rc->nr_samples;
+ params.flags = rc->flags;
+ } else {
if (rc->depth > 1)
return -EINVAL;
if (rc->nr_samples > 1)
@@ -228,16 +261,6 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
params.width = rc->width;
params.height = rc->height;
params.size = rc->size;
- if (vgdev->has_virgl_3d) {
- params.virgl = true;
- params.target = rc->target;
- params.bind = rc->bind;
- params.depth = rc->depth;
- params.array_size = rc->array_size;
- params.last_level = rc->last_level;
- params.nr_samples = rc->nr_samples;
- params.flags = rc->flags;
- }
/* allocate a single page size object */
if (params.size == 0)
params.size = PAGE_SIZE;
@@ -251,7 +274,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
return ret;
obj = &qobj->base.base;
- ret = drm_gem_handle_create(file_priv, obj, &handle);
+ ret = drm_gem_handle_create(file, obj, &handle);
if (ret) {
drm_gem_object_release(obj);
return ret;
@@ -264,13 +287,13 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
}
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_virtgpu_resource_info *ri = data;
struct drm_gem_object *gobj = NULL;
struct virtio_gpu_object *qobj = NULL;
- gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
+ gobj = drm_gem_object_lookup(file, ri->bo_handle);
if (gobj == NULL)
return -ENOENT;
@@ -297,6 +320,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
if (vgdev->has_virgl_3d == false)
return -ENOSYS;
+ virtio_gpu_create_context(dev, file);
objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
if (objs == NULL)
return -ENOENT;
@@ -314,6 +338,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
(vgdev, vfpriv->ctx_id, offset, args->level,
&args->box, objs, fence);
dma_fence_put(&fence->f);
+ virtio_gpu_notify(vgdev);
return 0;
err_unlock:
@@ -344,6 +369,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
args->box.w, args->box.h, args->box.x, args->box.y,
objs, NULL);
} else {
+ virtio_gpu_create_context(dev, file);
ret = virtio_gpu_array_lock_resv(objs);
if (ret != 0)
goto err_put_free;
@@ -359,6 +385,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
args->level, &args->box, objs, fence);
dma_fence_put(&fence->f);
}
+ virtio_gpu_notify(vgdev);
return 0;
err_unlock:
@@ -445,6 +472,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
/* not in cache - need to talk to hw */
virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
&cache_ent);
+ virtio_gpu_notify(vgdev);
copy_exit:
ret = wait_event_timeout(vgdev->resp_wq,
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 2f5773e43557..023a030ca7b9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -44,6 +44,7 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
if (vgdev->has_edid)
virtio_gpu_cmd_get_edids(vgdev);
virtio_gpu_cmd_get_display_info(vgdev);
+ virtio_gpu_notify(vgdev);
drm_helper_hpd_irq_event(vgdev->ddev);
events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
}
@@ -51,22 +52,11 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
events_clear, &events_clear);
}
-static int virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
- uint32_t nlen, const char *name)
-{
- int handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
-
- if (handle < 0)
- return handle;
- handle += 1;
- virtio_gpu_cmd_context_create(vgdev, handle, nlen, name);
- return handle;
-}
-
static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
uint32_t ctx_id)
{
virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
+ virtio_gpu_notify(vgdev);
ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
}
@@ -92,6 +82,7 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
}
for (i = 0; i < num_capsets; i++) {
virtio_gpu_cmd_get_capset_info(vgdev, i);
+ virtio_gpu_notify(vgdev);
ret = wait_event_timeout(vgdev->resp_wq,
vgdev->capsets[i].id > 0, 5 * HZ);
if (ret == 0) {
@@ -159,6 +150,9 @@ int virtio_gpu_init(struct drm_device *dev)
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
vgdev->has_edid = true;
}
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
+ vgdev->has_indirect = true;
+ }
DRM_INFO("features: %cvirgl %cedid\n",
vgdev->has_virgl_3d ? '+' : '-',
@@ -196,13 +190,13 @@ int virtio_gpu_init(struct drm_device *dev)
virtio_gpu_modeset_init(vgdev);
virtio_device_ready(vgdev->vdev);
- vgdev->vqs_ready = true;
if (num_capsets)
virtio_gpu_get_capsets(vgdev, num_capsets);
if (vgdev->has_edid)
virtio_gpu_cmd_get_edids(vgdev);
virtio_gpu_cmd_get_display_info(vgdev);
+ virtio_gpu_notify(vgdev);
wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
5 * HZ);
return 0;
@@ -231,12 +225,16 @@ void virtio_gpu_deinit(struct drm_device *dev)
struct virtio_gpu_device *vgdev = dev->dev_private;
flush_work(&vgdev->obj_free_work);
- vgdev->vqs_ready = false;
flush_work(&vgdev->ctrlq.dequeue_work);
flush_work(&vgdev->cursorq.dequeue_work);
flush_work(&vgdev->config_changed_work);
vgdev->vdev->config->reset(vgdev->vdev);
vgdev->vdev->config->del_vqs(vgdev->vdev);
+}
+
+void virtio_gpu_release(struct drm_device *dev)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
virtio_gpu_modeset_fini(vgdev);
virtio_gpu_free_vbufs(vgdev);
@@ -249,8 +247,7 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv;
- int id;
- char dbgname[TASK_COMM_LEN];
+ int handle;
/* can't create contexts without 3d renderer */
if (!vgdev->has_virgl_3d)
@@ -261,14 +258,15 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
if (!vfpriv)
return -ENOMEM;
- get_task_comm(dbgname, current);
- id = virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname);
- if (id < 0) {
+ mutex_init(&vfpriv->context_lock);
+
+ handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
+ if (handle < 0) {
kfree(vfpriv);
- return id;
+ return handle;
}
- vfpriv->ctx_id = id;
+ vfpriv->ctx_id = handle + 1;
file->driver_priv = vfpriv;
return 0;
}
@@ -284,6 +282,7 @@ void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
vfpriv = file->driver_priv;
virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
+ mutex_destroy(&vfpriv->context_lock);
kfree(vfpriv);
file->driver_priv = NULL;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 017a9e0fc3bb..2bfb13d1932e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -23,6 +23,7 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include "virtgpu_drv.h"
@@ -42,8 +43,8 @@ static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
* "f91a9dd35715 Fix unlinking resources from hash
* table." (Feb 2019) fixes the bug.
*/
- static int handle;
- handle++;
+ static atomic_t seqno = ATOMIC_INIT(0);
+ int handle = atomic_inc_return(&seqno);
*resid = handle + 1;
} else {
int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
@@ -61,21 +62,46 @@ static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t
}
}
-static void virtio_gpu_free_object(struct drm_gem_object *obj)
+void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
- struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
- if (bo->pages)
- virtio_gpu_object_detach(vgdev, bo);
- if (bo->created)
- virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
+ if (virtio_gpu_is_shmem(bo)) {
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+
+ if (shmem->pages) {
+ if (shmem->mapped) {
+ dma_unmap_sg(vgdev->vdev->dev.parent,
+ shmem->pages->sgl, shmem->mapped,
+ DMA_TO_DEVICE);
+ shmem->mapped = 0;
+ }
+
+ sg_free_table(shmem->pages);
+ shmem->pages = NULL;
+ drm_gem_shmem_unpin(&bo->base.base);
+ }
+
+ drm_gem_shmem_free_object(&bo->base.base);
+ }
+}
+
+static void virtio_gpu_free_object(struct drm_gem_object *obj)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
- drm_gem_shmem_free_object(obj);
+ if (bo->created) {
+ virtio_gpu_cmd_unref_resource(vgdev, bo);
+ virtio_gpu_notify(vgdev);
+ /* completion handler calls virtio_gpu_cleanup_object() */
+ return;
+ }
+ virtio_gpu_cleanup_object(bo);
}
-static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
+static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
.free = virtio_gpu_free_object,
.open = virtio_gpu_gem_object_open,
.close = virtio_gpu_gem_object_close,
@@ -86,9 +112,14 @@ static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .mmap = &drm_gem_shmem_mmap,
+ .mmap = drm_gem_shmem_mmap,
};
+bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
+{
+ return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
+}
+
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
size_t size)
{
@@ -98,10 +129,58 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
if (!bo)
return NULL;
- bo->base.base.funcs = &virtio_gpu_gem_funcs;
+ bo->base.base.funcs = &virtio_gpu_shmem_funcs;
+ bo->base.map_cached = true;
return &bo->base.base;
}
+static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo,
+ struct virtio_gpu_mem_entry **ents,
+ unsigned int *nents)
+{
+ bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+ struct scatterlist *sg;
+ int si, ret;
+
+ ret = drm_gem_shmem_pin(&bo->base.base);
+ if (ret < 0)
+ return -EINVAL;
+
+ shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
+ if (!shmem->pages) {
+ drm_gem_shmem_unpin(&bo->base.base);
+ return -EINVAL;
+ }
+
+ if (use_dma_api) {
+ shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
+ shmem->pages->sgl,
+ shmem->pages->nents,
+ DMA_TO_DEVICE);
+ *nents = shmem->mapped;
+ } else {
+ *nents = shmem->pages->nents;
+ }
+
+ *ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
+ GFP_KERNEL);
+ if (!(*ents)) {
+ DRM_ERROR("failed to allocate ent list\n");
+ return -ENOMEM;
+ }
+
+ for_each_sg(shmem->pages->sgl, sg, *nents, si) {
+ (*ents)[si].addr = cpu_to_le64(use_dma_api
+ ? sg_dma_address(sg)
+ : sg_phys(sg));
+ (*ents)[si].length = cpu_to_le32(sg->length);
+ (*ents)[si].padding = 0;
+ }
+ return 0;
+}
+
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_params *params,
struct virtio_gpu_object **bo_ptr,
@@ -110,6 +189,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs = NULL;
struct drm_gem_shmem_object *shmem_obj;
struct virtio_gpu_object *bo;
+ struct virtio_gpu_mem_entry *ents;
+ unsigned int nents;
int ret;
*bo_ptr = NULL;
@@ -146,12 +227,19 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
objs, fence);
}
- ret = virtio_gpu_object_attach(vgdev, bo, NULL);
+ ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
+ if (ret != 0) {
+ virtio_gpu_free_object(&shmem_obj->base);
+ return ret;
+ }
+
+ ret = virtio_gpu_object_attach(vgdev, bo, ents, nents);
if (ret != 0) {
virtio_gpu_free_object(&shmem_obj->base);
return ret;
}
+ virtio_gpu_notify(vgdev);
*bo_ptr = bo;
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index d1c3f5fbfee4..52d24179bcec 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -148,14 +148,13 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
plane->state->src_w >> 16,
plane->state->src_h >> 16,
0, 0);
+ virtio_gpu_notify(vgdev);
return;
}
if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
return;
- virtio_gpu_disable_notify(vgdev);
-
bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
if (bo->dumb)
virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);
@@ -186,8 +185,7 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
rect.y1,
rect.x2 - rect.x1,
rect.y2 - rect.y1);
-
- virtio_gpu_enable_notify(vgdev);
+ virtio_gpu_notify(vgdev);
}
static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
@@ -266,6 +264,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
plane->state->crtc_w,
plane->state->crtc_h,
0, 0, objs, vgfb->fence);
+ virtio_gpu_notify(vgdev);
dma_fence_wait(&vgfb->fence->f, true);
dma_fence_put(&vgfb->fence->f);
vgfb->fence = NULL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5914e79d3429..73854915ec34 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -95,7 +95,8 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
if (!vbuf)
return ERR_PTR(-ENOMEM);
- BUG_ON(size > MAX_INLINE_CMD_SIZE);
+ BUG_ON(size > MAX_INLINE_CMD_SIZE ||
+ size < sizeof(struct virtio_gpu_ctrl_hdr));
vbuf->buf = (void *)vbuf + sizeof(*vbuf);
vbuf->size = size;
@@ -109,21 +110,14 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
return vbuf;
}
-static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer **vbuffer_p,
- int size)
+static struct virtio_gpu_ctrl_hdr *
+virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
- struct virtio_gpu_vbuffer *vbuf;
-
- vbuf = virtio_gpu_get_vbuf(vgdev, size,
- sizeof(struct virtio_gpu_ctrl_hdr),
- NULL, NULL);
- if (IS_ERR(vbuf)) {
- *vbuffer_p = NULL;
- return ERR_CAST(vbuf);
- }
- *vbuffer_p = vbuf;
- return vbuf->buf;
+ /* this assumes a vbuf contains a command that starts with a
+ * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
+ * virtqueues.
+ */
+ return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}
static struct virtio_gpu_update_cursor*
@@ -161,6 +155,25 @@ static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
return (struct virtio_gpu_command *)vbuf->buf;
}
+static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer **vbuffer_p,
+ int size)
+{
+ return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
+ sizeof(struct virtio_gpu_ctrl_hdr),
+ NULL);
+}
+
+static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer **vbuffer_p,
+ int size,
+ virtio_gpu_resp_cb cb)
+{
+ return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
+ sizeof(struct virtio_gpu_ctrl_hdr),
+ NULL);
+}
+
static void free_vbuf(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
{
@@ -209,12 +222,12 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
- if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
+ if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
struct virtio_gpu_ctrl_hdr *cmd;
- cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
- DRM_ERROR("response 0x%x (command 0x%x)\n",
- le32_to_cpu(resp->type),
- le32_to_cpu(cmd->type));
+ cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
+ DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
+ le32_to_cpu(resp->type),
+ le32_to_cpu(cmd->type));
} else
DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
}
@@ -307,109 +320,107 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
return sgt;
}
-static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf,
- struct scatterlist *vout)
- __releases(&vgdev->ctrlq.qlock)
- __acquires(&vgdev->ctrlq.qlock)
+static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ struct virtio_gpu_fence *fence,
+ int elemcnt,
+ struct scatterlist **sgs,
+ int outcnt,
+ int incnt)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
- struct scatterlist *sgs[3], vcmd, vresp;
- int outcnt = 0, incnt = 0;
- bool notify = false;
- int ret;
+ int ret, idx;
- if (!vgdev->vqs_ready)
- return notify;
+ if (!drm_dev_enter(vgdev->ddev, &idx)) {
+ if (fence && vbuf->objs)
+ virtio_gpu_array_unlock_resv(vbuf->objs);
+ free_vbuf(vgdev, vbuf);
+ return;
+ }
- sg_init_one(&vcmd, vbuf->buf, vbuf->size);
- sgs[outcnt + incnt] = &vcmd;
- outcnt++;
+ if (vgdev->has_indirect)
+ elemcnt = 1;
- if (vout) {
- sgs[outcnt + incnt] = vout;
- outcnt++;
+again:
+ spin_lock(&vgdev->ctrlq.qlock);
+
+ if (vq->num_free < elemcnt) {
+ spin_unlock(&vgdev->ctrlq.qlock);
+ virtio_gpu_notify(vgdev);
+ wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
+ goto again;
}
- if (vbuf->resp_size) {
- sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
- sgs[outcnt + incnt] = &vresp;
- incnt++;
+ /* now that the position of the vbuf in the virtqueue is known, we can
+ * finally set the fence id
+ */
+ if (fence) {
+ virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
+ fence);
+ if (vbuf->objs) {
+ virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
+ virtio_gpu_array_unlock_resv(vbuf->objs);
+ }
}
-retry:
ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
- if (ret == -ENOSPC) {
- spin_unlock(&vgdev->ctrlq.qlock);
- wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
- spin_lock(&vgdev->ctrlq.qlock);
- goto retry;
- } else {
- trace_virtio_gpu_cmd_queue(vq,
- (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
+ WARN_ON(ret);
- notify = virtqueue_kick_prepare(vq);
- }
- return notify;
+ trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
+
+ atomic_inc(&vgdev->pending_commands);
+
+ spin_unlock(&vgdev->ctrlq.qlock);
+
+ drm_dev_exit(idx);
}
static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf,
- struct virtio_gpu_ctrl_hdr *hdr,
struct virtio_gpu_fence *fence)
{
- struct virtqueue *vq = vgdev->ctrlq.vq;
- struct scatterlist *vout = NULL, sg;
+ struct scatterlist *sgs[3], vcmd, vout, vresp;
struct sg_table *sgt = NULL;
- bool notify;
- int outcnt = 0;
+ int elemcnt = 0, outcnt = 0, incnt = 0;
+ /* set up vcmd */
+ sg_init_one(&vcmd, vbuf->buf, vbuf->size);
+ elemcnt++;
+ sgs[outcnt] = &vcmd;
+ outcnt++;
+
+ /* set up vout */
if (vbuf->data_size) {
if (is_vmalloc_addr(vbuf->data_buf)) {
+ int sg_ents;
sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
- &outcnt);
- if (!sgt)
+ &sg_ents);
+ if (!sgt) {
+ if (fence && vbuf->objs)
+ virtio_gpu_array_unlock_resv(vbuf->objs);
return;
- vout = sgt->sgl;
+ }
+
+ elemcnt += sg_ents;
+ sgs[outcnt] = sgt->sgl;
} else {
- sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
- vout = &sg;
- outcnt = 1;
+ sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
+ elemcnt++;
+ sgs[outcnt] = &vout;
}
+ outcnt++;
}
-again:
- spin_lock(&vgdev->ctrlq.qlock);
-
- /*
- * Make sure we have enouth space in the virtqueue. If not
- * wait here until we have.
- *
- * Without that virtio_gpu_queue_ctrl_buffer_nolock might have
- * to wait for free space, which can result in fence ids being
- * submitted out-of-order.
- */
- if (vq->num_free < 2 + outcnt) {
- spin_unlock(&vgdev->ctrlq.qlock);
- wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
- goto again;
+ /* set up vresp */
+ if (vbuf->resp_size) {
+ sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
+ elemcnt++;
+ sgs[outcnt + incnt] = &vresp;
+ incnt++;
}
- if (hdr && fence) {
- virtio_gpu_fence_emit(vgdev, hdr, fence);
- if (vbuf->objs) {
- virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
- virtio_gpu_array_unlock_resv(vbuf->objs);
- }
- }
- notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
- spin_unlock(&vgdev->ctrlq.qlock);
- if (notify) {
- if (vgdev->disable_notify)
- vgdev->pending_notify = true;
- else
- virtqueue_notify(vgdev->ctrlq.vq);
- }
+ virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
+ incnt);
if (sgt) {
sg_free_table(sgt);
@@ -417,25 +428,26 @@ again:
}
}
-void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev)
+void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
- vgdev->disable_notify = true;
-}
-
-void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
-{
- vgdev->disable_notify = false;
+ bool notify;
- if (!vgdev->pending_notify)
+ if (!atomic_read(&vgdev->pending_commands))
return;
- vgdev->pending_notify = false;
- virtqueue_notify(vgdev->ctrlq.vq);
+
+ spin_lock(&vgdev->ctrlq.qlock);
+ atomic_set(&vgdev->pending_commands, 0);
+ notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
+ spin_unlock(&vgdev->ctrlq.qlock);
+
+ if (notify)
+ virtqueue_notify(vgdev->ctrlq.vq);
}
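With notification now decoupled from queuing, a caller can submit a burst of control commands and kick the host once: each virtio_gpu_queue_ctrl_buffer() raises pending_commands, and a single virtio_gpu_notify() flushes them. An illustrative usage sketch (the loop is hypothetical, not upstream code):

	/* queue several commands without notifying ... */
	for (i = 0; i < cnt; i++)
		virtio_gpu_queue_ctrl_buffer(vgdev, vbufs[i]);
	/* ... then pay the cost of a single host kick */
	virtio_gpu_notify(vgdev);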
static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
{
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
@@ -443,12 +455,13 @@ static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
{
struct virtqueue *vq = vgdev->cursorq.vq;
struct scatterlist *sgs[1], ccmd;
+ int idx, ret, outcnt;
bool notify;
- int ret;
- int outcnt;
- if (!vgdev->vqs_ready)
+ if (!drm_dev_enter(vgdev->ddev, &idx)) {
+ free_vbuf(vgdev, vbuf);
return;
+ }
sg_init_one(&ccmd, vbuf->buf, vbuf->size);
sgs[0] = &ccmd;
@@ -464,7 +477,7 @@ retry:
goto retry;
} else {
trace_virtio_gpu_cmd_queue(vq,
- (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
+ virtio_gpu_vbuf_ctrl_hdr(vbuf));
notify = virtqueue_kick_prepare(vq);
}
@@ -473,6 +486,8 @@ retry:
if (notify)
virtqueue_notify(vq);
+
+ drm_dev_exit(idx);
}
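drm_dev_enter()/drm_dev_exit() replaces the old vqs_ready check: after hot-unplug, drm_dev_enter() fails and the buffer is freed instead of being pushed into a dead virtqueue. The general unplug-guard pattern, sketched with the names used above:

	int idx;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		/* device is gone: release resources, skip all HW access */
		free_vbuf(vgdev, vbuf);
		return;
	}
	/* ... hardware access is safe until the matching exit ... */
	drm_dev_exit(idx);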
/* just create gem objects for userspace and long lived objects,
@@ -499,39 +514,36 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
cmd_p->width = cpu_to_le32(params->width);
cmd_p->height = cpu_to_le32(params->height);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
bo->created = true;
}
-void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
- uint32_t resource_id)
+static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
{
- struct virtio_gpu_resource_unref *cmd_p;
- struct virtio_gpu_vbuffer *vbuf;
+ struct virtio_gpu_object *bo;
- cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
- memset(cmd_p, 0, sizeof(*cmd_p));
+ bo = vbuf->resp_cb_data;
+ vbuf->resp_cb_data = NULL;
- cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
- cmd_p->resource_id = cpu_to_le32(resource_id);
-
- virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_cleanup_object(bo);
}
-static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
- uint32_t resource_id,
- struct virtio_gpu_fence *fence)
+void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo)
{
- struct virtio_gpu_resource_detach_backing *cmd_p;
+ struct virtio_gpu_resource_unref *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
- cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
+ virtio_gpu_cmd_unref_cb);
memset(cmd_p, 0, sizeof(*cmd_p));
- cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ vbuf->resp_cb_data = bo;
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
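Resource unref now defers the final teardown to a response callback, so the BO and its backing pages stay alive until the host has actually processed the command. Assuming virtio_gpu_alloc_cmd_cb() stores the callback on the vbuffer and the response-dequeue path invokes it (an assumption; only the allocation side is visible here), the completion flow is roughly:

	/* sketch of the dequeue side; resp_cb is assumed from
	 * virtio_gpu_alloc_cmd_cb() above */
	if (vbuf->resp_cb)
		vbuf->resp_cb(vgdev, vbuf);	/* -> virtio_gpu_cmd_unref_cb() */
	/* the callback then runs virtio_gpu_cleanup_object(bo) */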
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
@@ -588,10 +600,11 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api)
dma_sync_sg_for_device(vgdev->vdev->dev.parent,
- bo->pages->sgl, bo->pages->nents,
+ shmem->pages->sgl, shmem->pages->nents,
DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -606,7 +619,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
cmd_p->r.x = cpu_to_le32(x);
cmd_p->r.y = cpu_to_le32(y);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
static void
@@ -629,7 +642,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
vbuf->data_buf = ents;
vbuf->data_size = sizeof(*ents) * nents;
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
@@ -939,7 +952,6 @@ void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
-
}
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
@@ -988,7 +1000,8 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
cmd_p->flags = cpu_to_le32(params->flags);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+
bo->created = true;
}
@@ -1003,10 +1016,11 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api)
dma_sync_sg_for_device(vgdev->vdev->dev.parent,
- bo->pages->sgl, bo->pages->nents,
+ shmem->pages->sgl, shmem->pages->nents,
DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -1021,7 +1035,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
@@ -1047,7 +1061,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
@@ -1070,94 +1084,19 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
cmd_p->size = cpu_to_le32(data_size);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
- struct virtio_gpu_fence *fence)
+ struct virtio_gpu_mem_entry *ents,
+ unsigned int nents)
{
- bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
- struct virtio_gpu_mem_entry *ents;
- struct scatterlist *sg;
- int si, nents, ret;
-
- if (WARN_ON_ONCE(!obj->created))
- return -EINVAL;
- if (WARN_ON_ONCE(obj->pages))
- return -EINVAL;
-
- ret = drm_gem_shmem_pin(&obj->base.base);
- if (ret < 0)
- return -EINVAL;
-
- obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
- if (obj->pages == NULL) {
- drm_gem_shmem_unpin(&obj->base.base);
- return -EINVAL;
- }
-
- if (use_dma_api) {
- obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
- obj->pages->sgl, obj->pages->nents,
- DMA_TO_DEVICE);
- nents = obj->mapped;
- } else {
- nents = obj->pages->nents;
- }
-
- /* gets freed when the ring has consumed it */
- ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
- GFP_KERNEL);
- if (!ents) {
- DRM_ERROR("failed to allocate ent list\n");
- return -ENOMEM;
- }
-
- for_each_sg(obj->pages->sgl, sg, nents, si) {
- ents[si].addr = cpu_to_le64(use_dma_api
- ? sg_dma_address(sg)
- : sg_phys(sg));
- ents[si].length = cpu_to_le32(sg->length);
- ents[si].padding = 0;
- }
-
virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
- ents, nents,
- fence);
+ ents, nents, NULL);
return 0;
}
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *obj)
-{
- bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-
- if (WARN_ON_ONCE(!obj->pages))
- return;
-
- if (use_dma_api && obj->mapped) {
- struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
- /* detach backing and wait for the host process it ... */
- virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
- dma_fence_wait(&fence->f, true);
- dma_fence_put(&fence->f);
-
- /* ... then tear down iommu mappings */
- dma_unmap_sg(vgdev->vdev->dev.parent,
- obj->pages->sgl, obj->mapped,
- DMA_TO_DEVICE);
- obj->mapped = 0;
- } else {
- virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
- }
-
- sg_free_table(obj->pages);
- obj->pages = NULL;
-
- drm_gem_shmem_unpin(&obj->base.base);
-}
-
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
struct virtio_gpu_output *output)
{
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 74f703b8d22a..ac85e17428f8 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -76,10 +76,12 @@ static void vkms_disable_vblank(struct drm_crtc *crtc)
hrtimer_cancel(&out->vblank_hrtimer);
}
-bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
- int *max_error, ktime_t *vblank_time,
- bool in_vblank_irq)
+static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
+ int *max_error, ktime_t *vblank_time,
+ bool in_vblank_irq)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
struct vkms_output *output = &vkmsdev->output;
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -154,6 +156,7 @@ static const struct drm_crtc_funcs vkms_crtc_funcs = {
.atomic_destroy_state = vkms_atomic_crtc_destroy_state,
.enable_vblank = vkms_enable_vblank,
.disable_vblank = vkms_disable_vblank,
+ .get_vblank_timestamp = vkms_get_vblank_timestamp,
.get_crc_sources = vkms_get_crc_sources,
.set_crc_source = vkms_set_crc_source,
.verify_crc_source = vkms_verify_crc_source,
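The vblank timestamp hook moves from the device-wide struct drm_driver (dropped in vkms_drv.c just below) into struct drm_crtc_funcs, so each callback receives a struct drm_crtc * and derives the device and pipe itself (crtc->dev, crtc->index), as vkms_get_vblank_timestamp() does above. A minimal sketch of the per-CRTC wiring for a driver making the same conversion (the example_* names are placeholders):

	static const struct drm_crtc_funcs example_crtc_funcs = {
		/* ... */
		.enable_vblank        = example_enable_vblank,  /* int (*)(struct drm_crtc *) */
		.disable_vblank       = example_disable_vblank, /* void (*)(struct drm_crtc *) */
		.get_vblank_timestamp = example_get_vblank_timestamp,
	};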
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 25bd7519295f..860de052e820 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -103,7 +103,6 @@ static struct drm_driver vkms_driver = {
.dumb_create = vkms_dumb_create,
.gem_vm_ops = &vkms_gem_vm_ops,
.gem_free_object_unlocked = vkms_gem_free_object,
- .get_vblank_timestamp = vkms_get_vblank_timestamp,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = vkms_prime_import_sg_table,
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 7d52e24564db..eda04ffba7b1 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -111,10 +111,6 @@ struct vkms_gem_object {
int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary, struct drm_plane *cursor);
-bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
- int *max_error, ktime_t *vblank_time,
- bool in_vblank_irq);
-
int vkms_output_init(struct vkms_device *vkmsdev, int index);
struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 5fc8f85aaf3d..6d31265a2ab7 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -117,7 +117,7 @@ static int vkms_plane_atomic_check(struct drm_plane *plane,
bool can_position = false;
int ret;
- if (!state->fb | !state->crtc)
+ if (!state->fb || WARN_ON(!state->crtc))
return 0;
crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 065015d2a8f6..3b41cf63110a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -1241,7 +1241,8 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
* actually call into the already enabled manager, when
* binding the MOB.
*/
- if (!(dev_priv->capabilities & SVGA_CAP_DX))
+ if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
+ !dev_priv->has_mob)
return -ENOMEM;
ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 827458f49112..4f58364421ce 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -29,6 +29,7 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/mem_encrypt.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
@@ -575,6 +576,10 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
[vmw_dma_map_populate] = "Caching DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
+ /* TTM currently doesn't fully support SEV encryption. */
+ if (mem_encrypt_active())
+ return -EINVAL;
+
if (vmw_force_coherent)
dev_priv->map_mode = vmw_dma_alloc_coherent;
else if (vmw_restrict_iommu)
@@ -682,8 +687,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
ret = vmw_dma_select_mode(dev_priv);
if (unlikely(ret != 0)) {
- DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
+ DRM_INFO("Restricting capabilities since DMA not available.\n");
refuse_dma = true;
+ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+ DRM_INFO("Disabling 3D acceleration.\n");
}
dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
@@ -866,7 +873,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->has_gmr = false;
}
- if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
dev_priv->has_mob = true;
if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
VMW_PL_MOB) != 0) {
@@ -1399,9 +1406,6 @@ static const struct file_operations vmwgfx_driver_fops = {
static struct drm_driver driver = {
.driver_features =
DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
- .get_vblank_counter = vmw_get_vblank_counter,
- .enable_vblank = vmw_enable_vblank,
- .disable_vblank = vmw_disable_vblank,
.ioctls = vmw_ioctls,
.num_ioctls = ARRAY_SIZE(vmw_ioctls),
.master_set = vmw_master_set,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 86b69397d166..b70d73225707 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -480,19 +480,6 @@ struct vmw_private {
bool has_sm4_1;
/*
- * VGA registers.
- */
-
- struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
- uint32_t vga_width;
- uint32_t vga_height;
- uint32_t vga_bpp;
- uint32_t vga_bpl;
- uint32_t vga_pitchlock;
-
- uint32_t num_displays;
-
- /*
* Framebuffer info.
*/
@@ -900,7 +887,6 @@ extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
uint32_t *seqno);
-extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
@@ -947,7 +933,6 @@ extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_bo_driver vmw_bo_driver;
-extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
@@ -1085,8 +1070,6 @@ int vmw_fb_on(struct vmw_private *vmw_priv);
int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
-int vmw_kms_save_vga(struct vmw_private *vmw_priv);
-int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
@@ -1100,9 +1083,9 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height);
-u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
-int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
-void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
+u32 vmw_get_vblank_counter(struct drm_crtc *crtc);
+int vmw_enable_vblank(struct drm_crtc *crtc);
+void vmw_disable_vblank(struct drm_crtc *crtc);
int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
@@ -1139,7 +1122,6 @@ int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
@@ -1186,10 +1168,6 @@ extern void vmw_otables_takedown(struct vmw_private *dev_priv);
extern const struct vmw_user_resource_conv *user_context_converter;
-extern int vmw_context_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- int id,
- struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
@@ -1219,7 +1197,6 @@ vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
extern const struct vmw_user_resource_conv *user_surface_converter;
-extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -1230,11 +1207,6 @@ extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int vmw_surface_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle, int *id);
-extern int vmw_surface_validate(struct vmw_private *dev_priv,
- struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
uint32_t user_accounting_size,
SVGA3dSurfaceAllFlags svga3d_flags,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index e5252ef3812f..6941689085ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -169,10 +169,8 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
u32 *fifo_mem = dev_priv->mmio_virt;
- preempt_disable();
if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
- preempt_enable();
}
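The preempt_disable()/preempt_enable() pair around cmpxchg() was redundant: cmpxchg() is a fully atomic read-modify-write, so the race for the FIFO busy flag is already resolved without a preemption guard. The idiom in general form (busy_flag and do_ping() are illustrative):

	/* cmpxchg(ptr, old, new) atomically stores new iff *ptr == old and
	 * returns the prior value; the winner performs the one-shot action */
	if (cmpxchg(&busy_flag, 0, 1) == 0)
		do_ping();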
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index f47d5710cc95..52e086a5691e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1897,87 +1897,6 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
return 0;
}
-int vmw_kms_save_vga(struct vmw_private *vmw_priv)
-{
- struct vmw_vga_topology_state *save;
- uint32_t i;
-
- vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
- vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
- vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
- if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
- vmw_priv->vga_pitchlock =
- vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
- else if (vmw_fifo_have_pitchlock(vmw_priv))
- vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
- SVGA_FIFO_PITCHLOCK);
-
- if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
- return 0;
-
- vmw_priv->num_displays = vmw_read(vmw_priv,
- SVGA_REG_NUM_GUEST_DISPLAYS);
-
- if (vmw_priv->num_displays == 0)
- vmw_priv->num_displays = 1;
-
- for (i = 0; i < vmw_priv->num_displays; ++i) {
- save = &vmw_priv->vga_save[i];
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
- save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
- save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
- save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
- save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
- save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
- if (i == 0 && vmw_priv->num_displays == 1 &&
- save->width == 0 && save->height == 0) {
-
- /*
- * It should be fairly safe to assume that these
- * values are uninitialized.
- */
-
- save->width = vmw_priv->vga_width - save->pos_x;
- save->height = vmw_priv->vga_height - save->pos_y;
- }
- }
-
- return 0;
-}
-
-int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
-{
- struct vmw_vga_topology_state *save;
- uint32_t i;
-
- vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
- vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
- if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
- vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
- vmw_priv->vga_pitchlock);
- else if (vmw_fifo_have_pitchlock(vmw_priv))
- vmw_mmio_write(vmw_priv->vga_pitchlock,
- vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
-
- if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
- return 0;
-
- for (i = 0; i < vmw_priv->num_displays; ++i) {
- save = &vmw_priv->vga_save[i];
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
- }
-
- return 0;
-}
-
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height)
@@ -1991,7 +1910,7 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
/**
* Function called by DRM code with vbl_lock held.
*/
-u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+u32 vmw_get_vblank_counter(struct drm_crtc *crtc)
{
return 0;
}
@@ -1999,7 +1918,7 @@ u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
/**
* Function called by DRM code with vbl_lock held.
*/
-int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int vmw_enable_vblank(struct drm_crtc *crtc)
{
return -EINVAL;
}
@@ -2007,7 +1926,7 @@ int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
/**
* Function called by DRM code with vbl_lock held.
*/
-void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void vmw_disable_vblank(struct drm_crtc *crtc)
{
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 5702219ec38f..16dafff5cab1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -236,6 +236,9 @@ static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
+ .get_vblank_counter = vmw_get_vblank_counter,
+ .enable_vblank = vmw_enable_vblank,
+ .disable_vblank = vmw_disable_vblank,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index fdb52f6d29fb..cd7ed1650d60 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -354,37 +354,6 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
}
/**
- * Stop all streams.
- *
- * Used by the fb code when starting.
- *
- * Takes the overlay lock.
- */
-int vmw_overlay_stop_all(struct vmw_private *dev_priv)
-{
- struct vmw_overlay *overlay = dev_priv->overlay_priv;
- int i, ret;
-
- if (!overlay)
- return 0;
-
- mutex_lock(&overlay->mutex);
-
- for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
- struct vmw_stream *stream = &overlay->stream[i];
- if (!stream->buf)
- continue;
-
- ret = vmw_overlay_stop(dev_priv, i, false, false);
- WARN_ON(ret != 0);
- }
-
- mutex_unlock(&overlay->mutex);
-
- return 0;
-}
-
-/**
* Try to resume all paused streams.
*
* Used by the kms code after moving a new scanout buffer to vram.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index f07aa857587c..60cfbfadd3f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -69,7 +69,7 @@ struct vmw_bo_dirty {
unsigned int ref_count;
unsigned long bitmap_size;
size_t size;
- unsigned long bitmap[0];
+ unsigned long bitmap[];
};
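bitmap[0] becomes a C99 flexible array member (bitmap[]), the kernel's preferred spelling since it lets the compiler see the real bounds. Allocations for such structs are typically sized with struct_size() from <linux/overflow.h>; a hedged sketch (num_longs is illustrative):

	struct vmw_bo_dirty *dirty;

	/* sizeof(*dirty) plus num_longs trailing unsigned longs,
	 * with overflow checking */
	dirty = kvzalloc(struct_size(dirty, bitmap, num_longs), GFP_KERNEL);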
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index e5a283263211..32a22e4eddb1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -319,6 +319,9 @@ static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
+ .get_vblank_counter = vmw_get_vblank_counter,
+ .enable_vblank = vmw_enable_vblank,
+ .disable_vblank = vmw_disable_vblank,
};
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 41a96fb49835..68aecb6d9f87 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -916,6 +916,9 @@ static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
+ .get_vblank_counter = vmw_get_vblank_counter,
+ .enable_vblank = vmw_enable_vblank,
+ .disable_vblank = vmw_disable_vblank,
};
@@ -1885,7 +1888,7 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
/* Do nothing if Screen Target support is turned off */
- if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE)
+ if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE || !dev_priv->has_mob)
return -ENOSYS;
if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 3ce630aa4fde..ec893cd17b50 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -79,7 +79,7 @@ struct vmw_surface_dirty {
struct svga3dsurface_cache cache;
size_t size;
u32 num_subres;
- SVGA3dBox boxes[0];
+ SVGA3dBox boxes[];
};
static void vmw_user_surface_free(struct vmw_resource *res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index d8ea3dd10af0..3f3b2c7a208a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -736,11 +736,6 @@ out_no_init:
return NULL;
}
-static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- return 0;
-}
-
static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
@@ -866,7 +861,6 @@ struct ttm_bo_driver vmw_bo_driver = {
.ttm_tt_create = &vmw_ttm_tt_create,
.ttm_tt_populate = &vmw_ttm_populate,
.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
- .invalidate_caches = vmw_invalidate_caches,
.init_mem_type = vmw_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = vmw_evict_flags,
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index 4f34c5208180..78096bbcd226 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -220,6 +220,24 @@ static bool display_send_page_flip(struct drm_simple_display_pipe *pipe,
return false;
}
+static int display_check(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
+{
+ /*
+ * Xen doesn't initialize vblanking via drm_vblank_init(), so
+ * DRM helpers assume that it doesn't handle vblanking and start
+ * sending out fake VBLANK events automatically.
+ *
+ * As Xen contains its own logic for sending out VBLANK events
+ * in send_pending_event(), disable no_vblank (i.e., the Xen
+ * driver has vblanking support).
+ */
+ crtc_state->no_vblank = false;
+
+ return 0;
+}
+
static void display_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_plane_state)
{
@@ -284,6 +302,7 @@ static const struct drm_simple_display_pipe_funcs display_funcs = {
.enable = display_enable,
.disable = display_disable,
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ .check = display_check,
.update = display_update,
};
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index 086c50fac689..c8f7b21fa09e 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -54,7 +54,7 @@ static int zx_vl_plane_atomic_check(struct drm_plane *plane,
int min_scale = FRAC_16_16(1, 8);
int max_scale = FRAC_16_16(8, 1);
- if (!crtc || !fb)
+ if (!crtc || WARN_ON(!fb))
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
@@ -281,7 +281,7 @@ static int zx_gl_plane_atomic_check(struct drm_plane *plane,
struct drm_crtc *crtc = plane_state->crtc;
struct drm_crtc_state *crtc_state;
- if (!crtc || !fb)
+ if (!crtc || WARN_ON(!fb))
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,